/*
 * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
 * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"

/* Name of this kernel module. */
#define DRV_NAME	"ib_srpt"

#define SRPT_ID_STRING	"Linux SRP target"

#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt

MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Global Variables
 */

static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock);	/* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list);	/* List of srpt_device structures. */

static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
		 "Maximum size of SRP request messages in bytes.");

static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
		 "Shared receive queue (SRQ) size.");

static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
		  0444);
MODULE_PARM_DESC(srpt_service_guid,
		 "Use this value for ioc_guid, id_ext, and cm_listen_id instead of the node_guid of the first HCA.");
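
/*
 * Usage note (illustrative, not from the original source): the parameters
 * above are read-only at runtime (permissions 0444), so they are typically
 * set at module load time, e.g. "modprobe ib_srpt srp_max_req_size=8296".
 */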

static struct ib_client srpt_client;
/* Protects both rdma_cm_port and rdma_cm_id. */
static DEFINE_MUTEX(rdma_cm_mutex);
/* Port number RDMA/CM will bind to. */
static u16 rdma_cm_port;
static struct rdma_cm_id *rdma_cm_id;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);

/*
 * The only allowed channel state changes are those that change the channel
 * state into a state with a higher numerical value. Hence the new > prev test.
 */
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
	unsigned long flags;
	enum rdma_ch_state prev;
	bool changed = false;

	spin_lock_irqsave(&ch->spinlock, flags);
	prev = ch->state;
	if (new > prev) {
		ch->state = new;
		changed = true;
	}
	spin_unlock_irqrestore(&ch->spinlock, flags);

	return changed;
}

/**
 * srpt_event_handler - asynchronous IB event callback function
 * @handler: IB event handler registered by ib_register_event_handler().
 * @event: Description of the event that occurred.
 *
 * Callback function called by the InfiniBand core when an asynchronous IB
 * event occurs. This callback may occur in interrupt context. See also
 * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
 * Architecture Specification.
 */
static void srpt_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	struct srpt_device *sdev =
		container_of(handler, struct srpt_device, event_handler);
	struct srpt_port *sport;
	u8 port_num;

	pr_debug("ASYNC event= %d on device= %s\n", event->event,
		 dev_name(&sdev->device->dev));

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			sport->lid = 0;
			sport->sm_lid = 0;
		} else {
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
		}
		break;
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		/* Refresh port data asynchronously. */
		port_num = event->element.port_num - 1;
		if (port_num < sdev->device->phys_port_cnt) {
			sport = &sdev->port[port_num];
			if (!sport->lid && !sport->sm_lid)
				schedule_work(&sport->work);
		} else {
			WARN(true, "event %d: port_num %d out of range 1..%d\n",
			     event->event, port_num + 1,
			     sdev->device->phys_port_cnt);
		}
		break;
	default:
		pr_err("received unrecognized IB event %d\n", event->event);
		break;
	}
}

/**
 * srpt_srq_event - SRQ event callback function
 * @event: Description of the event that occurred.
 * @ctx: Context pointer specified at SRQ creation time.
 */
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
	pr_debug("SRQ event %d\n", event->event);
}

static const char *get_ch_state_name(enum rdma_ch_state s)
{
	switch (s) {
	case CH_CONNECTING:
		return "connecting";
	case CH_LIVE:
		return "live";
	case CH_DISCONNECTING:
		return "disconnecting";
	case CH_DRAINING:
		return "draining";
	case CH_DISCONNECTED:
		return "disconnected";
	}
	return "???";
}

/**
 * srpt_qp_event - QP event callback function
 * @event: Description of the event that occurred.
 * @ch: SRPT RDMA channel.
 */
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
	pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
		 event->event, ch, ch->sess_name, ch->qp->qp_num,
		 get_ch_state_name(ch->state));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		if (ch->using_rdma_cm)
			rdma_notify(ch->rdma_cm.cm_id, event->event);
		else
			ib_cm_notify(ch->ib_cm.cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("%s-%d, state %s: received Last WQE event.\n",
			 ch->sess_name, ch->qp->qp_num,
			 get_ch_state_name(ch->state));
		break;
	default:
		pr_err("received unrecognized IB QP event %d\n", event->event);
		break;
	}
}

/**
 * srpt_set_ioc - initialize an IOUnitInfo structure
 * @c_list: controller list.
 * @slot: one-based slot number.
 * @value: four-bit value.
 *
 * Copies the lowest four bits of value in element slot of the array of four
 * bit elements called c_list (controller list). The index slot is one-based.
 */
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
	u16 id;
	u8 tmp;

	id = (slot - 1) / 2;
	if (slot & 0x1) {
		tmp = c_list[id] & 0xf;
		c_list[id] = (value << 4) | tmp;
	} else {
		tmp = c_list[id] & 0xf0;
		c_list[id] = (value & 0xf) | tmp;
	}
}

/**
 * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
 *
 * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
 * Specification.
 */
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
	struct ib_class_port_info *cif;

	cif = (struct ib_class_port_info *)mad->data;
	memset(cif, 0, sizeof(*cif));
	cif->base_version = 1;
	cif->class_version = 1;

	ib_set_cpi_resp_time(cif, 20);
	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_iou - write IOUnitInfo to a management datagram
 * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
 *
 * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
 * Specification. See also section B.7, table B.6 in the SRP r16a document.
 */
static void srpt_get_iou(struct ib_dm_mad *mad)
{
	struct ib_dm_iou_info *ioui;
	u8 slot;
	int i;

	ioui = (struct ib_dm_iou_info *)mad->data;
	ioui->change_id = cpu_to_be16(1);
	ioui->max_controllers = 16;

	/* set present for slot 1 and empty for the rest */
	srpt_set_ioc(ioui->controller_list, 1, 1);
	for (i = 1, slot = 2; i < 16; i++, slot++)
		srpt_set_ioc(ioui->controller_list, slot, 0);

	mad->mad_hdr.status = 0;
}
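
/*
 * Worked example (illustrative, not from the original source): after the
 * loop in srpt_get_iou() above, controller_list[0] equals 0x10 - slot 1
 * (present, value 1) occupies the high nibble of the first byte and slot 2
 * (empty) the low nibble; all remaining nibbles stay zero.
 */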

/**
 * srpt_get_ioc - write IOControllerProfile to a management datagram
 * @sport: HCA port through which the MAD has been received.
 * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
 * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
 *
 * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
 * Architecture Specification. See also section B.7, table B.7 in the SRP
 * r16a document.
 */
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
			 struct ib_dm_mad *mad)
{
	struct srpt_device *sdev = sport->sdev;
	struct ib_dm_ioc_profile *iocp;
	int send_queue_depth;

	iocp = (struct ib_dm_ioc_profile *)mad->data;

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	if (sdev->use_srq)
		send_queue_depth = sdev->srq_size;
	else
		send_queue_depth = min(MAX_SRPT_RQ_SIZE,
				       sdev->device->attrs.max_qp_wr);

	memset(iocp, 0, sizeof(*iocp));
	strcpy(iocp->id_string, SRPT_ID_STRING);
	iocp->guid = cpu_to_be64(srpt_service_guid);
	iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
	iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
	iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
	iocp->subsys_device_id = 0x0;
	iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
	iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
	iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
	iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
	iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
	iocp->rdma_read_depth = 4;
	iocp->send_size = cpu_to_be32(srp_max_req_size);
	iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
					  1U << 24));
	iocp->num_svc_entries = 1;
	iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
		SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;

	mad->mad_hdr.status = 0;
}

/**
 * srpt_get_svc_entries - write ServiceEntries to a management datagram
 * @ioc_guid: I/O controller GUID to use in reply.
 * @slot: I/O controller number.
 * @hi: End of the range of service entries to be specified in the reply.
 * @lo: Start of the range of service entries to be specified in the reply.
 * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
 *
 * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
 * Specification. See also section B.7, table B.8 in the SRP r16a document.
 */
static void srpt_get_svc_entries(u64 ioc_guid,
				 u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
	struct ib_dm_svc_entries *svc_entries;

	WARN_ON(!ioc_guid);

	if (!slot || slot > 16) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
		return;
	}

	if (slot > 2 || lo > hi || hi > 1) {
		mad->mad_hdr.status
			= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
		return;
	}

	svc_entries = (struct ib_dm_svc_entries *)mad->data;
	memset(svc_entries, 0, sizeof(*svc_entries));
	svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
	snprintf(svc_entries->service_entries[0].name,
		 sizeof(svc_entries->service_entries[0].name),
		 "%s%016llx",
		 SRP_SERVICE_NAME_PREFIX,
		 ioc_guid);

	mad->mad_hdr.status = 0;
}

/**
 * srpt_mgmt_method_get - process a received management datagram
 * @sp: HCA port through which the MAD has been received.
 * @rq_mad: received MAD.
 * @rsp_mad: response MAD.
 */
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
				 struct ib_dm_mad *rsp_mad)
{
	u16 attr_id;
	u32 slot;
	u8 hi, lo;

	attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
	switch (attr_id) {
	case DM_ATTR_CLASS_PORT_INFO:
		srpt_get_class_port_info(rsp_mad);
		break;
	case DM_ATTR_IOU_INFO:
		srpt_get_iou(rsp_mad);
		break;
	case DM_ATTR_IOC_PROFILE:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		srpt_get_ioc(sp, slot, rsp_mad);
		break;
	case DM_ATTR_SVC_ENTRIES:
		slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
		hi = (u8) ((slot >> 8) & 0xff);
		lo = (u8) (slot & 0xff);
		slot = (u16) ((slot >> 16) & 0xffff);
		srpt_get_svc_entries(srpt_service_guid,
				     slot, hi, lo, rsp_mad);
		break;
	default:
		rsp_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	}
}
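
/*
 * Note (illustrative, not from the original source): for DM_ATTR_SVC_ENTRIES
 * the 32-bit attr_mod field is unpacked above as the controller slot in bits
 * 16..31, the upper end of the requested service-entry range in bits 8..15
 * (hi) and the lower end in bits 0..7 (lo).
 */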

/**
 * srpt_mad_send_handler - MAD send completion callback
 * @mad_agent: Return value of ib_register_mad_agent().
 * @mad_wc: Work completion reporting that the MAD has been sent.
 */
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_wc *mad_wc)
{
	rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(mad_wc->send_buf);
}

/**
 * srpt_mad_recv_handler - MAD reception callback function
 * @mad_agent: Return value of ib_register_mad_agent().
 * @send_buf: Not used.
 * @mad_wc: Work completion reporting that a MAD has been received.
 */
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
				  struct ib_mad_send_buf *send_buf,
				  struct ib_mad_recv_wc *mad_wc)
{
	struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
	struct ib_ah *ah;
	struct ib_mad_send_buf *rsp;
	struct ib_dm_mad *dm_mad;

	if (!mad_wc || !mad_wc->recv_buf.mad)
		return;

	ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
				  mad_wc->recv_buf.grh, mad_agent->port_num);
	if (IS_ERR(ah))
		goto err;

	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);

	rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
				 mad_wc->wc->pkey_index, 0,
				 IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
				 GFP_KERNEL,
				 IB_MGMT_BASE_VERSION);
	if (IS_ERR(rsp))
		goto err_rsp;

	rsp->ah = ah;

	dm_mad = rsp->mad;
	memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
	dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	dm_mad->mad_hdr.status = 0;

	switch (mad_wc->recv_buf.mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
		break;
	case IB_MGMT_METHOD_SET:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
		break;
	default:
		dm_mad->mad_hdr.status =
		    cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
		break;
	}

	if (!ib_post_send_mad(rsp, NULL)) {
		ib_free_recv_mad(mad_wc);
		/* will destroy_ah & free_send_mad in send completion */
		return;
	}

	ib_free_send_mad(rsp);

err_rsp:
	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err:
	ib_free_recv_mad(mad_wc);
}

static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
{
	const __be16 *g = (const __be16 *)guid;

	return snprintf(buf, size, "%04x:%04x:%04x:%04x",
			be16_to_cpu(g[0]), be16_to_cpu(g[1]),
			be16_to_cpu(g[2]), be16_to_cpu(g[3]));
}
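
/*
 * Example (illustrative, not from the original source): for the GUID
 * 0x0002c90300a01234, srpt_format_guid() produces "0002:c903:00a0:1234",
 * i.e. the big-endian value split into four 16-bit groups.
 */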

/**
 * srpt_refresh_port - configure a HCA port
 * @sport: SRPT HCA port.
 *
 * Enable InfiniBand management datagram processing, update the cached sm_lid,
 * lid and gid values, and register a callback function for processing MADs
 * on the specified port.
 *
 * Note: It is safe to call this function more than once for the same port.
 */
static int srpt_refresh_port(struct srpt_port *sport)
{
	struct ib_mad_reg_req reg_req;
	struct ib_port_modify port_modify;
	struct ib_port_attr port_attr;
	int ret;

	ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
	if (ret)
		return ret;

	sport->sm_lid = port_attr.sm_lid;
	sport->lid = port_attr.lid;

	ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
	if (ret)
		return ret;

	sport->port_guid_id.wwn.priv = sport;
	srpt_format_guid(sport->port_guid_id.name,
			 sizeof(sport->port_guid_id.name),
			 &sport->gid.global.interface_id);
	sport->port_gid_id.wwn.priv = sport;
	snprintf(sport->port_gid_id.name, sizeof(sport->port_gid_id.name),
		 "0x%016llx%016llx",
		 be64_to_cpu(sport->gid.global.subnet_prefix),
		 be64_to_cpu(sport->gid.global.interface_id));

	if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
		return 0;

	memset(&port_modify, 0, sizeof(port_modify));
	port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
	port_modify.clr_port_cap_mask = 0;

	ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
	if (ret) {
		pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
			dev_name(&sport->sdev->device->dev), sport->port, ret);
		return 0;
	}

	if (!sport->mad_agent) {
		memset(&reg_req, 0, sizeof(reg_req));
		reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
		reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
		set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
		set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);

		sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
							 sport->port,
							 IB_QPT_GSI,
							 &reg_req, 0,
							 srpt_mad_send_handler,
							 srpt_mad_recv_handler,
							 sport, 0);
		if (IS_ERR(sport->mad_agent)) {
			pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
			       dev_name(&sport->sdev->device->dev), sport->port,
			       PTR_ERR(sport->mad_agent));
			sport->mad_agent = NULL;
			memset(&port_modify, 0, sizeof(port_modify));
			port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
			ib_modify_port(sport->sdev->device, sport->port, 0,
				       &port_modify);
		}
	}

	return 0;
}

/**
 * srpt_unregister_mad_agent - unregister MAD callback functions
 * @sdev: SRPT HCA pointer.
 *
 * Note: It is safe to call this function more than once for the same device.
 */
static void srpt_unregister_mad_agent(struct srpt_device *sdev)
{
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
	};
	struct srpt_port *sport;
	int i;

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		WARN_ON(sport->port != i);
		if (sport->mad_agent) {
			ib_modify_port(sdev->device, i, 0, &port_modify);
			ib_unregister_mad_agent(sport->mad_agent);
			sport->mad_agent = NULL;
		}
	}
}

/**
 * srpt_alloc_ioctx - allocate a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx_size: I/O context size.
 * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
					   int ioctx_size,
					   struct kmem_cache *buf_cache,
					   enum dma_data_direction dir)
{
	struct srpt_ioctx *ioctx;

	ioctx = kzalloc(ioctx_size, GFP_KERNEL);
	if (!ioctx)
		goto err;

	ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
	if (!ioctx->buf)
		goto err_free_ioctx;

	ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
				       kmem_cache_size(buf_cache), dir);
	if (ib_dma_mapping_error(sdev->device, ioctx->dma))
		goto err_free_buf;

	return ioctx;

err_free_buf:
	kmem_cache_free(buf_cache, ioctx->buf);
err_free_ioctx:
	kfree(ioctx);
err:
	return NULL;
}

/**
 * srpt_free_ioctx - free a SRPT I/O context structure
 * @sdev: SRPT HCA pointer.
 * @ioctx: I/O context pointer.
 * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
			    struct kmem_cache *buf_cache,
			    enum dma_data_direction dir)
{
	if (!ioctx)
		return;

	ib_dma_unmap_single(sdev->device, ioctx->dma,
			    kmem_cache_size(buf_cache), dir);
	kmem_cache_free(buf_cache, ioctx->buf);
	kfree(ioctx);
}

/**
 * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
 * @sdev: Device to allocate the I/O context ring for.
 * @ring_size: Number of elements in the I/O context ring.
 * @ioctx_size: I/O context size.
 * @buf_cache: I/O buffer cache.
 * @alignment_offset: Offset in each ring buffer at which the SRP information
 *	unit starts.
 * @dir: DMA data direction.
 */
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
				int ring_size, int ioctx_size,
				struct kmem_cache *buf_cache,
				int alignment_offset,
				enum dma_data_direction dir)
{
	struct srpt_ioctx **ring;
	int i;

	WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
		ioctx_size != sizeof(struct srpt_send_ioctx));

	ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
	if (!ring)
		goto out;
	for (i = 0; i < ring_size; ++i) {
		ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
		if (!ring[i])
			goto err;
		ring[i]->index = i;
		ring[i]->offset = alignment_offset;
	}
	goto out;

err:
	while (--i >= 0)
		srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
	kvfree(ring);
	ring = NULL;
out:
	return ring;
}

/**
 * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
 * @ioctx_ring: I/O context ring to be freed.
 * @sdev: SRPT HCA pointer.
 * @ring_size: Number of ring elements.
 * @buf_cache: I/O buffer cache.
 * @dir: DMA data direction.
 */
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
				 struct srpt_device *sdev, int ring_size,
				 struct kmem_cache *buf_cache,
				 enum dma_data_direction dir)
{
	int i;

	if (!ioctx_ring)
		return;

	for (i = 0; i < ring_size; ++i)
		srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
	kvfree(ioctx_ring);
}
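
/*
 * Overview (illustrative, not from the original source): a send I/O context
 * typically moves through SRPT_STATE_NEW -> SRPT_STATE_NEED_DATA (RDMA read
 * outstanding) -> SRPT_STATE_DATA_IN -> SRPT_STATE_CMD_RSP_SENT ->
 * SRPT_STATE_DONE; task management requests use SRPT_STATE_MGMT and
 * SRPT_STATE_MGMT_RSP_SENT instead. The helpers below implement these
 * transitions.
 */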

/**
 * srpt_set_cmd_state - set the state of a SCSI command
 * @ioctx: Send I/O context.
 * @new: New I/O context state.
 *
 * Does not modify the state of aborted commands. Returns the previous command
 * state.
 */
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
						  enum srpt_command_state new)
{
	enum srpt_command_state previous;

	previous = ioctx->state;
	if (previous != SRPT_STATE_DONE)
		ioctx->state = new;

	return previous;
}

/**
 * srpt_test_and_set_cmd_state - test and set the state of a command
 * @ioctx: Send I/O context.
 * @old: Current I/O context state.
 * @new: New I/O context state.
 *
 * Returns true if and only if the previous command state was equal to 'old'.
 */
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
					enum srpt_command_state old,
					enum srpt_command_state new)
{
	enum srpt_command_state previous;

	WARN_ON(!ioctx);
	WARN_ON(old == SRPT_STATE_DONE);
	WARN_ON(new == SRPT_STATE_NEW);

	previous = ioctx->state;
	if (previous == old)
		ioctx->state = new;

	return previous == old;
}

/**
 * srpt_post_recv - post an IB receive request
 * @sdev: SRPT HCA pointer.
 * @ch: SRPT RDMA channel.
 * @ioctx: Receive I/O context pointer.
 */
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
			  struct srpt_recv_ioctx *ioctx)
{
	struct ib_sge list;
	struct ib_recv_wr wr;

	BUG_ON(!sdev);
	list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
	list.length = srp_max_req_size;
	list.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_recv_done;
	wr.wr_cqe = &ioctx->ioctx.cqe;
	wr.next = NULL;
	wr.sg_list = &list;
	wr.num_sge = 1;

	if (sdev->use_srq)
		return ib_post_srq_recv(sdev->srq, &wr, NULL);
	else
		return ib_post_recv(ch->qp, &wr, NULL);
}

/**
 * srpt_zerolength_write - perform a zero-length RDMA write
 * @ch: SRPT RDMA channel.
 *
 * A quote from the InfiniBand specification: C9-88: For an HCA responder
 * using Reliable Connection service, for each zero-length RDMA READ or WRITE
 * request, the R_Key shall not be validated, even if the request includes
 * Immediate data.
 */
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
	struct ib_rdma_wr wr = {
		.wr = {
			.next		= NULL,
			{ .wr_cqe	= &ch->zw_cqe, },
			.opcode		= IB_WR_RDMA_WRITE,
			.send_flags	= IB_SEND_SIGNALED,
		}
	};

	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
		 ch->qp->qp_num);

	return ib_post_send(ch->qp, &wr.wr, NULL);
}

static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;

	pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
		 wc->status);

	if (wc->status == IB_WC_SUCCESS) {
		srpt_process_wait_list(ch);
	} else {
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			pr_debug("%s-%d: already disconnected.\n",
				 ch->sess_name, ch->qp->qp_num);
	}
}

static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
		struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
		unsigned *sg_cnt)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;
	struct scatterlist *prev = NULL;
	unsigned prev_nents;
	int ret, i;

	if (nbufs == 1) {
		ioctx->rw_ctxs = &ioctx->s_rw_ctx;
	} else {
		ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
			GFP_KERNEL);
		if (!ioctx->rw_ctxs)
			return -ENOMEM;
	}

	for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
		u64 remote_addr = be64_to_cpu(db->va);
		u32 size = be32_to_cpu(db->len);
		u32 rkey = be32_to_cpu(db->key);

		ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
				i < nbufs - 1);
		if (ret)
			goto unwind;

		ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
		if (ret < 0) {
			target_free_sgl(ctx->sg, ctx->nents);
			goto unwind;
		}

		ioctx->n_rdma += ret;
		ioctx->n_rw_ctx++;

		if (prev) {
			sg_unmark_end(&prev[prev_nents - 1]);
			sg_chain(prev, prev_nents + 1, ctx->sg);
		} else {
			*sg = ctx->sg;
		}

		prev = ctx->sg;
		prev_nents = ctx->nents;

		*sg_cnt += ctx->nents;
	}

	return 0;

unwind:
	while (--i >= 0) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);
	}
	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
	return ret;
}

static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx)
{
	enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
	int i;

	for (i = 0; i < ioctx->n_rw_ctx; i++) {
		struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

		rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
				ctx->sg, ctx->nents, dir);
		target_free_sgl(ctx->sg, ctx->nents);
	}

	if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
		kfree(ioctx->rw_ctxs);
}

static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
{
	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
		     !__same_type(srp_cmd->add_data[0], (u8)0));

	/*
	 * According to the SRP spec, the lower two bits of the 'ADDITIONAL
	 * CDB LENGTH' field are reserved and the size in bytes of this field
	 * is four times the value specified in bits 2..7. Hence the "& ~3".
	 */
	return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
}
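
/*
 * Worked example (illustrative, not from the original source): if
 * add_cdb_len == 0x17, then 0x17 & ~3 == 0x14, so the data descriptors
 * start 20 bytes into srp_cmd->add_data, after the additional CDB.
 */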

/**
 * srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
 * @recv_ioctx: I/O context associated with the received command @srp_cmd.
 * @ioctx: I/O context that will be used for responding to the initiator.
 * @srp_cmd: Pointer to the SRP_CMD request data.
 * @dir: Pointer to the variable to which the transfer direction will be
 *	written.
 * @sg: [out] scatterlist for the parsed SRP_CMD.
 * @sg_cnt: [out] length of @sg.
 * @data_len: Pointer to the variable to which the total data length of all
 *	descriptors in the SRP_CMD request will be written.
 * @imm_data_offset: [in] Offset in SRP_CMD requests at which immediate data
 *	starts.
 *
 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
 *
 * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
 * -ENOMEM when memory allocation fails and zero upon success.
 */
static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
		struct srpt_send_ioctx *ioctx,
		struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
		u16 imm_data_offset)
{
	BUG_ON(!dir);
	BUG_ON(!data_len);

	/*
	 * The lower four bits of the buffer format field contain the DATA-IN
	 * buffer descriptor format, and the highest four bits contain the
	 * DATA-OUT buffer descriptor format.
	 */
	if (srp_cmd->buf_fmt & 0xf)
		/* DATA-IN: transfer data from target to initiator (read). */
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		/* DATA-OUT: transfer data from initiator to target (write). */
		*dir = DMA_TO_DEVICE;
	else
		*dir = DMA_NONE;

	/* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
	ioctx->cmd.data_direction = *dir;

	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);

		*data_len = be32_to_cpu(db->len);
		return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
		int nbufs = be32_to_cpu(idb->table_desc.len) /
				sizeof(struct srp_direct_buf);

		if (nbufs >
		    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
			pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
			       srp_cmd->data_out_desc_cnt,
			       srp_cmd->data_in_desc_cnt,
			       be32_to_cpu(idb->table_desc.len),
			       sizeof(struct srp_direct_buf));
			return -EINVAL;
		}

		*data_len = be32_to_cpu(idb->len);
		return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
				sg, sg_cnt);
	} else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
		struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
		void *data = (void *)srp_cmd + imm_data_offset;
		uint32_t len = be32_to_cpu(imm_buf->len);
		uint32_t req_size = imm_data_offset + len;

		if (req_size > srp_max_req_size) {
			pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
			       imm_data_offset, len, srp_max_req_size);
			return -EINVAL;
		}
		if (recv_ioctx->byte_len < req_size) {
			pr_err("Received too few data - %d < %d\n",
			       recv_ioctx->byte_len, req_size);
			return -EIO;
		}
		/*
		 * The immediate data buffer descriptor must occur before the
		 * immediate data itself.
		 */
		if ((void *)(imm_buf + 1) > (void *)data) {
			pr_err("Received invalid write request\n");
			return -EINVAL;
		}
		*data_len = len;
		ioctx->recv_ioctx = recv_ioctx;
		if ((uintptr_t)data & 511) {
			pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
			return -EINVAL;
		}
		sg_init_one(&ioctx->imm_sg, data, len);
		*sg = &ioctx->imm_sg;
		*sg_cnt = 1;
		return 0;
	} else {
		*data_len = 0;
		return 0;
	}
}

/**
 * srpt_init_ch_qp - initialize queue pair attributes
 * @ch: SRPT RDMA channel.
 * @qp: Queue pair pointer.
 *
 * Initializes the attributes of queue pair 'qp' by allowing local write
 * access. Also transitions 'qp' to state IB_QPS_INIT.
 */
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	WARN_ON_ONCE(ch->using_rdma_cm);

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->qp_state = IB_QPS_INIT;
	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	attr->port_num = ch->sport->port;

	ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
				  ch->pkey, &attr->pkey_index);
	if (ret < 0)
		pr_err("Translating pkey %#x failed (%d) - using index 0\n",
		       ch->pkey, ret);

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
			   IB_QP_PKEY_INDEX);

	kfree(attr);
	return ret;
}

/**
 * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	WARN_ON_ONCE(ch->using_rdma_cm);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_dest_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
 * @ch: channel of the queue pair.
 * @qp: queue pair to change the state of.
 *
 * Returns zero upon success and a negative value upon failure.
 *
 * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
 * If this structure ever becomes larger, it might be necessary to allocate
 * it dynamically instead of on the stack.
 */
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int attr_mask;
	int ret;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
	if (ret)
		goto out;

	qp_attr.max_rd_atomic = 4;

	ret = ib_modify_qp(qp, &qp_attr, attr_mask);

out:
	return ret;
}

/**
 * srpt_ch_qp_err - set the channel queue pair state to 'error'
 * @ch: SRPT RDMA channel.
 */
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}
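
/*
 * Note (illustrative, not from the original source): for ib_cm connections
 * the driver walks the QP through the standard RESET -> INIT -> RTR -> RTS
 * sequence itself via srpt_init_ch_qp(), srpt_ch_qp_rtr() and
 * srpt_ch_qp_rts(); when the RDMA/CM is used, rdma_accept() performs these
 * transitions internally, hence the WARN_ON_ONCE(ch->using_rdma_cm) checks.
 */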

/**
 * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
 * @ch: SRPT RDMA channel.
 */
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
	struct srpt_send_ioctx *ioctx;
	int tag, cpu;

	BUG_ON(!ch);

	tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;

	ioctx = ch->ioctx_ring[tag];
	BUG_ON(ioctx->ch != ch);
	ioctx->state = SRPT_STATE_NEW;
	WARN_ON_ONCE(ioctx->recv_ioctx);
	ioctx->n_rdma = 0;
	ioctx->n_rw_ctx = 0;
	ioctx->queue_status_only = false;
	/*
	 * transport_init_se_cmd() does not initialize all fields, so do it
	 * here.
	 */
	memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
	memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
	ioctx->cmd.map_tag = tag;
	ioctx->cmd.map_cpu = cpu;

	return ioctx;
}

/**
 * srpt_abort_cmd - abort a SCSI command
 * @ioctx: I/O context associated with the SCSI command.
 */
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
	enum srpt_command_state state;

	BUG_ON(!ioctx);

	/*
	 * If the command is in a state where the target core is waiting for
	 * the ib_srpt driver, change the state to the next state.
	 */

	state = ioctx->state;
	switch (state) {
	case SRPT_STATE_NEED_DATA:
		ioctx->state = SRPT_STATE_DATA_IN;
		break;
	case SRPT_STATE_CMD_RSP_SENT:
	case SRPT_STATE_MGMT_RSP_SENT:
		ioctx->state = SRPT_STATE_DONE;
		break;
	default:
		WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
			  __func__, state);
		break;
	}

	pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
		 ioctx->state, ioctx->cmd.tag);

	switch (state) {
	case SRPT_STATE_NEW:
	case SRPT_STATE_DATA_IN:
	case SRPT_STATE_MGMT:
	case SRPT_STATE_DONE:
		/*
		 * Do nothing - defer abort processing until
		 * srpt_queue_response() is invoked.
		 */
		break;
	case SRPT_STATE_NEED_DATA:
		pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
		transport_generic_request_failure(&ioctx->cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
		break;
	case SRPT_STATE_CMD_RSP_SENT:
		/*
		 * SRP_RSP sending failed or the SRP_RSP send completion has
		 * not been received in time.
		 */
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	case SRPT_STATE_MGMT_RSP_SENT:
		transport_generic_free_cmd(&ioctx->cmd, 0);
		break;
	default:
		WARN(1, "Unexpected command state (%d)", state);
		break;
	}

	return state;
}

/**
 * srpt_rdma_read_done - RDMA read completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
 * the data that has been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and needs to
 * be cleaned up.
 */
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

	WARN_ON(ioctx->n_rdma <= 0);
	atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
	ioctx->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);
		srpt_abort_cmd(ioctx);
		return;
	}

	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					SRPT_STATE_DATA_IN))
		target_execute_cmd(&ioctx->cmd);
	else
		pr_err("%s[%d]: wrong state = %d\n", __func__,
		       __LINE__, ioctx->state);
}

/**
 * srpt_build_cmd_rsp - build a SRP_RSP response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context associated with the SRP_CMD request. The response will
 *   be built in the buffer ioctx->buf points at and hence this function will
 *   overwrite the request data.
 * @tag: tag of the request for which this response is being generated.
 * @status: value for the STATUS field of the SRP_RSP information unit.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response. See also SPC-2 for more information about sense data.
 */
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
			      struct srpt_send_ioctx *ioctx, u64 tag,
			      int status)
{
	struct se_cmd *cmd = &ioctx->cmd;
	struct srp_rsp *srp_rsp;
	const u8 *sense_data;
	int sense_data_len, max_sense_len;
	u32 resid = cmd->residual_count;

	/*
	 * The lowest bit of all SAM-3 status codes is zero (see also
	 * paragraph 5.3 in SAM-3).
	 */
	WARN_ON(status & 1);

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);

	sense_data = ioctx->sense_data;
	sense_data_len = ioctx->cmd.scsi_sense_length;
	WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

	memset(srp_rsp, 0, sizeof(*srp_rsp));
	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;
	srp_rsp->status = status;

	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an underflow write */
			srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
			srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
		} else if (cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an underflow read */
			srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
			srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
		}
	} else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		if (cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an overflow write */
			srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
			srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
		} else if (cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an overflow read */
			srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
			srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
		}
	}

	if (sense_data_len) {
		BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
		max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
		if (sense_data_len > max_sense_len) {
			pr_warn("truncated sense data from %d to %d bytes\n",
				sense_data_len, max_sense_len);
			sense_data_len = max_sense_len;
		}

		srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
		srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
		memcpy(srp_rsp + 1, sense_data, sense_data_len);
	}

	return sizeof(*srp_rsp) + sense_data_len;
}

/**
 * srpt_build_tskmgmt_rsp - build a task management response
 * @ch: RDMA channel through which the request has been received.
 * @ioctx: I/O context in which the SRP_RSP response will be built.
 * @rsp_code: RSP_CODE that will be stored in the response.
 * @tag: Tag of the request for which this response is being generated.
 *
 * Returns the size in bytes of the SRP_RSP response.
 *
 * An SRP_RSP response contains a SCSI status or service response. See also
 * section 6.9 in the SRP r16a document for the format of an SRP_RSP
 * response.
 */
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
				  struct srpt_send_ioctx *ioctx,
				  u8 rsp_code, u64 tag)
{
	struct srp_rsp *srp_rsp;
	int resp_data_len;
	int resp_len;

	resp_data_len = 4;
	resp_len = sizeof(*srp_rsp) + resp_data_len;

	srp_rsp = ioctx->ioctx.buf;
	BUG_ON(!srp_rsp);
	memset(srp_rsp, 0, sizeof(*srp_rsp));

	srp_rsp->opcode = SRP_RSP;
	srp_rsp->req_lim_delta =
		cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
	srp_rsp->tag = tag;

	srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
	srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
	srp_rsp->data[3] = rsp_code;

	return resp_len;
}

static int srpt_check_stop_free(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);

	return target_put_sess_cmd(&ioctx->cmd);
}

/**
 * srpt_handle_cmd - process a SRP_CMD information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 */
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
			    struct srpt_recv_ioctx *recv_ioctx,
			    struct srpt_send_ioctx *send_ioctx)
{
	struct se_cmd *cmd;
	struct srp_cmd *srp_cmd;
	struct scatterlist *sg = NULL;
	unsigned sg_cnt = 0;
	u64 data_len;
	enum dma_data_direction dir;
	int rc;

	BUG_ON(!send_ioctx);

	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
	cmd = &send_ioctx->cmd;
	cmd->tag = srp_cmd->tag;

	switch (srp_cmd->task_attr) {
	case SRP_CMD_SIMPLE_Q:
		cmd->sam_task_attr = TCM_SIMPLE_TAG;
		break;
	case SRP_CMD_ORDERED_Q:
	default:
		cmd->sam_task_attr = TCM_ORDERED_TAG;
		break;
	case SRP_CMD_HEAD_OF_Q:
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case SRP_CMD_ACA:
		cmd->sam_task_attr = TCM_ACA_TAG;
		break;
	}

	rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
			       &sg, &sg_cnt, &data_len, ch->imm_data_offset);
	if (rc) {
		if (rc != -EAGAIN) {
			pr_err("0x%llx: parsing SRP descriptor table failed.\n",
			       srp_cmd->tag);
		}
		goto busy;
	}

	rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
			       &send_ioctx->sense_data[0],
			       scsilun_to_int(&srp_cmd->lun), data_len,
			       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
			       sg, sg_cnt, NULL, 0, NULL, 0);
	if (rc != 0) {
		pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
			 srp_cmd->tag);
		goto busy;
	}
	return;

busy:
	target_send_busy(cmd);
}

static int srp_tmr_to_tcm(int fn)
{
	switch (fn) {
	case SRP_TSK_ABORT_TASK:
		return TMR_ABORT_TASK;
	case SRP_TSK_ABORT_TASK_SET:
		return TMR_ABORT_TASK_SET;
	case SRP_TSK_CLEAR_TASK_SET:
		return TMR_CLEAR_TASK_SET;
	case SRP_TSK_LUN_RESET:
		return TMR_LUN_RESET;
	case SRP_TSK_CLEAR_ACA:
		return TMR_CLEAR_ACA;
	default:
		return -1;
	}
}

/**
 * srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
 * @ch: SRPT RDMA channel.
 * @recv_ioctx: Receive I/O context.
 * @send_ioctx: Send I/O context.
 *
 * For more information about SRP_TSK_MGMT information units, see also section
 * 6.7 in the SRP r16a document.
 */
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
				 struct srpt_recv_ioctx *recv_ioctx,
				 struct srpt_send_ioctx *send_ioctx)
{
	struct srp_tsk_mgmt *srp_tsk;
	struct se_cmd *cmd;
	struct se_session *sess = ch->sess;
	int tcm_tmr;
	int rc;

	BUG_ON(!send_ioctx);

	srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
	cmd = &send_ioctx->cmd;

	pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
		 srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
		 ch->sess);

	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
	send_ioctx->cmd.tag = srp_tsk->tag;
	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
			       scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
			       GFP_KERNEL, srp_tsk->task_tag,
			       TARGET_SCF_ACK_KREF);
	if (rc != 0) {
		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
		cmd->se_tfo->queue_tm_rsp(cmd);
	}
	return;
}

/**
 * srpt_handle_new_iu - process a newly received information unit
 * @ch: RDMA channel through which the information unit has been received.
 * @recv_ioctx: Receive I/O context associated with the information unit.
 */
static bool
srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
	struct srpt_send_ioctx *send_ioctx = NULL;
	struct srp_cmd *srp_cmd;
	bool res = false;
	u8 opcode;

	BUG_ON(!ch);
	BUG_ON(!recv_ioctx);

	if (unlikely(ch->state == CH_CONNECTING))
		goto push;

	ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
				   recv_ioctx->ioctx.dma,
				   recv_ioctx->ioctx.offset + srp_max_req_size,
				   DMA_FROM_DEVICE);

	srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
	opcode = srp_cmd->opcode;
	if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
		send_ioctx = srpt_get_send_ioctx(ch);
		if (unlikely(!send_ioctx))
			goto push;
	}

	if (!list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(!ch->processing_wait_list);
		list_del_init(&recv_ioctx->wait_list);
	}

	switch (opcode) {
	case SRP_CMD:
		srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_TSK_MGMT:
		srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
		break;
	case SRP_I_LOGOUT:
		pr_err("Not yet implemented: SRP_I_LOGOUT\n");
		break;
	case SRP_CRED_RSP:
		pr_debug("received SRP_CRED_RSP\n");
		break;
	case SRP_AER_RSP:
		pr_debug("received SRP_AER_RSP\n");
		break;
	case SRP_RSP:
		pr_err("Received SRP_RSP\n");
		break;
	default:
		pr_err("received IU with unknown opcode 0x%x\n", opcode);
		break;
	}

	if (!send_ioctx || !send_ioctx->recv_ioctx)
		srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
	res = true;

out:
	return res;

push:
	if (list_empty(&recv_ioctx->wait_list)) {
		WARN_ON_ONCE(ch->processing_wait_list);
		list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
	}
	goto out;
}
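
/*
 * Credit accounting note (illustrative, not from the original source): each
 * received information unit consumes one SRP credit (ch->req_lim is
 * decremented in srpt_recv_done() below) and credits are handed back to the
 * initiator through the REQUEST LIMIT DELTA field of every SRP_RSP, built
 * from 1 + atomic_xchg(&ch->req_lim_delta, 0) in the response helpers above.
 */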

static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_recv_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);

	if (wc->status == IB_WC_SUCCESS) {
		int req_lim;

		req_lim = atomic_dec_return(&ch->req_lim);
		if (unlikely(req_lim < 0))
			pr_err("req_lim = %d < 0\n", req_lim);
		ioctx->byte_len = wc->byte_len;
		srpt_handle_new_iu(ch, ioctx);
	} else {
		pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
				    ioctx, wc->status);
	}
}

/*
 * This function must be called from the context in which RDMA completions are
 * processed because it accesses the wait list without protection against
 * access from other threads.
 */
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
	struct srpt_recv_ioctx *recv_ioctx, *tmp;

	WARN_ON_ONCE(ch->state == CH_CONNECTING);

	if (list_empty(&ch->cmd_wait_list))
		return;

	WARN_ON_ONCE(ch->processing_wait_list);
	ch->processing_wait_list = true;
	list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
				 wait_list) {
		if (!srpt_handle_new_iu(ch, recv_ioctx))
			break;
	}
	ch->processing_wait_list = false;
}

/**
 * srpt_send_done - send completion callback
 * @cq: Completion queue.
 * @wc: Work completion.
 *
 * Note: Although this has not yet been observed during tests, at least in
 * theory it is possible that the srpt_get_send_ioctx() call invoked by
 * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
 * value in each response is set to one, and it is possible that this response
 * makes the initiator send a new request before the send completion for that
 * response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
 * if IB retransmission causes generation of the send completion to be
 * delayed. Incoming information units for which srpt_get_send_ioctx() fails
 * are queued on cmd_wait_list. The code below processes these delayed
 * requests one at a time.
 */
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srpt_rdma_ch *ch = cq->cq_context;
	struct srpt_send_ioctx *ioctx =
		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
	enum srpt_command_state state;

	state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

	WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
		state != SRPT_STATE_MGMT_RSP_SENT);

	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

	if (wc->status != IB_WC_SUCCESS)
		pr_info("sending response for ioctx 0x%p failed with status %d\n",
			ioctx, wc->status);

	if (state != SRPT_STATE_DONE) {
		transport_generic_free_cmd(&ioctx->cmd, 0);
	} else {
		pr_err("IB completion has been received too late for wr_id = %u.\n",
		       ioctx->ioctx.index);
	}

	srpt_process_wait_list(ch);
}

/**
 * srpt_create_ch_ib - create receive and send completion queues
 * @ch: SRPT RDMA channel.
 */
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
	struct ib_qp_init_attr *qp_init;
	struct srpt_port *sport = ch->sport;
	struct srpt_device *sdev = sport->sdev;
	const struct ib_device_attr *attrs = &sdev->device->attrs;
	int sq_size = sport->port_attrib.srp_sq_size;
	int i, ret;

	WARN_ON(ch->rq_size < 1);

	ret = -ENOMEM;
	qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
	if (!qp_init)
		goto out;

retry:
	ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size,
				 IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq)) {
		ret = PTR_ERR(ch->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       ch->rq_size + sq_size, ret);
		goto out;
	}

	qp_init->qp_context = (void *)ch;
	qp_init->event_handler
		= (void(*)(struct ib_event *, void*))srpt_qp_event;
	qp_init->send_cq = ch->cq;
	qp_init->recv_cq = ch->cq;
	qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_init->qp_type = IB_QPT_RC;
	/*
	 * We divide up our send queue size into half SEND WRs to send the
	 * completions, and half R/W contexts to actually do the RDMA
	 * READ/WRITE transfers. Note that we need to allocate CQ slots for
	 * both, as RDMA contexts will also post completions for the RDMA
	 * READ case.
	 */
	qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
	qp_init->cap.max_rdma_ctxs = sq_size / 2;
	qp_init->cap.max_send_sge = attrs->max_send_sge;
	qp_init->cap.max_recv_sge = 1;
	qp_init->port_num = ch->sport->port;
	if (sdev->use_srq)
		qp_init->srq = sdev->srq;
	else
		qp_init->cap.max_recv_wr = ch->rq_size;

	if (ch->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
		ch->qp = ch->rdma_cm.cm_id->qp;
	} else {
		ch->qp = ib_create_qp(sdev->pd, qp_init);
		if (!IS_ERR(ch->qp)) {
			ret = srpt_init_ch_qp(ch, ch->qp);
			if (ret)
				ib_destroy_qp(ch->qp);
		} else {
			ret = PTR_ERR(ch->qp);
		}
	}
	if (ret) {
		bool retry = sq_size > MIN_SRPT_SQ_SIZE;

		if (retry) {
			pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
				 sq_size, ret);
			ib_free_cq(ch->cq);
			sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
			goto retry;
		} else {
			pr_err("failed to create queue pair with sq_size = %d (%d)\n",
			       sq_size, ret);
			goto err_destroy_cq;
		}
	}

	atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
		 qp_init->cap.max_send_wr, ch);

	if (!sdev->use_srq)
		for (i = 0; i < ch->rq_size; i++)
			srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);

out:
	kfree(qp_init);
	return ret;

err_destroy_cq:
	ch->qp = NULL;
	ib_free_cq(ch->cq);
	goto out;
}

static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
	ib_destroy_qp(ch->qp);
	ib_free_cq(ch->cq);
}
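
/*
 * Teardown note (illustrative, not from the original source): closing a
 * channel first moves the QP into the error state via srpt_ch_qp_err() and
 * then posts a signaled zero-length RDMA write; once the completion for that
 * write arrives in srpt_zerolength_write_done(), all earlier work requests
 * are known to have completed and the channel can be released safely.
 */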
 */
static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
		pr_debug("%s: already closed\n", ch->sess_name);
		return false;
	}

	kref_get(&ch->kref);

	ret = srpt_ch_qp_err(ch);
	if (ret < 0)
		pr_err("%s-%d: changing queue pair into error state failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);

	ret = srpt_zerolength_write(ch);
	if (ret < 0) {
		pr_err("%s-%d: queuing zero-length write failed: %d\n",
		       ch->sess_name, ch->qp->qp_num, ret);
		if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
		else
			WARN_ON_ONCE(true);
	}

	kref_put(&ch->kref, srpt_free_ch);

	return true;
}

/*
 * Change the channel state into CH_DISCONNECTING. If a channel has not yet
 * reached the connected state, close it. If a channel is in the connected
 * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
 * the responsibility of the caller to ensure that this function is not
 * invoked concurrently with the code that accepts a connection. This means
 * that this function must either be invoked from inside a CM callback
 * function or that it must be invoked with the srpt_port.mutex held.
 */
static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
	int ret;

	if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
		return -ENOTCONN;

	if (ch->using_rdma_cm) {
		ret = rdma_disconnect(ch->rdma_cm.cm_id);
	} else {
		ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
		if (ret < 0)
			ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
	}

	if (ret < 0 && srpt_close_ch(ch))
		ret = 0;

	return ret;
}

/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
	DECLARE_COMPLETION_ONSTACK(closed);
	struct srpt_port *sport = ch->sport;

	pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
		 ch->state);

	ch->closed = &closed;

	mutex_lock(&sport->mutex);
	srpt_disconnect_ch(ch);
	mutex_unlock(&sport->mutex);

	while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
		pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
			ch->sess_name, ch->qp->qp_num, ch->state);
}

static void __srpt_close_all_ch(struct srpt_port *sport)
{
	struct srpt_nexus *nexus;
	struct srpt_rdma_ch *ch;

	lockdep_assert_held(&sport->mutex);

	list_for_each_entry(nexus, &sport->nexus_list, entry) {
		list_for_each_entry(ch, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch) >= 0)
				pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
					ch->sess_name, ch->qp->qp_num,
					dev_name(&sport->sdev->device->dev),
					sport->port);
			srpt_close_ch(ch);
		}
	}
}

/*
 * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
 * it does not yet exist.
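 *
 * The loop below avoids holding sport->mutex across the allocation: it
 * searches the list first, allocates a candidate entry with the lock
 * dropped, and searches again before inserting it. A candidate that lost
 * the race is freed on exit.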
 */
static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
					 const u8 i_port_id[16],
					 const u8 t_port_id[16])
{
	struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;

	for (;;) {
		mutex_lock(&sport->mutex);
		list_for_each_entry(n, &sport->nexus_list, entry) {
			if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
			    memcmp(n->t_port_id, t_port_id, 16) == 0) {
				nexus = n;
				break;
			}
		}
		if (!nexus && tmp_nexus) {
			list_add_tail_rcu(&tmp_nexus->entry,
					  &sport->nexus_list);
			swap(nexus, tmp_nexus);
		}
		mutex_unlock(&sport->mutex);

		if (nexus)
			break;
		tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
		if (!tmp_nexus) {
			nexus = ERR_PTR(-ENOMEM);
			break;
		}
		INIT_LIST_HEAD(&tmp_nexus->ch_list);
		memcpy(tmp_nexus->i_port_id, i_port_id, 16);
		memcpy(tmp_nexus->t_port_id, t_port_id, 16);
	}

	kfree(tmp_nexus);

	return nexus;
}

static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
	__must_hold(&sport->mutex)
{
	lockdep_assert_held(&sport->mutex);

	if (sport->enabled == enabled)
		return;
	sport->enabled = enabled;
	if (!enabled)
		__srpt_close_all_ch(sport);
}

static void srpt_drop_sport_ref(struct srpt_port *sport)
{
	if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
		complete(sport->freed_channels);
}

static void srpt_free_ch(struct kref *kref)
{
	struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);

	srpt_drop_sport_ref(ch->sport);
	kfree_rcu(ch, rcu);
}

/*
 * Shut down the SCSI target session, tell the connection manager to
 * disconnect the associated RDMA channel, transition the QP to the error
 * state and remove the channel from the channel list. This function is
 * typically called from inside srpt_zerolength_write_done(). Concurrent
 * srpt_zerolength_write() calls from inside srpt_close_ch() are possible
 * as long as the channel is on sport->nexus_list.
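 *
 * Teardown order matters here: the session is drained and removed first so
 * that no new commands can reference the channel, the CM ID is destroyed
 * next so that no further CM events arrive, and only then are the QP, the
 * CQ and the I/O context rings freed.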
 */
static void srpt_release_channel_work(struct work_struct *w)
{
	struct srpt_rdma_ch *ch;
	struct srpt_device *sdev;
	struct srpt_port *sport;
	struct se_session *se_sess;

	ch = container_of(w, struct srpt_rdma_ch, release_work);
	pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);

	sdev = ch->sport->sdev;
	BUG_ON(!sdev);

	se_sess = ch->sess;
	BUG_ON(!se_sess);

	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);

	target_remove_session(se_sess);
	ch->sess = NULL;

	if (ch->using_rdma_cm)
		rdma_destroy_id(ch->rdma_cm.cm_id);
	else
		ib_destroy_cm_id(ch->ib_cm.cm_id);

	sport = ch->sport;
	mutex_lock(&sport->mutex);
	list_del_rcu(&ch->list);
	mutex_unlock(&sport->mutex);

	if (ch->closed)
		complete(ch->closed);

	srpt_destroy_ch_ib(ch);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
			     ch->sport->sdev, ch->rq_size,
			     ch->rsp_buf_cache, DMA_TO_DEVICE);

	kmem_cache_destroy(ch->rsp_buf_cache);

	srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
			     sdev, ch->rq_size,
			     ch->req_buf_cache, DMA_FROM_DEVICE);

	kmem_cache_destroy(ch->req_buf_cache);

	kref_put(&ch->kref, srpt_free_ch);
}

/**
 * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
 * @sdev: HCA through which the login request was received.
 * @ib_cm_id: IB/CM connection identifier in case of IB/CM.
 * @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
 * @port_num: Port through which the REQ message was received.
 * @pkey: P_Key of the incoming connection.
 * @req: SRP login request.
 * @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
 *            the login request.
 *
 * Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
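 *
 * Exactly one of @ib_cm_id and @rdma_cm_id is expected to be non-NULL; which
 * one is set determines whether the login response is sent through
 * ib_send_cm_rep() or rdma_accept(). All failure paths answer with an
 * SRP_LOGIN_REJ message carrying one of the SRP_LOGIN_REJ_* reason codes.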
 */
static int srpt_cm_req_recv(struct srpt_device *const sdev,
			    struct ib_cm_id *ib_cm_id,
			    struct rdma_cm_id *rdma_cm_id,
			    u8 port_num, __be16 pkey,
			    const struct srp_login_req *req,
			    const char *src_addr)
{
	struct srpt_port *sport = &sdev->port[port_num - 1];
	struct srpt_nexus *nexus;
	struct srp_login_rsp *rsp = NULL;
	struct srp_login_rej *rej = NULL;
	union {
		struct rdma_conn_param rdma_cm;
		struct ib_cm_rep_param ib_cm;
	} *rep_param = NULL;
	struct srpt_rdma_ch *ch = NULL;
	char i_port_id[36];
	u32 it_iu_len;
	int i, tag_num, tag_size, ret;
	struct srpt_tpg *stpg;

	WARN_ON_ONCE(irqs_disabled());

	if (WARN_ON(!sdev || !req))
		return -EINVAL;

	it_iu_len = be32_to_cpu(req->req_it_iu_len);

	pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
		req->initiator_port_id, req->target_port_id, it_iu_len,
		port_num, &sport->gid, be16_to_cpu(pkey));

	nexus = srpt_get_nexus(sport, req->initiator_port_id,
			       req->target_port_id);
	if (IS_ERR(nexus)) {
		ret = PTR_ERR(nexus);
		goto out;
	}

	ret = -ENOMEM;
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	rej = kzalloc(sizeof(*rej), GFP_KERNEL);
	rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
	if (!rsp || !rej || !rep_param)
		goto out;

	ret = -EINVAL;
	if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
		pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
		       it_iu_len, 64, srp_max_req_size);
		goto reject;
	}

	if (!sport->enabled) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
			dev_name(&sport->sdev->device->dev), port_num);
		goto reject;
	}

	if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
	    || *(__be64 *)(req->target_port_id + 8) !=
	       cpu_to_be64(srpt_service_guid)) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
		pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
		goto reject;
	}

	ret = -ENOMEM;
	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (!ch) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
		goto reject;
	}

	kref_init(&ch->kref);
	ch->pkey = be16_to_cpu(pkey);
	ch->nexus = nexus;
	ch->zw_cqe.done = srpt_zerolength_write_done;
	INIT_WORK(&ch->release_work, srpt_release_channel_work);
	ch->sport = sport;
	if (ib_cm_id) {
		ch->ib_cm.cm_id = ib_cm_id;
		ib_cm_id->context = ch;
	} else {
		ch->using_rdma_cm = true;
		ch->rdma_cm.cm_id = rdma_cm_id;
		rdma_cm_id->context = ch;
	}
	/*
	 * ch->rq_size should be at least as large as the initiator queue
	 * depth so that the initiator driver does not have to report
	 * QUEUE_FULL to the SCSI mid-layer.
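	 * The value is therefore capped only by MAX_SRPT_RQ_SIZE and by the
	 * HCA limit (attrs.max_qp_wr).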
2233 */ 2234 ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr); 2235 spin_lock_init(&ch->spinlock); 2236 ch->state = CH_CONNECTING; 2237 INIT_LIST_HEAD(&ch->cmd_wait_list); 2238 ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size; 2239 2240 ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size, 2241 512, 0, NULL); 2242 if (!ch->rsp_buf_cache) 2243 goto free_ch; 2244 2245 ch->ioctx_ring = (struct srpt_send_ioctx **) 2246 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, 2247 sizeof(*ch->ioctx_ring[0]), 2248 ch->rsp_buf_cache, 0, DMA_TO_DEVICE); 2249 if (!ch->ioctx_ring) { 2250 pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n"); 2251 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2252 goto free_rsp_cache; 2253 } 2254 2255 for (i = 0; i < ch->rq_size; i++) 2256 ch->ioctx_ring[i]->ch = ch; 2257 if (!sdev->use_srq) { 2258 u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ? 2259 be16_to_cpu(req->imm_data_offset) : 0; 2260 u16 alignment_offset; 2261 u32 req_sz; 2262 2263 if (req->req_flags & SRP_IMMED_REQUESTED) 2264 pr_debug("imm_data_offset = %d\n", 2265 be16_to_cpu(req->imm_data_offset)); 2266 if (imm_data_offset >= sizeof(struct srp_cmd)) { 2267 ch->imm_data_offset = imm_data_offset; 2268 rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP; 2269 } else { 2270 ch->imm_data_offset = 0; 2271 } 2272 alignment_offset = round_up(imm_data_offset, 512) - 2273 imm_data_offset; 2274 req_sz = alignment_offset + imm_data_offset + srp_max_req_size; 2275 ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz, 2276 512, 0, NULL); 2277 if (!ch->req_buf_cache) 2278 goto free_rsp_ring; 2279 2280 ch->ioctx_recv_ring = (struct srpt_recv_ioctx **) 2281 srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size, 2282 sizeof(*ch->ioctx_recv_ring[0]), 2283 ch->req_buf_cache, 2284 alignment_offset, 2285 DMA_FROM_DEVICE); 2286 if (!ch->ioctx_recv_ring) { 2287 pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n"); 2288 rej->reason = 2289 cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2290 goto free_recv_cache; 2291 } 2292 for (i = 0; i < ch->rq_size; i++) 2293 INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list); 2294 } 2295 2296 ret = srpt_create_ch_ib(ch); 2297 if (ret) { 2298 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2299 pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n"); 2300 goto free_recv_ring; 2301 } 2302 2303 strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name)); 2304 snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx", 2305 be64_to_cpu(*(__be64 *)nexus->i_port_id), 2306 be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8))); 2307 2308 pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name, 2309 i_port_id); 2310 2311 tag_num = ch->rq_size; 2312 tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */ 2313 2314 mutex_lock(&sport->port_guid_id.mutex); 2315 list_for_each_entry(stpg, &sport->port_guid_id.tpg_list, entry) { 2316 if (!IS_ERR_OR_NULL(ch->sess)) 2317 break; 2318 ch->sess = target_setup_session(&stpg->tpg, tag_num, 2319 tag_size, TARGET_PROT_NORMAL, 2320 ch->sess_name, ch, NULL); 2321 } 2322 mutex_unlock(&sport->port_guid_id.mutex); 2323 2324 mutex_lock(&sport->port_gid_id.mutex); 2325 list_for_each_entry(stpg, &sport->port_gid_id.tpg_list, entry) { 2326 if (!IS_ERR_OR_NULL(ch->sess)) 2327 break; 2328 ch->sess = target_setup_session(&stpg->tpg, tag_num, 2329 tag_size, TARGET_PROT_NORMAL, i_port_id, 2330 ch, NULL); 2331 if 
		if (!IS_ERR_OR_NULL(ch->sess))
			break;
		/* Retry without leading "0x" */
		ch->sess = target_setup_session(&stpg->tpg, tag_num,
						tag_size, TARGET_PROT_NORMAL,
						i_port_id + 2, ch, NULL);
	}
	mutex_unlock(&sport->port_gid_id.mutex);

	if (IS_ERR_OR_NULL(ch->sess)) {
		WARN_ON_ONCE(ch->sess == NULL);
		ret = PTR_ERR(ch->sess);
		ch->sess = NULL;
		pr_info("Rejected login for initiator %s: ret = %d.\n",
			ch->sess_name, ret);
		rej->reason = cpu_to_be32(ret == -ENOMEM ?
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
				SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
		goto destroy_ib;
	}

	/*
	 * Once a session has been created destruction of srpt_rdma_ch objects
	 * will decrement sport->refcount. Hence increment sport->refcount now.
	 */
	atomic_inc(&sport->refcount);

	mutex_lock(&sport->mutex);

	if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
		struct srpt_rdma_ch *ch2;

		list_for_each_entry(ch2, &nexus->ch_list, list) {
			if (srpt_disconnect_ch(ch2) < 0)
				continue;
			pr_info("Relogin - closed existing channel %s\n",
				ch2->sess_name);
			rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
		}
	} else {
		rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
	}

	list_add_tail_rcu(&ch->list, &nexus->ch_list);

	if (!sport->enabled) {
		rej->reason = cpu_to_be32(
				SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
			dev_name(&sdev->device->dev), port_num);
		mutex_unlock(&sport->mutex);
		goto reject;
	}

	mutex_unlock(&sport->mutex);

	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
	if (ret) {
		rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
		pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
		       ret);
		goto reject;
	}

	pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
		 ch->sess_name, ch);

	/* create srp_login_response */
	rsp->opcode = SRP_LOGIN_RSP;
	rsp->tag = req->tag;
	rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
	rsp->max_ti_iu_len = req->req_it_iu_len;
	ch->max_ti_iu_len = it_iu_len;
	rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
				   SRP_BUF_FORMAT_INDIRECT);
	rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
	atomic_set(&ch->req_lim, ch->rq_size);
	atomic_set(&ch->req_lim_delta, 0);

	/* create cm reply */
	if (ch->using_rdma_cm) {
		rep_param->rdma_cm.private_data = (void *)rsp;
		rep_param->rdma_cm.private_data_len = sizeof(*rsp);
		rep_param->rdma_cm.rnr_retry_count = 7;
		rep_param->rdma_cm.flow_control = 1;
		rep_param->rdma_cm.responder_resources = 4;
		rep_param->rdma_cm.initiator_depth = 4;
	} else {
		rep_param->ib_cm.qp_num = ch->qp->qp_num;
		rep_param->ib_cm.private_data = (void *)rsp;
		rep_param->ib_cm.private_data_len = sizeof(*rsp);
		rep_param->ib_cm.rnr_retry_count = 7;
		rep_param->ib_cm.flow_control = 1;
		rep_param->ib_cm.failover_accepted = 0;
		rep_param->ib_cm.srq = 1;
		rep_param->ib_cm.responder_resources = 4;
		rep_param->ib_cm.initiator_depth = 4;
	}

	/*
	 * Hold the sport mutex while accepting a connection so that
	 * srpt_disconnect_ch() cannot be invoked concurrently with this code.
2433 */ 2434 mutex_lock(&sport->mutex); 2435 if (sport->enabled && ch->state == CH_CONNECTING) { 2436 if (ch->using_rdma_cm) 2437 ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm); 2438 else 2439 ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm); 2440 } else { 2441 ret = -EINVAL; 2442 } 2443 mutex_unlock(&sport->mutex); 2444 2445 switch (ret) { 2446 case 0: 2447 break; 2448 case -EINVAL: 2449 goto reject; 2450 default: 2451 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2452 pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n", 2453 ret); 2454 goto reject; 2455 } 2456 2457 goto out; 2458 2459 destroy_ib: 2460 srpt_destroy_ch_ib(ch); 2461 2462 free_recv_ring: 2463 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring, 2464 ch->sport->sdev, ch->rq_size, 2465 ch->req_buf_cache, DMA_FROM_DEVICE); 2466 2467 free_recv_cache: 2468 kmem_cache_destroy(ch->req_buf_cache); 2469 2470 free_rsp_ring: 2471 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring, 2472 ch->sport->sdev, ch->rq_size, 2473 ch->rsp_buf_cache, DMA_TO_DEVICE); 2474 2475 free_rsp_cache: 2476 kmem_cache_destroy(ch->rsp_buf_cache); 2477 2478 free_ch: 2479 if (rdma_cm_id) 2480 rdma_cm_id->context = NULL; 2481 else 2482 ib_cm_id->context = NULL; 2483 kfree(ch); 2484 ch = NULL; 2485 2486 WARN_ON_ONCE(ret == 0); 2487 2488 reject: 2489 pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason)); 2490 rej->opcode = SRP_LOGIN_REJ; 2491 rej->tag = req->tag; 2492 rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | 2493 SRP_BUF_FORMAT_INDIRECT); 2494 2495 if (rdma_cm_id) 2496 rdma_reject(rdma_cm_id, rej, sizeof(*rej), 2497 IB_CM_REJ_CONSUMER_DEFINED); 2498 else 2499 ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2500 rej, sizeof(*rej)); 2501 2502 if (ch && ch->sess) { 2503 srpt_close_ch(ch); 2504 /* 2505 * Tell the caller not to free cm_id since 2506 * srpt_release_channel_work() will do that. 2507 */ 2508 ret = 0; 2509 } 2510 2511 out: 2512 kfree(rep_param); 2513 kfree(rsp); 2514 kfree(rej); 2515 2516 return ret; 2517 } 2518 2519 static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id, 2520 const struct ib_cm_req_event_param *param, 2521 void *private_data) 2522 { 2523 char sguid[40]; 2524 2525 srpt_format_guid(sguid, sizeof(sguid), 2526 ¶m->primary_path->dgid.global.interface_id); 2527 2528 return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port, 2529 param->primary_path->pkey, 2530 private_data, sguid); 2531 } 2532 2533 static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id, 2534 struct rdma_cm_event *event) 2535 { 2536 struct srpt_device *sdev; 2537 struct srp_login_req req; 2538 const struct srp_login_req_rdma *req_rdma; 2539 struct sa_path_rec *path_rec = cm_id->route.path_rec; 2540 char src_addr[40]; 2541 2542 sdev = ib_get_client_data(cm_id->device, &srpt_client); 2543 if (!sdev) 2544 return -ECONNREFUSED; 2545 2546 if (event->param.conn.private_data_len < sizeof(*req_rdma)) 2547 return -EINVAL; 2548 2549 /* Transform srp_login_req_rdma into srp_login_req. 
	 */
	req_rdma = event->param.conn.private_data;
	memset(&req, 0, sizeof(req));
	req.opcode = req_rdma->opcode;
	req.tag = req_rdma->tag;
	req.req_it_iu_len = req_rdma->req_it_iu_len;
	req.req_buf_fmt = req_rdma->req_buf_fmt;
	req.req_flags = req_rdma->req_flags;
	memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
	memcpy(req.target_port_id, req_rdma->target_port_id, 16);
	req.imm_data_offset = req_rdma->imm_data_offset;

	snprintf(src_addr, sizeof(src_addr), "%pIS",
		 &cm_id->route.addr.src_addr);

	return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
				path_rec ? path_rec->pkey : 0, &req, src_addr);
}

static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
			     enum ib_cm_rej_reason reason,
			     const u8 *private_data,
			     u8 private_data_len)
{
	char *priv = NULL;
	int i;

	if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
						GFP_KERNEL))) {
		for (i = 0; i < private_data_len; i++)
			sprintf(priv + 3 * i, " %02x", private_data[i]);
	}
	pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
		ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
		"; private data" : "", priv ? priv : " (?)");
	kfree(priv);
}

/**
 * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
 * @ch: SRPT RDMA channel.
 *
 * An RTU (ready to use) message indicates that the connection has been
 * established and that the recipient may begin transmitting.
 */
static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
	int ret;

	ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
	if (ret < 0) {
		pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
		       ch->qp->qp_num);
		srpt_close_ch(ch);
		return;
	}

	/*
	 * Note: calling srpt_close_ch() if the transition to the LIVE state
	 * fails is not necessary since a failed transition means that
	 * srpt_close_ch() has already been invoked from another thread.
	 */
	if (!srpt_set_ch_state(ch, CH_LIVE)) {
		pr_err("%s-%d: channel transition to LIVE state failed\n",
		       ch->sess_name, ch->qp->qp_num);
		return;
	}

	/* Trigger wait list processing. */
	ret = srpt_zerolength_write(ch);
	WARN_ONCE(ret < 0, "%d\n", ret);
}

/**
 * srpt_cm_handler - IB connection manager callback function
 * @cm_id: IB/CM connection identifier.
 * @event: IB/CM event.
 *
 * A non-zero return value will cause the caller to destroy the CM ID.
 *
 * Note: srpt_cm_handler() must only return a non-zero value when transferring
 * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
 * a non-zero value in any other case will trigger a race with the
 * ib_destroy_cm_id() call in srpt_release_channel().
2633 */ 2634 static int srpt_cm_handler(struct ib_cm_id *cm_id, 2635 const struct ib_cm_event *event) 2636 { 2637 struct srpt_rdma_ch *ch = cm_id->context; 2638 int ret; 2639 2640 ret = 0; 2641 switch (event->event) { 2642 case IB_CM_REQ_RECEIVED: 2643 ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd, 2644 event->private_data); 2645 break; 2646 case IB_CM_REJ_RECEIVED: 2647 srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason, 2648 event->private_data, 2649 IB_CM_REJ_PRIVATE_DATA_SIZE); 2650 break; 2651 case IB_CM_RTU_RECEIVED: 2652 case IB_CM_USER_ESTABLISHED: 2653 srpt_cm_rtu_recv(ch); 2654 break; 2655 case IB_CM_DREQ_RECEIVED: 2656 srpt_disconnect_ch(ch); 2657 break; 2658 case IB_CM_DREP_RECEIVED: 2659 pr_info("Received CM DREP message for ch %s-%d.\n", 2660 ch->sess_name, ch->qp->qp_num); 2661 srpt_close_ch(ch); 2662 break; 2663 case IB_CM_TIMEWAIT_EXIT: 2664 pr_info("Received CM TimeWait exit for ch %s-%d.\n", 2665 ch->sess_name, ch->qp->qp_num); 2666 srpt_close_ch(ch); 2667 break; 2668 case IB_CM_REP_ERROR: 2669 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name, 2670 ch->qp->qp_num); 2671 break; 2672 case IB_CM_DREQ_ERROR: 2673 pr_info("Received CM DREQ ERROR event.\n"); 2674 break; 2675 case IB_CM_MRA_RECEIVED: 2676 pr_info("Received CM MRA event\n"); 2677 break; 2678 default: 2679 pr_err("received unrecognized CM event %d\n", event->event); 2680 break; 2681 } 2682 2683 return ret; 2684 } 2685 2686 static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id, 2687 struct rdma_cm_event *event) 2688 { 2689 struct srpt_rdma_ch *ch = cm_id->context; 2690 int ret = 0; 2691 2692 switch (event->event) { 2693 case RDMA_CM_EVENT_CONNECT_REQUEST: 2694 ret = srpt_rdma_cm_req_recv(cm_id, event); 2695 break; 2696 case RDMA_CM_EVENT_REJECTED: 2697 srpt_cm_rej_recv(ch, event->status, 2698 event->param.conn.private_data, 2699 event->param.conn.private_data_len); 2700 break; 2701 case RDMA_CM_EVENT_ESTABLISHED: 2702 srpt_cm_rtu_recv(ch); 2703 break; 2704 case RDMA_CM_EVENT_DISCONNECTED: 2705 if (ch->state < CH_DISCONNECTING) 2706 srpt_disconnect_ch(ch); 2707 else 2708 srpt_close_ch(ch); 2709 break; 2710 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 2711 srpt_close_ch(ch); 2712 break; 2713 case RDMA_CM_EVENT_UNREACHABLE: 2714 pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name, 2715 ch->qp->qp_num); 2716 break; 2717 case RDMA_CM_EVENT_DEVICE_REMOVAL: 2718 case RDMA_CM_EVENT_ADDR_CHANGE: 2719 break; 2720 default: 2721 pr_err("received unrecognized RDMA CM event %d\n", 2722 event->event); 2723 break; 2724 } 2725 2726 return ret; 2727 } 2728 2729 /* 2730 * srpt_write_pending - Start data transfer from initiator to target (write). 
2731 */ 2732 static int srpt_write_pending(struct se_cmd *se_cmd) 2733 { 2734 struct srpt_send_ioctx *ioctx = 2735 container_of(se_cmd, struct srpt_send_ioctx, cmd); 2736 struct srpt_rdma_ch *ch = ioctx->ch; 2737 struct ib_send_wr *first_wr = NULL; 2738 struct ib_cqe *cqe = &ioctx->rdma_cqe; 2739 enum srpt_command_state new_state; 2740 int ret, i; 2741 2742 if (ioctx->recv_ioctx) { 2743 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN); 2744 target_execute_cmd(&ioctx->cmd); 2745 return 0; 2746 } 2747 2748 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA); 2749 WARN_ON(new_state == SRPT_STATE_DONE); 2750 2751 if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) { 2752 pr_warn("%s: IB send queue full (needed %d)\n", 2753 __func__, ioctx->n_rdma); 2754 ret = -ENOMEM; 2755 goto out_undo; 2756 } 2757 2758 cqe->done = srpt_rdma_read_done; 2759 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) { 2760 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i]; 2761 2762 first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port, 2763 cqe, first_wr); 2764 cqe = NULL; 2765 } 2766 2767 ret = ib_post_send(ch->qp, first_wr, NULL); 2768 if (ret) { 2769 pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n", 2770 __func__, ret, ioctx->n_rdma, 2771 atomic_read(&ch->sq_wr_avail)); 2772 goto out_undo; 2773 } 2774 2775 return 0; 2776 out_undo: 2777 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail); 2778 return ret; 2779 } 2780 2781 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status) 2782 { 2783 switch (tcm_mgmt_status) { 2784 case TMR_FUNCTION_COMPLETE: 2785 return SRP_TSK_MGMT_SUCCESS; 2786 case TMR_FUNCTION_REJECTED: 2787 return SRP_TSK_MGMT_FUNC_NOT_SUPP; 2788 } 2789 return SRP_TSK_MGMT_FAILED; 2790 } 2791 2792 /** 2793 * srpt_queue_response - transmit the response to a SCSI command 2794 * @cmd: SCSI target command. 2795 * 2796 * Callback function called by the TCM core. Must not block since it can be 2797 * invoked on the context of the IB completion handler. 2798 */ 2799 static void srpt_queue_response(struct se_cmd *cmd) 2800 { 2801 struct srpt_send_ioctx *ioctx = 2802 container_of(cmd, struct srpt_send_ioctx, cmd); 2803 struct srpt_rdma_ch *ch = ioctx->ch; 2804 struct srpt_device *sdev = ch->sport->sdev; 2805 struct ib_send_wr send_wr, *first_wr = &send_wr; 2806 struct ib_sge sge; 2807 enum srpt_command_state state; 2808 int resp_len, ret, i; 2809 u8 srp_tm_status; 2810 2811 state = ioctx->state; 2812 switch (state) { 2813 case SRPT_STATE_NEW: 2814 case SRPT_STATE_DATA_IN: 2815 ioctx->state = SRPT_STATE_CMD_RSP_SENT; 2816 break; 2817 case SRPT_STATE_MGMT: 2818 ioctx->state = SRPT_STATE_MGMT_RSP_SENT; 2819 break; 2820 default: 2821 WARN(true, "ch %p; cmd %d: unexpected command state %d\n", 2822 ch, ioctx->ioctx.index, ioctx->state); 2823 break; 2824 } 2825 2826 if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)) 2827 return; 2828 2829 /* For read commands, transfer the data to the initiator. 
	 */
	if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
	    ioctx->cmd.data_length &&
	    !ioctx->queue_status_only) {
		for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
			struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

			first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
					ch->sport->port, NULL, first_wr);
		}
	}

	if (state != SRPT_STATE_MGMT)
		resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
					      cmd->scsi_status);
	else {
		srp_tm_status
			= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
		resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
						  ioctx->cmd.tag);
	}

	atomic_inc(&ch->req_lim);

	if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
				       &ch->sq_wr_avail) < 0)) {
		pr_warn("%s: IB send queue full (needed %d)\n",
			__func__, ioctx->n_rdma);
		ret = -ENOMEM;
		goto out;
	}

	ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
				      DMA_TO_DEVICE);

	sge.addr = ioctx->ioctx.dma;
	sge.length = resp_len;
	sge.lkey = sdev->lkey;

	ioctx->ioctx.cqe.done = srpt_send_done;
	send_wr.next = NULL;
	send_wr.wr_cqe = &ioctx->ioctx.cqe;
	send_wr.sg_list = &sge;
	send_wr.num_sge = 1;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ch->qp, first_wr, NULL);
	if (ret < 0) {
		pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
		       __func__, ioctx->cmd.tag, ret);
		goto out;
	}

	return;

out:
	atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
	atomic_dec(&ch->req_lim);
	srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
	target_put_sess_cmd(&ioctx->cmd);
}

static int srpt_queue_data_in(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
	return 0;
}

static void srpt_queue_tm_rsp(struct se_cmd *cmd)
{
	srpt_queue_response(cmd);
}

/*
 * This function is called for aborted commands if no response is sent to the
 * initiator. Make sure that the credits freed by aborting a command are
 * returned to the initiator the next time a response is sent by incrementing
 * ch->req_lim_delta.
 */
static void srpt_aborted_task(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx = container_of(cmd,
				struct srpt_send_ioctx, cmd);
	struct srpt_rdma_ch *ch = ioctx->ch;

	atomic_inc(&ch->req_lim_delta);
}

static int srpt_queue_status(struct se_cmd *cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
	BUG_ON(ioctx->sense_data != cmd->sense_buffer);
	if (cmd->se_cmd_flags &
	    (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
		WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
	ioctx->queue_status_only = true;
	srpt_queue_response(cmd);
	return 0;
}

static void srpt_refresh_port_work(struct work_struct *work)
{
	struct srpt_port *sport = container_of(work, struct srpt_port, work);

	srpt_refresh_port(sport);
}

/**
 * srpt_release_sport - disable login and wait for associated channels
 * @sport: SRPT HCA port.
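 *
 * Blocks until the last channel associated with @sport has been freed,
 * logging the state of any remaining channels every five seconds while
 * waiting.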
2942 */ 2943 static int srpt_release_sport(struct srpt_port *sport) 2944 { 2945 DECLARE_COMPLETION_ONSTACK(c); 2946 struct srpt_nexus *nexus, *next_n; 2947 struct srpt_rdma_ch *ch; 2948 2949 WARN_ON_ONCE(irqs_disabled()); 2950 2951 sport->freed_channels = &c; 2952 2953 mutex_lock(&sport->mutex); 2954 srpt_set_enabled(sport, false); 2955 mutex_unlock(&sport->mutex); 2956 2957 while (atomic_read(&sport->refcount) > 0 && 2958 wait_for_completion_timeout(&c, 5 * HZ) <= 0) { 2959 pr_info("%s_%d: waiting for unregistration of %d sessions ...\n", 2960 dev_name(&sport->sdev->device->dev), sport->port, 2961 atomic_read(&sport->refcount)); 2962 rcu_read_lock(); 2963 list_for_each_entry(nexus, &sport->nexus_list, entry) { 2964 list_for_each_entry(ch, &nexus->ch_list, list) { 2965 pr_info("%s-%d: state %s\n", 2966 ch->sess_name, ch->qp->qp_num, 2967 get_ch_state_name(ch->state)); 2968 } 2969 } 2970 rcu_read_unlock(); 2971 } 2972 2973 mutex_lock(&sport->mutex); 2974 list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) { 2975 list_del(&nexus->entry); 2976 kfree_rcu(nexus, rcu); 2977 } 2978 mutex_unlock(&sport->mutex); 2979 2980 return 0; 2981 } 2982 2983 static struct se_wwn *__srpt_lookup_wwn(const char *name) 2984 { 2985 struct ib_device *dev; 2986 struct srpt_device *sdev; 2987 struct srpt_port *sport; 2988 int i; 2989 2990 list_for_each_entry(sdev, &srpt_dev_list, list) { 2991 dev = sdev->device; 2992 if (!dev) 2993 continue; 2994 2995 for (i = 0; i < dev->phys_port_cnt; i++) { 2996 sport = &sdev->port[i]; 2997 2998 if (strcmp(sport->port_guid_id.name, name) == 0) 2999 return &sport->port_guid_id.wwn; 3000 if (strcmp(sport->port_gid_id.name, name) == 0) 3001 return &sport->port_gid_id.wwn; 3002 } 3003 } 3004 3005 return NULL; 3006 } 3007 3008 static struct se_wwn *srpt_lookup_wwn(const char *name) 3009 { 3010 struct se_wwn *wwn; 3011 3012 spin_lock(&srpt_dev_lock); 3013 wwn = __srpt_lookup_wwn(name); 3014 spin_unlock(&srpt_dev_lock); 3015 3016 return wwn; 3017 } 3018 3019 static void srpt_free_srq(struct srpt_device *sdev) 3020 { 3021 if (!sdev->srq) 3022 return; 3023 3024 ib_destroy_srq(sdev->srq); 3025 srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev, 3026 sdev->srq_size, sdev->req_buf_cache, 3027 DMA_FROM_DEVICE); 3028 kmem_cache_destroy(sdev->req_buf_cache); 3029 sdev->srq = NULL; 3030 } 3031 3032 static int srpt_alloc_srq(struct srpt_device *sdev) 3033 { 3034 struct ib_srq_init_attr srq_attr = { 3035 .event_handler = srpt_srq_event, 3036 .srq_context = (void *)sdev, 3037 .attr.max_wr = sdev->srq_size, 3038 .attr.max_sge = 1, 3039 .srq_type = IB_SRQT_BASIC, 3040 }; 3041 struct ib_device *device = sdev->device; 3042 struct ib_srq *srq; 3043 int i; 3044 3045 WARN_ON_ONCE(sdev->srq); 3046 srq = ib_create_srq(sdev->pd, &srq_attr); 3047 if (IS_ERR(srq)) { 3048 pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq)); 3049 return PTR_ERR(srq); 3050 } 3051 3052 pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size, 3053 sdev->device->attrs.max_srq_wr, dev_name(&device->dev)); 3054 3055 sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf", 3056 srp_max_req_size, 0, 0, NULL); 3057 if (!sdev->req_buf_cache) 3058 goto free_srq; 3059 3060 sdev->ioctx_ring = (struct srpt_recv_ioctx **) 3061 srpt_alloc_ioctx_ring(sdev, sdev->srq_size, 3062 sizeof(*sdev->ioctx_ring[0]), 3063 sdev->req_buf_cache, 0, DMA_FROM_DEVICE); 3064 if (!sdev->ioctx_ring) 3065 goto free_cache; 3066 3067 sdev->use_srq = true; 3068 sdev->srq = srq; 3069 3070 for (i = 0; i < sdev->srq_size; 
	for (i = 0; i < sdev->srq_size; ++i) {
		INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
		srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
	}

	return 0;

free_cache:
	kmem_cache_destroy(sdev->req_buf_cache);

free_srq:
	ib_destroy_srq(srq);
	return -ENOMEM;
}

static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
{
	struct ib_device *device = sdev->device;
	int ret = 0;

	if (!use_srq) {
		srpt_free_srq(sdev);
		sdev->use_srq = false;
	} else if (use_srq && !sdev->srq) {
		ret = srpt_alloc_srq(sdev);
	}
	pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
		 dev_name(&device->dev), sdev->use_srq, ret);
	return ret;
}

/**
 * srpt_add_one - InfiniBand device addition callback function
 * @device: Describes a HCA.
 */
static int srpt_add_one(struct ib_device *device)
{
	struct srpt_device *sdev;
	struct srpt_port *sport;
	int i, ret;

	pr_debug("device = %p\n", device);

	sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
		       GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->device = device;
	mutex_init(&sdev->sdev_mutex);

	sdev->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(sdev->pd)) {
		ret = PTR_ERR(sdev->pd);
		goto free_dev;
	}

	sdev->lkey = sdev->pd->local_dma_lkey;

	sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);

	srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);

	if (!srpt_service_guid)
		srpt_service_guid = be64_to_cpu(device->node_guid);

	if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
		sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
	if (IS_ERR(sdev->cm_id)) {
		pr_info("ib_create_cm_id() failed: %ld\n",
			PTR_ERR(sdev->cm_id));
		ret = PTR_ERR(sdev->cm_id);
		sdev->cm_id = NULL;
		if (!rdma_cm_id)
			goto err_ring;
	}

	/* print out target login information */
	pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
		 srpt_service_guid, srpt_service_guid, srpt_service_guid);

	/*
	 * We do not have a consistent service_id (i.e. also id_ext of
	 * target_id) to identify this target. We currently use the GUID of
	 * the first HCA in the system as service_id; therefore, the target_id
	 * will change if this HCA goes bad and is replaced by a different
	 * HCA.
	 */
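	/*
	 * Listen for IB/CM connection requests on the service ID derived
	 * from srpt_service_guid; skipped (ret = 0) when the device only
	 * supports RDMA/CM.
	 */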
	ret = sdev->cm_id ?
		ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0) :
		0;
	if (ret < 0) {
		pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
		       sdev->cm_id->state);
		goto err_cm;
	}

	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
	ib_register_event_handler(&sdev->event_handler);

	for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
		sport = &sdev->port[i - 1];
		INIT_LIST_HEAD(&sport->nexus_list);
		mutex_init(&sport->mutex);
		sport->sdev = sdev;
		sport->port = i;
		sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
		sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
		sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
		sport->port_attrib.use_srq = false;
		INIT_WORK(&sport->work, srpt_refresh_port_work);
		mutex_init(&sport->port_guid_id.mutex);
		INIT_LIST_HEAD(&sport->port_guid_id.tpg_list);
		mutex_init(&sport->port_gid_id.mutex);
		INIT_LIST_HEAD(&sport->port_gid_id.tpg_list);

		ret = srpt_refresh_port(sport);
		if (ret) {
			pr_err("MAD registration failed for %s-%d.\n",
			       dev_name(&sdev->device->dev), i);
			goto err_event;
		}
	}

	spin_lock(&srpt_dev_lock);
	list_add_tail(&sdev->list, &srpt_dev_list);
	spin_unlock(&srpt_dev_lock);

	ib_set_client_data(device, &srpt_client, sdev);
	pr_debug("added %s.\n", dev_name(&device->dev));
	return 0;

err_event:
	ib_unregister_event_handler(&sdev->event_handler);
err_cm:
	if (sdev->cm_id)
		ib_destroy_cm_id(sdev->cm_id);
err_ring:
	srpt_free_srq(sdev);
	ib_dealloc_pd(sdev->pd);
free_dev:
	kfree(sdev);
	pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
	return ret;
}

/**
 * srpt_remove_one - InfiniBand device removal callback function
 * @device: Describes a HCA.
 * @client_data: The value passed as the third argument to ib_set_client_data().
 */
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
	struct srpt_device *sdev = client_data;
	int i;

	srpt_unregister_mad_agent(sdev);

	ib_unregister_event_handler(&sdev->event_handler);

	/* Cancel any work queued by the just unregistered IB event handler. */
	for (i = 0; i < sdev->device->phys_port_cnt; i++)
		cancel_work_sync(&sdev->port[i].work);

	if (sdev->cm_id)
		ib_destroy_cm_id(sdev->cm_id);

	ib_set_client_data(device, &srpt_client, NULL);

	/*
	 * Unregistering a target must happen after destroying sdev->cm_id
	 * such that no new SRP_LOGIN_REQ information units can arrive while
	 * destroying the target.
3243 */ 3244 spin_lock(&srpt_dev_lock); 3245 list_del(&sdev->list); 3246 spin_unlock(&srpt_dev_lock); 3247 3248 for (i = 0; i < sdev->device->phys_port_cnt; i++) 3249 srpt_release_sport(&sdev->port[i]); 3250 3251 srpt_free_srq(sdev); 3252 3253 ib_dealloc_pd(sdev->pd); 3254 3255 kfree(sdev); 3256 } 3257 3258 static struct ib_client srpt_client = { 3259 .name = DRV_NAME, 3260 .add = srpt_add_one, 3261 .remove = srpt_remove_one 3262 }; 3263 3264 static int srpt_check_true(struct se_portal_group *se_tpg) 3265 { 3266 return 1; 3267 } 3268 3269 static int srpt_check_false(struct se_portal_group *se_tpg) 3270 { 3271 return 0; 3272 } 3273 3274 static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg) 3275 { 3276 return tpg->se_tpg_wwn->priv; 3277 } 3278 3279 static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn) 3280 { 3281 struct srpt_port *sport = wwn->priv; 3282 3283 if (wwn == &sport->port_guid_id.wwn) 3284 return &sport->port_guid_id; 3285 if (wwn == &sport->port_gid_id.wwn) 3286 return &sport->port_gid_id; 3287 WARN_ON_ONCE(true); 3288 return NULL; 3289 } 3290 3291 static char *srpt_get_fabric_wwn(struct se_portal_group *tpg) 3292 { 3293 struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg); 3294 3295 return stpg->sport_id->name; 3296 } 3297 3298 static u16 srpt_get_tag(struct se_portal_group *tpg) 3299 { 3300 return 1; 3301 } 3302 3303 static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg) 3304 { 3305 return 1; 3306 } 3307 3308 static void srpt_release_cmd(struct se_cmd *se_cmd) 3309 { 3310 struct srpt_send_ioctx *ioctx = container_of(se_cmd, 3311 struct srpt_send_ioctx, cmd); 3312 struct srpt_rdma_ch *ch = ioctx->ch; 3313 struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx; 3314 3315 WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE && 3316 !(ioctx->cmd.transport_state & CMD_T_ABORTED)); 3317 3318 if (recv_ioctx) { 3319 WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list)); 3320 ioctx->recv_ioctx = NULL; 3321 srpt_post_recv(ch->sport->sdev, ch, recv_ioctx); 3322 } 3323 3324 if (ioctx->n_rw_ctx) { 3325 srpt_free_rw_ctxs(ch, ioctx); 3326 ioctx->n_rw_ctx = 0; 3327 } 3328 3329 target_free_tag(se_cmd->se_sess, se_cmd); 3330 } 3331 3332 /** 3333 * srpt_close_session - forcibly close a session 3334 * @se_sess: SCSI target session. 3335 * 3336 * Callback function invoked by the TCM core to clean up sessions associated 3337 * with a node ACL when the user invokes 3338 * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id 3339 */ 3340 static void srpt_close_session(struct se_session *se_sess) 3341 { 3342 struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; 3343 3344 srpt_disconnect_ch_sync(ch); 3345 } 3346 3347 /** 3348 * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB) 3349 * @se_sess: SCSI target session. 3350 * 3351 * A quote from RFC 4455 (SCSI-MIB) about this MIB object: 3352 * This object represents an arbitrary integer used to uniquely identify a 3353 * particular attached remote initiator port to a particular SCSI target port 3354 * within a particular SCSI target device within a particular SCSI instance. 3355 */ 3356 static u32 srpt_sess_get_index(struct se_session *se_sess) 3357 { 3358 return 0; 3359 } 3360 3361 static void srpt_set_default_node_attrs(struct se_node_acl *nacl) 3362 { 3363 } 3364 3365 /* Note: only used from inside debug printk's by the TCM core. 
 */
static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
{
	struct srpt_send_ioctx *ioctx;

	ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
	return ioctx->state;
}

static int srpt_parse_guid(u64 *guid, const char *name)
{
	u16 w[4];
	int ret = -EINVAL;

	if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
		goto out;
	*guid = get_unaligned_be64(w);
	ret = 0;
out:
	return ret;
}

/**
 * srpt_parse_i_port_id - parse an initiator port ID
 * @name: ASCII representation of a 128-bit initiator port ID.
 * @i_port_id: Binary 128-bit port ID.
 */
static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
	const char *p;
	unsigned len, count, leading_zero_bytes;
	int ret;

	p = name;
	if (strncasecmp(p, "0x", 2) == 0)
		p += 2;
	ret = -EINVAL;
	len = strlen(p);
	if (len % 2)
		goto out;
	count = min(len / 2, 16U);
	leading_zero_bytes = 16 - count;
	memset(i_port_id, 0, leading_zero_bytes);
	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);

out:
	return ret;
}

/*
 * configfs callback function invoked for mkdir
 * /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
 *
 * i_port_id must be an initiator port GUID, GID or IP address. See also the
 * target_alloc_session() calls in this driver. Examples of valid initiator
 * port IDs:
 * 0x0000000000000000505400fffe4a0b7b
 * 0000000000000000505400fffe4a0b7b
 * 5054:00ff:fe4a:0b7b
 * 192.168.122.76
 */
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	struct sockaddr_storage sa;
	u64 guid;
	u8 i_port_id[16];
	int ret;

	ret = srpt_parse_guid(&guid, name);
	if (ret < 0)
		ret = srpt_parse_i_port_id(i_port_id, name);
	if (ret < 0)
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
					   &sa);
	if (ret < 0)
		pr_err("invalid initiator port ID %s\n", name);
	return ret;
}

static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
						      char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
						       const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RDMA_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
		       MAX_SRPT_RDMA_SIZE);
		return -EINVAL;
	}
	if (val < DEFAULT_MAX_RDMA_SIZE) {
		pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
		       val, DEFAULT_MAX_RDMA_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rdma_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
						     char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}

static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
						      const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_RSP_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
		       MAX_SRPT_RSP_SIZE);
		return -EINVAL;
	}
	if (val < MIN_MAX_RSP_SIZE) {
		pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
		       MIN_MAX_RSP_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_max_rsp_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
						char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
}

static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
						 const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0) {
		pr_err("kstrtoul() failed with ret: %d\n", ret);
		return -EINVAL;
	}
	if (val > MAX_SRPT_SRQ_SIZE) {
		pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
		       MAX_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	if (val < MIN_SRPT_SRQ_SIZE) {
		pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
		       MIN_SRPT_SRQ_SIZE);
		return -EINVAL;
	}
	sport->port_attrib.srp_sq_size = val;

	return count;
}

static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
					    char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return sprintf(page, "%d\n", sport->port_attrib.use_srq);
}

static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	struct srpt_device *sdev = sport->sdev;
	unsigned long val;
	bool enabled;
	int ret;

	ret = kstrtoul(page, 0, &val);
	if (ret < 0)
		return ret;
	if (val != !!val)
		return -EINVAL;

	ret = mutex_lock_interruptible(&sdev->sdev_mutex);
	if (ret < 0)
		return ret;
	ret = mutex_lock_interruptible(&sport->mutex);
	if (ret < 0)
		goto unlock_sdev;
	enabled = sport->enabled;
	/*
	 * Log out all initiator systems before changing 'use_srq'.
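	 * The port is temporarily disabled while the SRQ setting is changed
	 * and then restored to its previous state. Hypothetical shell usage
	 * ($port and $tpg depend on the local configuration):
	 *
	 *   echo 0 > /sys/kernel/config/target/srpt/$port/$tpg/attrib/use_srq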
	 */
	srpt_set_enabled(sport, false);
	sport->port_attrib.use_srq = val;
	srpt_use_srq(sdev, sport->port_attrib.use_srq);
	srpt_set_enabled(sport, enabled);
	ret = count;
	mutex_unlock(&sport->mutex);
unlock_sdev:
	mutex_unlock(&sdev->sdev_mutex);

	return ret;
}

CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);

static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
	&srpt_tpg_attrib_attr_srp_max_rdma_size,
	&srpt_tpg_attrib_attr_srp_max_rsp_size,
	&srpt_tpg_attrib_attr_srp_sq_size,
	&srpt_tpg_attrib_attr_use_srq,
	NULL,
};

static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
{
	struct rdma_cm_id *rdma_cm_id;
	int ret;

	rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
				    NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma_cm_id)) {
		pr_err("RDMA/CM ID creation failed: %ld\n",
		       PTR_ERR(rdma_cm_id));
		goto out;
	}

	ret = rdma_bind_addr(rdma_cm_id, listen_addr);
	if (ret) {
		char addr_str[64];

		snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
		pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
		       addr_str, ret);
		rdma_destroy_id(rdma_cm_id);
		rdma_cm_id = ERR_PTR(ret);
		goto out;
	}

	ret = rdma_listen(rdma_cm_id, 128);
	if (ret) {
		pr_err("rdma_listen() failed: %d\n", ret);
		rdma_destroy_id(rdma_cm_id);
		rdma_cm_id = ERR_PTR(ret);
	}

out:
	return rdma_cm_id;
}

static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
{
	return sprintf(page, "%d\n", rdma_cm_port);
}

static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
				       const char *page, size_t count)
{
	struct sockaddr_in addr4 = { .sin_family = AF_INET };
	struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
	struct rdma_cm_id *new_id = NULL;
	u16 val;
	int ret;

	ret = kstrtou16(page, 0, &val);
	if (ret < 0)
		return ret;
	ret = count;
	if (rdma_cm_port == val)
		goto out;

	if (val) {
		addr6.sin6_port = cpu_to_be16(val);
		new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
		if (IS_ERR(new_id)) {
			addr4.sin_port = cpu_to_be16(val);
			new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
			if (IS_ERR(new_id)) {
				ret = PTR_ERR(new_id);
				goto out;
			}
		}
	}

	mutex_lock(&rdma_cm_mutex);
	rdma_cm_port = val;
	swap(rdma_cm_id, new_id);
	mutex_unlock(&rdma_cm_mutex);

	if (new_id)
		rdma_destroy_id(new_id);
	ret = count;
out:
	return ret;
}

CONFIGFS_ATTR(srpt_, rdma_cm_port);

static struct configfs_attribute *srpt_da_attrs[] = {
	&srpt_attr_rdma_cm_port,
	NULL,
};

static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled);
}

static ssize_t srpt_tpg_enable_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
		return -EINVAL;
	}

	mutex_lock(&sport->mutex);
	srpt_set_enabled(sport, tmp);
	mutex_unlock(&sport->mutex);

	return count;
}

CONFIGFS_ATTR(srpt_tpg_, enable);

static struct configfs_attribute *srpt_tpg_attrs[] = {
	&srpt_tpg_attr_enable,
	NULL,
};

/**
 * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
 * @wwn: Corresponds to $driver/$port.
 * @name: $tpg.
 */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     const char *name)
{
	struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
	struct srpt_tpg *stpg;
	int res = -ENOMEM;

	stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
	if (!stpg)
		return ERR_PTR(res);
	stpg->sport_id = sport_id;
	res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
	if (res) {
		kfree(stpg);
		return ERR_PTR(res);
	}

	mutex_lock(&sport_id->mutex);
	list_add_tail(&stpg->entry, &sport_id->tpg_list);
	mutex_unlock(&sport_id->mutex);

	return &stpg->tpg;
}

/**
 * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
 * @tpg: Target portal group to deregister.
 */
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
	struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
	struct srpt_port_id *sport_id = stpg->sport_id;
	struct srpt_port *sport = srpt_tpg_to_sport(tpg);

	mutex_lock(&sport_id->mutex);
	list_del(&stpg->entry);
	mutex_unlock(&sport_id->mutex);

	sport->enabled = false;
	core_tpg_deregister(tpg);
	kfree(stpg);
}

/**
 * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
 * @tf: Not used.
 * @group: Not used.
 * @name: $port.
 */
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
				      struct config_group *group,
				      const char *name)
{
	return srpt_lookup_wwn(name) ? : ERR_PTR(-EINVAL);
}

/**
 * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
 * @wwn: $port.
 */
static void srpt_drop_tport(struct se_wwn *wwn)
{
}

static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "\n");
}

CONFIGFS_ATTR_RO(srpt_wwn_, version);

static struct configfs_attribute *srpt_wwn_attrs[] = {
	&srpt_wwn_attr_version,
	NULL,
};

static const struct target_core_fabric_ops srpt_template = {
	.module = THIS_MODULE,
	.fabric_name = "srpt",
	.tpg_get_wwn = srpt_get_fabric_wwn,
	.tpg_get_tag = srpt_get_tag,
	.tpg_check_demo_mode = srpt_check_false,
	.tpg_check_demo_mode_cache = srpt_check_true,
	.tpg_check_demo_mode_write_protect = srpt_check_true,
	.tpg_check_prod_mode_write_protect = srpt_check_false,
	.tpg_get_inst_index = srpt_tpg_get_inst_index,
	.release_cmd = srpt_release_cmd,
	.check_stop_free = srpt_check_stop_free,
	.close_session = srpt_close_session,
	.sess_get_index = srpt_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = srpt_write_pending,
	.set_default_node_attributes = srpt_set_default_node_attrs,
	.get_cmd_state = srpt_get_tcm_cmd_state,
	.queue_data_in = srpt_queue_data_in,
	.queue_status = srpt_queue_status,
	.queue_tm_rsp = srpt_queue_tm_rsp,
	.aborted_task = srpt_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = srpt_make_tport,
	.fabric_drop_wwn = srpt_drop_tport,
	.fabric_make_tpg = srpt_make_tpg,
	.fabric_drop_tpg = srpt_drop_tpg,
	.fabric_init_nodeacl = srpt_init_nodeacl,

	.tfc_discovery_attrs = srpt_da_attrs,
	.tfc_wwn_attrs = srpt_wwn_attrs,
	.tfc_tpg_base_attrs = srpt_tpg_attrs,
	.tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
};

/**
 * srpt_init_module - kernel module initialization
 *
 * Note: Since ib_register_client() registers callback functions, and since at
 * least one of these callback functions (srpt_add_one()) calls target core
 * functions, this driver must be registered with the target core before
 * ib_register_client() is called.
 */
static int __init srpt_init_module(void)
{
	int ret;

	ret = -EINVAL;
	if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
		       srp_max_req_size, MIN_MAX_REQ_SIZE);
		goto out;
	}

	if (srpt_srq_size < MIN_SRPT_SRQ_SIZE ||
	    srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
		pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
		       srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
		goto out;
	}

	ret = target_register_template(&srpt_template);
	if (ret)
		goto out;

	ret = ib_register_client(&srpt_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto out_unregister_target;
	}

	return 0;

out_unregister_target:
	target_unregister_template(&srpt_template);
out:
	return ret;
}

static void __exit srpt_cleanup_module(void)
{
	if (rdma_cm_id)
		rdma_destroy_id(rdma_cm_id);
	ib_unregister_client(&srpt_client);
	target_unregister_template(&srpt_template);
}

module_init(srpt_init_module);
module_exit(srpt_cleanup_module);
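/*
 * Usage sketch (not part of the driver): with the configfs layout documented
 * above, a target port is typically brought up as follows; $port and $tpg
 * are the names accepted by srpt_make_tport() and srpt_make_tpg() and depend
 * on the local HCA:
 *
 *   modprobe ib_srpt
 *   mkdir -p /sys/kernel/config/target/srpt/$port/$tpg
 *   echo 1 > /sys/kernel/config/target/srpt/$port/$tpg/enable
 */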