/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates the Protection Domain (PD),
 * Completion Queues (CQs) and DMA Memory Region (DMA MR) for the
 * IB device associated with the adapter.
68 * 69 * returns 0 on success, -1 on failure 70 */ 71 static int iser_create_device_ib_res(struct iser_device *device) 72 { 73 device->pd = ib_alloc_pd(device->ib_device); 74 if (IS_ERR(device->pd)) 75 goto pd_err; 76 77 device->rx_cq = ib_create_cq(device->ib_device, 78 iser_cq_callback, 79 iser_cq_event_callback, 80 (void *)device, 81 ISER_MAX_RX_CQ_LEN, 0); 82 if (IS_ERR(device->rx_cq)) 83 goto rx_cq_err; 84 85 device->tx_cq = ib_create_cq(device->ib_device, 86 NULL, iser_cq_event_callback, 87 (void *)device, 88 ISER_MAX_TX_CQ_LEN, 0); 89 90 if (IS_ERR(device->tx_cq)) 91 goto tx_cq_err; 92 93 if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP)) 94 goto cq_arm_err; 95 96 tasklet_init(&device->cq_tasklet, 97 iser_cq_tasklet_fn, 98 (unsigned long)device); 99 100 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | 101 IB_ACCESS_REMOTE_WRITE | 102 IB_ACCESS_REMOTE_READ); 103 if (IS_ERR(device->mr)) 104 goto dma_mr_err; 105 106 INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, 107 iser_event_handler); 108 if (ib_register_event_handler(&device->event_handler)) 109 goto handler_err; 110 111 return 0; 112 113 handler_err: 114 ib_dereg_mr(device->mr); 115 dma_mr_err: 116 tasklet_kill(&device->cq_tasklet); 117 cq_arm_err: 118 ib_destroy_cq(device->tx_cq); 119 tx_cq_err: 120 ib_destroy_cq(device->rx_cq); 121 rx_cq_err: 122 ib_dealloc_pd(device->pd); 123 pd_err: 124 iser_err("failed to allocate an IB resource\n"); 125 return -1; 126 } 127 128 /** 129 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR, 130 * CQ and PD created with the device associated with the adapator. 131 */ 132 static void iser_free_device_ib_res(struct iser_device *device) 133 { 134 BUG_ON(device->mr == NULL); 135 136 tasklet_kill(&device->cq_tasklet); 137 (void)ib_unregister_event_handler(&device->event_handler); 138 (void)ib_dereg_mr(device->mr); 139 (void)ib_destroy_cq(device->tx_cq); 140 (void)ib_destroy_cq(device->rx_cq); 141 (void)ib_dealloc_pd(device->pd); 142 143 device->mr = NULL; 144 device->tx_cq = NULL; 145 device->rx_cq = NULL; 146 device->pd = NULL; 147 } 148 149 /** 150 * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP) 151 * 152 * returns 0 on success, -1 on failure 153 */ 154 static int iser_create_ib_conn_res(struct iser_conn *ib_conn) 155 { 156 struct iser_device *device; 157 struct ib_qp_init_attr init_attr; 158 int ret = -ENOMEM; 159 struct ib_fmr_pool_param params; 160 161 BUG_ON(ib_conn->device == NULL); 162 163 device = ib_conn->device; 164 165 ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); 166 if (!ib_conn->login_buf) 167 goto out_err; 168 169 ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, 170 (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, 171 DMA_FROM_DEVICE); 172 173 ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + 174 (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), 175 GFP_KERNEL); 176 if (!ib_conn->page_vec) 177 goto out_err; 178 179 ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); 180 181 params.page_shift = SHIFT_4K; 182 /* when the first/last SG element are not start/end * 183 * page aligned, the map whould be of N+1 pages */ 184 params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; 185 /* make the pool size twice the max number of SCSI commands * 186 * the ML is expected to queue, watermark for unmap at 50% */ 187 params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2; 188 params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX; 189 params.cache = 0; 190 params.flush_function = NULL; 191 params.access = 
				   (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(ib_conn->fmr_pool)) {
		ret = PTR_ERR(ib_conn->fmr_pool);
		ib_conn->fmr_pool = NULL;
		goto out_err;
	}

	memset(&init_attr, 0, sizeof init_attr);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq;
	init_attr.recv_cq	= device->rx_cq;
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
	return ret;

out_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * releases the FMR pool and QP objects and, when can_destroy_id is set,
 * the CMA ID as well; always returns 0
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	/* when called from the cma handler context, the caller relies on the
	 * cma itself to destroy the id */
	if (ib_conn->cma_id != NULL && can_destroy_id)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	return 0;
}

/**
 * based on the resolved device node GUID, see if there is an already
 * allocated iser_device for this IB device; if there is none, create one.
 */
264 */ 265 static 266 struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) 267 { 268 struct iser_device *device; 269 270 mutex_lock(&ig.device_list_mutex); 271 272 list_for_each_entry(device, &ig.device_list, ig_list) 273 /* find if there's a match using the node GUID */ 274 if (device->ib_device->node_guid == cma_id->device->node_guid) 275 goto inc_refcnt; 276 277 device = kzalloc(sizeof *device, GFP_KERNEL); 278 if (device == NULL) 279 goto out; 280 281 /* assign this device to the device */ 282 device->ib_device = cma_id->device; 283 /* init the device and link it into ig device list */ 284 if (iser_create_device_ib_res(device)) { 285 kfree(device); 286 device = NULL; 287 goto out; 288 } 289 list_add(&device->ig_list, &ig.device_list); 290 291 inc_refcnt: 292 device->refcount++; 293 out: 294 mutex_unlock(&ig.device_list_mutex); 295 return device; 296 } 297 298 /* if there's no demand for this device, release it */ 299 static void iser_device_try_release(struct iser_device *device) 300 { 301 mutex_lock(&ig.device_list_mutex); 302 device->refcount--; 303 iser_err("device %p refcount %d\n",device,device->refcount); 304 if (!device->refcount) { 305 iser_free_device_ib_res(device); 306 list_del(&device->ig_list); 307 kfree(device); 308 } 309 mutex_unlock(&ig.device_list_mutex); 310 } 311 312 static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, 313 enum iser_ib_conn_state comp, 314 enum iser_ib_conn_state exch) 315 { 316 int ret; 317 318 spin_lock_bh(&ib_conn->lock); 319 if ((ret = (ib_conn->state == comp))) 320 ib_conn->state = exch; 321 spin_unlock_bh(&ib_conn->lock); 322 return ret; 323 } 324 325 /** 326 * Frees all conn objects and deallocs conn descriptor 327 */ 328 static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) 329 { 330 struct iser_device *device = ib_conn->device; 331 332 BUG_ON(ib_conn->state != ISER_CONN_DOWN); 333 334 mutex_lock(&ig.connlist_mutex); 335 list_del(&ib_conn->conn_list); 336 mutex_unlock(&ig.connlist_mutex); 337 iser_free_rx_descriptors(ib_conn); 338 iser_free_ib_conn_res(ib_conn, can_destroy_id); 339 ib_conn->device = NULL; 340 /* on EVENT_ADDR_ERROR there's no device yet for this conn */ 341 if (device != NULL) 342 iser_device_try_release(device); 343 iscsi_destroy_endpoint(ib_conn->ep); 344 } 345 346 void iser_conn_get(struct iser_conn *ib_conn) 347 { 348 atomic_inc(&ib_conn->refcount); 349 } 350 351 int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id) 352 { 353 if (atomic_dec_and_test(&ib_conn->refcount)) { 354 iser_conn_release(ib_conn, can_destroy_id); 355 return 1; 356 } 357 return 0; 358 } 359 360 /** 361 * triggers start of the disconnect procedures and wait for them to be done 362 */ 363 void iser_conn_terminate(struct iser_conn *ib_conn) 364 { 365 int err = 0; 366 367 /* change the ib conn state only if the conn is UP, however always call 368 * rdma_disconnect since this is the only way to cause the CMA to change 369 * the QP state to ERROR 370 */ 371 372 iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING); 373 err = rdma_disconnect(ib_conn->cma_id); 374 if (err) 375 iser_err("Failed to disconnect, conn: 0x%p err %d\n", 376 ib_conn,err); 377 378 wait_event_interruptible(ib_conn->wait, 379 ib_conn->state == ISER_CONN_DOWN); 380 381 iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ 382 } 383 384 static int iser_connect_error(struct rdma_cm_id *cma_id) 385 { 386 struct iser_conn *ib_conn; 387 ib_conn = (struct iser_conn *)cma_id->context; 388 389 
	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
	return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
}

static int iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *ib_conn;
	int    ret;

	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		return iser_connect_error(cma_id);
	}

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		return iser_connect_error(cma_id);
	}

	return 0;
}

static int iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 4;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return 0;
failure:
	return iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->state = ISER_CONN_UP;
	wake_up_interruptible(&ib_conn->wait);
}

static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;
	int ret;

	ib_conn = (struct iser_conn *)cma_id->context;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING))
		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);

	/* Complete the termination process if no posts are pending */
	if (ib_conn->post_recv_buf_count == 0 &&
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}

	ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */
	return ret;
}

static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	iser_err("event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ret = iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ret = iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		ret = iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		ret = iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	return ret;
}

void iser_conn_init(struct iser_conn *ib_conn)
{
	ib_conn->state = ISER_CONN_INIT;
	init_waitqueue_head(&ib_conn->wait);
	ib_conn->post_recv_buf_count = 0;
	atomic_set(&ib_conn->post_send_buf_count, 0);
	atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */
	INIT_LIST_HEAD(&ib_conn->conn_list);
	spin_lock_init(&ib_conn->lock);
}

/**
 * starts the process of connecting to the target; unless non_blocking
 * is set, sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *ib_conn,
		 struct sockaddr_in *src_addr,
		 struct sockaddr_in *dst_addr,
		 int                 non_blocking)
{
	struct sockaddr *src, *dst;
	int err = 0;

	sprintf(ib_conn->name, "%pI4:%d",
		&dst_addr->sin_addr.s_addr, dst_addr->sin_port);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_err("connecting to: %pI4, port 0x%x\n",
		 &dst_addr->sin_addr, dst_addr->sin_port);

	ib_conn->state = ISER_CONN_PENDING;

	iser_conn_get(ib_conn); /* ref ib conn's cma id */
	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)ib_conn,
					 RDMA_PS_TCP);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	src = (struct sockaddr *)src_addr;
	dst = (struct sockaddr *)dst_addr;
	err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_event_interruptible(ib_conn->wait,
					 (ib_conn->state != ISER_CONN_PENDING));

		if (ib_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}

	mutex_lock(&ig.connlist_mutex);
	list_add(&ib_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
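	/* error unwind: the connection never reached UP, mark it DOWN so that
	 * iser_conn_release() below can tear it down */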
	ib_conn->state = ISER_CONN_DOWN;
connect_failure:
	iser_conn_release(ib_conn, 1);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iser_conn     *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
				   page_list,
				   page_vec->length,
				   io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey   = mem->fmr->lkey;
	mem_reg->rkey   = mem->fmr->rkey;
	mem_reg->len    = page_vec->length * SIZE_4K;
	mem_reg->va     = io_addr;
	mem_reg->is_fmr = 1;
	mem_reg->mem_h  = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered) memory.
 */
void iser_unreg_mem(struct iser_mem_reg *reg)
{
	int ret;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

int iser_post_recvl(struct iser_conn *ib_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = ib_conn->login_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)ib_conn->login_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	unsigned int my_rx_head = ib_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &ib_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		/* the rx descriptor ring holds ISER_QP_MAX_RECV_DTOS entries
		 * (assumed to be a power of two), so the head wraps with a mask */
		my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		ib_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}


/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, errno code on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}

static void iser_handle_comp_error(struct iser_tx_desc *desc,
				   struct iser_conn *ib_conn)
{
	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/* getting here when the state is UP means that the conn is *
		 * being terminated asynchronously from the iSCSI layer's   *
		 * perspective.                                             */
		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
					      ISER_CONN_TERMINATING))
			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* no more non-completed posts to the QP, complete the
		 * termination process without worrying about the disconnect
		 * event */
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}

static int iser_drain_tx_cq(struct iser_device *device)
{
	struct ib_cq *cq = device->tx_cq;
	struct ib_wc wc;
	struct iser_tx_desc *tx_desc;
	struct iser_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc	= (struct iser_tx_desc *)(unsigned long)wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			atomic_dec(&ib_conn->post_send_buf_count);
			iser_handle_comp_error(tx_desc, ib_conn);
		}
		completed_tx++;
	}
	return completed_tx;
}


static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_device  *device = (struct iser_device *)data;
	struct ib_cq	    *cq = device->rx_cq;
	struct ib_wc	    wc;
	struct iser_rx_desc *desc;
	unsigned long	    xfer_len;
	struct iser_conn    *ib_conn;
	int completed_tx, completed_rx;
	completed_tx = completed_rx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					 wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
		completed_rx++;
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device);
	}
	/* #warning "it is assumed here that arming CQ only once it is empty" *
	 * " would not cause interrupts to be missed"                         */
once its empty" * 825 * " would not cause interrupts to be missed" */ 826 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 827 828 completed_tx += iser_drain_tx_cq(device); 829 iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx); 830 } 831 832 static void iser_cq_callback(struct ib_cq *cq, void *cq_context) 833 { 834 struct iser_device *device = (struct iser_device *)cq_context; 835 836 tasklet_schedule(&device->cq_tasklet); 837 } 838