/*
 * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
 *
 * Copyright (c) 2006 - 2010 Broadcom Corporation
 * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
 * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
 */

#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"

struct scsi_transport_template *bnx2i_scsi_xport_template;
struct iscsi_transport bnx2i_iscsi_transport;
static struct scsi_host_template bnx2i_host_template;

/*
 * Global endpoint resource info
 */
static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */


static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
{
	int retval = 0;

	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
		retval = -EPERM;
	return retval;
}

/**
 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
 * @cmd: iscsi cmd struct pointer
 * @buf_off: absolute buffer offset
 * @start_bd_off: u32 pointer to return the offset within the BD
 *		indicated by 'start_bd_idx' on which 'buf_off' falls
 * @start_bd_idx: index of the BD on which 'buf_off' falls
 *
 * identifies & marks various bd info for scsi command's imm data,
 * unsolicited data and the first solicited data seq.
 */
static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
				       u32 *start_bd_off, u32 *start_bd_idx)
{
	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
	u32 cur_offset = 0;
	u32 cur_bd_idx = 0;

	if (buf_off) {
		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
			cur_offset += bd_tbl->buffer_length;
			cur_bd_idx++;
			bd_tbl++;
		}
	}

	*start_bd_off = buf_off - cur_offset;
	*start_bd_idx = cur_bd_idx;
}

/**
 * bnx2i_setup_write_cmd_bd_info - sets up various BD information
 * @task: transport layer's cmd struct pointer
 *
 * identifies & marks various bd info for scsi command's immediate data,
 * unsolicited data and first solicited data seq which includes BD start
 * index & BD buf off. This function takes into account iscsi parameters
 * such as whether immediate data and unsolicited data are supported on
 * this connection.
 */
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	u32 start_bd_offset;
	u32 start_bd_idx;
	u32 buffer_offset = 0;
	u32 cmd_len = cmd->req.total_data_transfer_length;

	/* if ImmediateData is turned off & InitialR2T is turned on,
	 * there will be no immediate or unsolicited data, just return.
	 */
	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
		return;

	/* Immediate data */
	buffer_offset += task->imm_count;
	if (task->imm_count == cmd_len)
		return;

	if (iscsi_task_has_unsol_data(task)) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		cmd->req.ud_buffer_offset = start_bd_offset;
		cmd->req.ud_start_bd_index = start_bd_idx;
		buffer_offset += task->unsol_r2t.data_length;
	}

	if (buffer_offset != cmd_len) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		if ((start_bd_offset > task->conn->session->first_burst) ||
		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
			int i = 0;

			iscsi_conn_printk(KERN_ALERT, task->conn,
					  "bnx2i- error, buf offset 0x%x "
					  "bd_valid %d use_sg %d\n",
					  buffer_offset, cmd->io_tbl.bd_valid,
					  scsi_sg_count(cmd->scsi_cmd));
			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
				iscsi_conn_printk(KERN_ALERT, task->conn,
						  "bnx2i err, bd[%d]: len %x\n",
						  i,
						  cmd->io_tbl.bd_tbl[i].buffer_length);
		}
		cmd->req.sd_buffer_offset = start_bd_offset;
		cmd->req.sd_start_bd_index = start_bd_idx;
	}
}


/**
 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
 * @hba: adapter instance
 * @cmd: iscsi cmd struct pointer
 *
 * map SG list
 */
static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	u64 addr;
	int i;

	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);

	sg_count = scsi_dma_map(sc);

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
		bd[bd_count].buffer_addr_hi = addr >> 32;
		bd[bd_count].buffer_length = sg_len;
		bd[bd_count].flags = 0;
		if (bd_count == 0)
			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;

		byte_count += sg_len;
		bd_count++;
	}

	if (bd_count)
		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;

	BUG_ON(byte_count != scsi_bufflen(sc));
	return bd_count;
}

/**
 * bnx2i_iscsi_map_sg_list - maps SG list
 * @cmd: iscsi cmd struct pointer
 *
 * creates BD list table for the command
 */
static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
{
	int bd_count;

	bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
	if (!bd_count) {
		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;

		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
		bd[0].buffer_length = bd[0].flags = 0;
	}
	cmd->io_tbl.bd_valid = bd_count;
}


/**
 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
 * @cmd: iscsi cmd struct pointer
 *
 * unmap IO buffers and invalidate the BD table
 */
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;

	if (cmd->io_tbl.bd_valid && sc) {
		scsi_dma_unmap(sc);
		cmd->io_tbl.bd_valid = 0;
	}
}

static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
{
	memset(&cmd->req, 0x00, sizeof(cmd->req));
	cmd->req.op_code = 0xFF;
	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
	cmd->req.bd_list_addr_hi =
		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
}


/**
 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
 * @hba: pointer to adapter instance
 * @bnx2i_conn: pointer to iscsi connection
 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
 *
 * update iscsi cid table entry with connection pointer. This enables
 * driver to quickly get hold of connection structure pointer in
 * completion/interrupt thread using iscsi context ID
 */
static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
					struct bnx2i_conn *bnx2i_conn,
					u32 iscsi_cid)
{
	if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "conn bind - entry #%d not free\n",
				  iscsi_cid);
		return -EBUSY;
	}

	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
	return 0;
}


/**
 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
 */
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
					  u16 iscsi_cid)
{
	if (!hba->cid_que.conn_cid_tbl) {
		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
		return NULL;

	} else if (iscsi_cid >= hba->max_active_conns) {
		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
		return NULL;
	}
	return hba->cid_que.conn_cid_tbl[iscsi_cid];
}


/**
 * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
 * @hba: pointer to adapter instance
 */
static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
{
	int idx;

	if (!hba->cid_que.cid_free_cnt)
		return -1;

	idx = hba->cid_que.cid_q_cons_idx;
	hba->cid_que.cid_q_cons_idx++;
	if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_cons_idx = 0;

	hba->cid_que.cid_free_cnt--;
	return hba->cid_que.cid_que[idx];
}


/**
 * bnx2i_free_iscsi_cid - returns an iscsi_cid back to the free pool
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to free
 */
static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
{
	int idx;

	if (iscsi_cid == (u16) -1)
		return;

	hba->cid_que.cid_free_cnt++;

	idx = hba->cid_que.cid_q_prod_idx;
	hba->cid_que.cid_que[idx] = iscsi_cid;
	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
	hba->cid_que.cid_q_prod_idx++;
	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_prod_idx = 0;
}


/**
 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
 * @hba: pointer to adapter instance
 *
 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
 * and initializes table attributes
 */
static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
{
	int mem_size;
	int i;

	mem_size = hba->max_active_conns * sizeof(u32);
	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
	if (!hba->cid_que.cid_que_base)
		return -ENOMEM;

	mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
	if (!hba->cid_que.conn_cid_tbl) {
		kfree(hba->cid_que.cid_que_base);
		hba->cid_que.cid_que_base = NULL;
		return -ENOMEM;
	}

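	/*
	 * Free-cid bookkeeping (see bnx2i_alloc_iscsi_cid() and
	 * bnx2i_free_iscsi_cid() above): cid_que[] is a circular queue of
	 * free iscsi_cid values, consumed at cid_q_cons_idx and returned
	 * at cid_q_prod_idx, while conn_cid_tbl[] maps an in-use cid back
	 * to its bnx2i_conn.  Every cid starts out on the free queue.
	 */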
	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
	hba->cid_que.cid_q_prod_idx = 0;
	hba->cid_que.cid_q_cons_idx = 0;
	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
	hba->cid_que.cid_free_cnt = hba->max_active_conns;

	for (i = 0; i < hba->max_active_conns; i++) {
		hba->cid_que.cid_que[i] = i;
		hba->cid_que.conn_cid_tbl[i] = NULL;
	}
	return 0;
}


/**
 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
 * @hba: pointer to adapter instance
 */
static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
{
	kfree(hba->cid_que.cid_que_base);
	hba->cid_que.cid_que_base = NULL;

	kfree(hba->cid_que.conn_cid_tbl);
	hba->cid_que.conn_cid_tbl = NULL;
}


/**
 * bnx2i_alloc_ep - allocates ep structure from global pool
 * @hba: pointer to adapter instance
 *
 * routine allocates a free endpoint structure from the global pool to be
 * used for this connection. The global resource lock, 'bnx2i_resc_lock',
 * is held while accessing shared global data structures
 */
static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
	struct iscsi_endpoint *ep;
	struct bnx2i_endpoint *bnx2i_ep;

	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
	if (!ep) {
		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	bnx2i_ep->cls_ep = ep;
	INIT_LIST_HEAD(&bnx2i_ep->link);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->ep_iscsi_cid = (u16) -1;
	bnx2i_ep->hba = hba;
	bnx2i_ep->hba_age = hba->age;
	hba->ofld_conns_active++;
	init_waitqueue_head(&bnx2i_ep->ofld_wait);
	return ep;
}


/**
 * bnx2i_free_ep - free endpoint
 * @ep: pointer to iscsi endpoint structure
 */
static void bnx2i_free_ep(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
	unsigned long flags;

	spin_lock_irqsave(&bnx2i_resc_lock, flags);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->hba->ofld_conns_active--;

	if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
		bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);

	if (bnx2i_ep->conn) {
		bnx2i_ep->conn->ep = NULL;
		bnx2i_ep->conn = NULL;
	}

	bnx2i_ep->hba = NULL;
	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
	iscsi_destroy_endpoint(ep);
}


/**
 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 * @cmd: iscsi command structure
 */
static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
			   struct bnx2i_cmd *cmd)
{
	struct io_bdt *io = &cmd->io_tbl;
	struct iscsi_bd *bd;

	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
					ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
					&io->bd_tbl_dma, GFP_KERNEL);
	if (!io->bd_tbl) {
		iscsi_session_printk(KERN_ERR, session, "Could not "
				     "allocate bdt.\n");
		return -ENOMEM;
	}
	io->bd_valid = 0;
	return 0;
}

/**
 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and releases BD tables
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 */
static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
				   struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		if (cmd->io_tbl.bd_tbl)
			dma_free_coherent(&hba->pcidev->dev,
					  ISCSI_MAX_BDS_PER_CMD *
					  sizeof(struct iscsi_bd),
					  cmd->io_tbl.bd_tbl,
					  cmd->io_tbl.bd_tbl_dma);
	}

}


/**
 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 */
static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
				struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		task->hdr = &cmd->hdr;
		task->hdr_max = sizeof(struct iscsi_hdr);

		if (bnx2i_alloc_bdt(hba, session, cmd))
			goto free_bdts;
	}

	return 0;

free_bdts:
	bnx2i_destroy_cmd_pool(hba, session);
	return -ENOMEM;
}


/**
 * bnx2i_setup_mp_bdt - allocate BD table resources
 * @hba: pointer to adapter structure
 *
 * Allocate memory for dummy buffer and associated BD
 * table to be used by middle path (MP) requests
 */
static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
{
	int rc = 0;
	struct iscsi_bd *mp_bdt;
	u64 addr;

	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					    &hba->mp_bd_dma, GFP_KERNEL);
	if (!hba->mp_bd_tbl) {
		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
		rc = -1;
		goto out;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					       &hba->dummy_buf_dma, GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
		rc = -1;
		goto out;
	}

	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
	addr = (unsigned long) hba->dummy_buf_dma;
	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
	mp_bdt->buffer_addr_hi = addr >> 32;
	mp_bdt->buffer_length = PAGE_SIZE;
	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
	return rc;
}


/**
 * bnx2i_free_mp_bdt - releases the middle path (MP) dummy buffer and BD table
 * @hba: pointer to adapter instance
 *
 * free MP dummy buffer and associated BD table
 */
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
	if (hba->mp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
	}
	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}
	return;
}

/**
 * bnx2i_drop_session - notifies iscsid of connection error.
 * @cls_session: iscsi cls session pointer
 *
 * This notifies iscsid that there is an error, so it can initiate
 * recovery.
 *
 * This relies on caller using the iscsi class iterator so the object
 * is refcounted and does not disappear from under us.
 */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

/**
 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_destroy_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);

	return 0;
}

/**
 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_ofld_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}


/**
 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
	return ep;
}

/**
 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);

	return ep;
}

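/*
 * An endpoint sits on one of three per-hba lists during its lifetime:
 * ep_ofld_list while a connection offload request is outstanding with the
 * hardware, ep_active_list once the TCP connect request has been issued to
 * cnic, and ep_destroy_list while the on-chip context is being torn down.
 * All three lists are protected by ep_rdwr_lock; the add/del helpers take
 * the write lock and the lookup helpers above take the read lock.
 */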
/**
 * bnx2i_ep_active_list_add - add an entry to ep active list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * current active conn queue manager
 */
static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_active_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
}


/**
 * bnx2i_ep_active_list_del - deletes an entry from the ep active list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * current active conn queue manager
 */
static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
}


/**
 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
 * @hba: pointer to adapter instance
 * @shost: scsi host pointer
 *
 * Initializes 'can_queue' parameter based on how many outstanding commands
 * the device can handle. Each device (5708/5709/57710) has different
 * capabilities
 */
static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
					struct Scsi_Host *shost)
{
	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
	else
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
}


/**
 * bnx2i_alloc_hba - allocate and init adapter instance
 * @cnic: cnic device pointer
 *
 * allocate & initialize adapter structure and call other
 * support routines to do per adapter initialization
 */
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
{
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;

	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
	if (!shost)
		return NULL;
	shost->dma_boundary = cnic->pcidev->dma_mask;
	shost->transportt = bnx2i_scsi_xport_template;
	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = 512;
	shost->max_cmd_len = 16;

	hba = iscsi_host_priv(shost);
	hba->shost = shost;
	hba->netdev = cnic->netdev;
	/* Get PCI related information and update hba struct members */
	hba->pcidev = cnic->pcidev;
	pci_dev_get(hba->pcidev);
	hba->pci_did = hba->pcidev->device;
	hba->pci_vid = hba->pcidev->vendor;
	hba->pci_sdid = hba->pcidev->subsystem_device;
	hba->pci_svid = hba->pcidev->subsystem_vendor;
	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);

	bnx2i_identify_device(hba);
	bnx2i_setup_host_queue_size(hba, shost);

	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
		hba->regview = ioremap_nocache(hba->netdev->base_addr,
					       BNX2_MQ_CONFIG2);
		if (!hba->regview)
			goto ioreg_map_err;
	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
		if (!hba->regview)
			goto ioreg_map_err;
	}

	if (bnx2i_setup_mp_bdt(hba))
		goto mp_bdt_mem_err;

	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_active_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	rwlock_init(&hba->ep_rdwr_lock);

	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;

	/* different values for 5708/5709/57710 */
	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;

	if (bnx2i_setup_free_cid_que(hba))
		goto cid_que_err;

	/* SQ/RQ/CQ size can be changed via sysfs interface */
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
	} else {	/* 5706/5708/5709 */
		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
	}

	hba->max_rqes = rq_size;
	hba->max_cqes = hba->max_sqes + rq_size;
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;

	hba->num_ccell = hba->max_sqes / 2;

	spin_lock_init(&hba->lock);
	mutex_init(&hba->net_dev_lock);
	init_waitqueue_head(&hba->eh_wait);
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		hba->hba_shutdown_tmo = 20 * HZ;
		hba->conn_teardown_tmo = 20 * HZ;
		hba->conn_ctx_destroy_tmo = 6 * HZ;
	} else {	/* 5706/5708/5709 */
		hba->hba_shutdown_tmo = 20 * HZ;
		hba->conn_teardown_tmo = 10 * HZ;
		hba->conn_ctx_destroy_tmo = 2 * HZ;
	}

	if (iscsi_host_add(shost, &hba->pcidev->dev))
		goto free_dump_mem;
	return hba;

free_dump_mem:
	bnx2i_release_free_cid_que(hba);
cid_que_err:
	bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
	if (hba->regview) {
		iounmap(hba->regview);
		hba->regview = NULL;
	}
ioreg_map_err:
	pci_dev_put(hba->pcidev);
	scsi_host_put(shost);
	return NULL;
}

/**
 * bnx2i_free_hba - releases hba structure and resources held by the adapter
 * @hba: pointer to adapter instance
 *
 * free adapter structure and call various cleanup routines.
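 *
 * The teardown roughly reverses bnx2i_alloc_hba(): the scsi host is
 * removed, the endpoint lists are re-initialized, the PCI device reference
 * is dropped, register mappings are unmapped, and the MP BD table, the
 * free-cid queue and the host structure itself are released.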
 */
void bnx2i_free_hba(struct bnx2i_hba *hba)
{
	struct Scsi_Host *shost = hba->shost;

	iscsi_host_remove(shost);
	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_active_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	pci_dev_put(hba->pcidev);

	if (hba->regview) {
		iounmap(hba->regview);
		hba->regview = NULL;
	}
	bnx2i_free_mp_bdt(hba);
	bnx2i_release_free_cid_que(hba);
	iscsi_host_free(shost);
}

/**
 * bnx2i_conn_free_login_resources - free DMA resources used for login process
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Login related resources, mostly BDT & payload DMA memory, are freed
 */
static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.resp_bd_tbl,
				  bnx2i_conn->gen_pdu.resp_bd_dma);
		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  bnx2i_conn->gen_pdu.req_bd_tbl,
				  bnx2i_conn->gen_pdu.req_bd_dma);
		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.resp_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.resp_buf,
				  bnx2i_conn->gen_pdu.resp_dma_addr);
		bnx2i_conn->gen_pdu.resp_buf = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.req_buf,
				  bnx2i_conn->gen_pdu.req_dma_addr);
		bnx2i_conn->gen_pdu.req_buf = NULL;
	}
}

/**
 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Mgmt task DMA resources are allocated in this routine.
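 *
 * Four DMA regions are set up per connection: request and response payload
 * buffers of ISCSI_DEF_MAX_RECV_SEG_LEN bytes each, plus a PAGE_SIZE BD
 * table for each buffer.  On any allocation failure the regions acquired
 * so far are freed and -ENOMEM is returned.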
 */
static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	/* Allocate memory for login request/response buffers */
	bnx2i_conn->gen_pdu.req_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.req_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_buf == NULL)
		goto login_req_buf_failure;

	bnx2i_conn->gen_pdu.req_buf_size = 0;
	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;

	bnx2i_conn->gen_pdu.resp_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.resp_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
		goto login_resp_buf_failure;

	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;

	bnx2i_conn->gen_pdu.req_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.req_bd_dma,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
		goto login_req_bd_tbl_failure;

	bnx2i_conn->gen_pdu.resp_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.resp_bd_dma,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
		goto login_resp_bd_tbl_failure;

	return 0;

login_resp_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  bnx2i_conn->gen_pdu.req_bd_tbl,
			  bnx2i_conn->gen_pdu.req_bd_dma);
	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

login_req_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.resp_buf,
			  bnx2i_conn->gen_pdu.resp_dma_addr);
	bnx2i_conn->gen_pdu.resp_buf = NULL;
login_resp_buf_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.req_buf,
			  bnx2i_conn->gen_pdu.req_dma_addr);
	bnx2i_conn->gen_pdu.req_buf = NULL;
login_req_buf_failure:
	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
			  "login resource alloc failed!!\n");
	return -ENOMEM;

}


/**
 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
 * @bnx2i_conn: iscsi connection pointer
 *
 * Prepares the request and response BD tables before shipping PDUs
 * prepared by the 'iscsid' daemon down to cnic
 */
static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_bd *bd_tbl;

	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;

	bd_tbl->buffer_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
				bnx2i_conn->gen_pdu.req_buf;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;

	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
}


/**
 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
 * @task: transport layer task pointer
 *
 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
 * Nop-out and Logout requests flow through this path.
 */
static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	struct bnx2i_conn *bnx2i_conn = cmd->conn;
	int rc = 0;
	char *buf;
	int data_len;

	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		bnx2i_send_iscsi_login(bnx2i_conn, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		data_len = bnx2i_conn->gen_pdu.req_buf_size;
		buf = bnx2i_conn->gen_pdu.req_buf;
		if (data_len)
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     buf, data_len, 1);
		else
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     NULL, 0, 1);
		break;
	case ISCSI_OP_LOGOUT:
		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
		break;
	default:
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "send_gen: unsupported op 0x%x\n",
				  task->hdr->opcode);
	}
	return rc;
}


/**********************************************************************
 *		SCSI-ML Interface
 **********************************************************************/

/**
 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
 * @sc: SCSI-ML command pointer
 * @cmd: iscsi cmd pointer
 */
static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
{
	u32 dword;
	int lpcnt;
	u8 *srcp;
	u32 *dstp;
	u32 scsi_lun[2];

	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);

	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
	srcp = (u8 *) sc->cmnd;
	dstp = (u32 *) cmd->req.cdb;
	while (lpcnt--) {
		memcpy(&dword, (const void *) srcp, 4);
		*dstp = cpu_to_be32(dword);
		srcp += 4;
		dstp++;
	}
	if (sc->cmd_len & 0x3) {
		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
		*dstp = cpu_to_be32(dword);
	}
}

static void bnx2i_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;

	/*
	 * mgmt task or cmd was never sent to us to transmit.
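	 * (!task->sc means this is a mgmt task with no SG list to unmap;
	 * ISCSI_TASK_PENDING means the cmd never made it to the chip.)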
	 */
	if (!task->sc || task->state == ISCSI_TASK_PENDING)
		return;
	/*
	 * need to clean-up task context to claim dma buffers
	 */
	if (task->state == ISCSI_TASK_ABRT_TMF) {
		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

		spin_unlock_bh(&conn->session->lock);
		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
		spin_lock_bh(&conn->session->lock);
	}
	bnx2i_iscsi_unmap_sg_list(task->dd_data);
}

/**
 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
 * @conn: transport layer conn structure pointer
 * @task: transport layer command structure pointer
 */
static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_cmd *cmd = task->dd_data;

	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);

	bnx2i_setup_cmd_wqe_template(cmd);
	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
	if (task->data_count) {
		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
		       task->data_count);
		bnx2i_conn->gen_pdu.req_wr_ptr =
			bnx2i_conn->gen_pdu.req_buf + task->data_count;
	}
	cmd->conn = conn->dd_data;
	cmd->scsi_cmd = NULL;
	return bnx2i_iscsi_send_generic_request(task);
}

/**
 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
 * @task: transport layer command structure pointer
 *
 * maps SG buffers and sends request to chip/firmware in the form of SQ WQE
 */
static int bnx2i_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct bnx2i_cmd *cmd = task->dd_data;
	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;

	/*
	 * If there is no scsi_cmnd this must be a mgmt task
	 */
	if (!sc)
		return bnx2i_mtask_xmit(conn, task);

	bnx2i_setup_cmd_wqe_template(cmd);
	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
	cmd->conn = bnx2i_conn;
	cmd->scsi_cmd = sc;
	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);

	bnx2i_iscsi_map_sg_list(cmd);
	bnx2i_cpy_scsi_cdb(sc, cmd);

	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
		bnx2i_setup_write_cmd_bd_info(task);
	} else {
		if (scsi_bufflen(sc))
			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	}

	cmd->req.num_bds = cmd->io_tbl.bd_valid;
	if (!cmd->io_tbl.bd_valid) {
		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
		cmd->req.num_bds = 1;
	}

	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
	return 0;
}

/**
 * bnx2i_session_create - create a new iscsi session
 * @ep: pointer to iscsi endpoint
 * @cmds_max: max commands supported
 * @qdepth: scsi queue depth to support
 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
 *
 * Creates a new iSCSI session instance on given device.
 */
static struct iscsi_cls_session *
bnx2i_session_create(struct iscsi_endpoint *ep,
		     uint16_t cmds_max, uint16_t qdepth,
		     uint32_t initial_cmdsn)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct bnx2i_hba *hba;
	struct bnx2i_endpoint *bnx2i_ep;

	if (!ep) {
		printk(KERN_ERR "bnx2i: missing ep.\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	shost = bnx2i_ep->hba->shost;
	hba = iscsi_host_priv(shost);
	if (bnx2i_adapter_ready(hba))
		return NULL;

	/*
	 * user can override hw limit as long as it is within
	 * the min/max.
	 */
	if (cmds_max > hba->max_sqes)
		cmds_max = hba->max_sqes;
	else if (cmds_max < BNX2I_SQ_WQES_MIN)
		cmds_max = BNX2I_SQ_WQES_MIN;

	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
					  cmds_max, 0, sizeof(struct bnx2i_cmd),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
		goto session_teardown;
	return cls_session;

session_teardown:
	iscsi_session_teardown(cls_session);
	return NULL;
}


/**
 * bnx2i_session_destroy - destroys iscsi session
 * @cls_session: pointer to iscsi cls session
 *
 * Destroys previously created iSCSI session instance and releases
 * all resources held by it
 */
static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);

	bnx2i_destroy_cmd_pool(hba, session);
	iscsi_session_teardown(cls_session);
}


/**
 * bnx2i_conn_create - create iscsi connection instance
 * @cls_session: pointer to iscsi cls session
 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
 *
 * Creates a new iSCSI connection instance for a given session
 */
static struct iscsi_cls_conn *
bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;

	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), cid);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;

	bnx2i_conn = conn->dd_data;
	bnx2i_conn->cls_conn = cls_conn;
	bnx2i_conn->hba = hba;
	/* 'ep' ptr will be assigned in bind() call */
	bnx2i_conn->ep = NULL;
	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);

	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
		iscsi_conn_printk(KERN_ALERT, conn,
				  "conn_new: login resc alloc failed!!\n");
		goto free_conn;
	}

	return cls_conn;

free_conn:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}

/**
 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
 * @cls_session: pointer to iscsi cls session
 * @cls_conn: pointer to iscsi cls conn
 * @transport_fd: 64-bit EP handle
 * @is_leading: leading connection on this session?
1368 * 1369 * Binds together iSCSI session instance, iSCSI connection instance 1370 * and the TCP connection. This routine returns error code if 1371 * TCP connection does not belong on the device iSCSI sess/conn 1372 * is bound 1373 */ 1374 static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, 1375 struct iscsi_cls_conn *cls_conn, 1376 uint64_t transport_fd, int is_leading) 1377 { 1378 struct iscsi_conn *conn = cls_conn->dd_data; 1379 struct bnx2i_conn *bnx2i_conn = conn->dd_data; 1380 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); 1381 struct bnx2i_hba *hba = iscsi_host_priv(shost); 1382 struct bnx2i_endpoint *bnx2i_ep; 1383 struct iscsi_endpoint *ep; 1384 int ret_code; 1385 1386 ep = iscsi_lookup_endpoint(transport_fd); 1387 if (!ep) 1388 return -EINVAL; 1389 /* 1390 * Forcefully terminate all in progress connection recovery at the 1391 * earliest, either in bind(), send_pdu(LOGIN), or conn_start() 1392 */ 1393 if (bnx2i_adapter_ready(hba)) 1394 return -EIO; 1395 1396 bnx2i_ep = ep->dd_data; 1397 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || 1398 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) 1399 /* Peer disconnect via' FIN or RST */ 1400 return -EINVAL; 1401 1402 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 1403 return -EINVAL; 1404 1405 if (bnx2i_ep->hba != hba) { 1406 /* Error - TCP connection does not belong to this device 1407 */ 1408 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, 1409 "conn bind, ep=0x%p (%s) does not", 1410 bnx2i_ep, bnx2i_ep->hba->netdev->name); 1411 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, 1412 "belong to hba (%s)\n", 1413 hba->netdev->name); 1414 return -EEXIST; 1415 } 1416 bnx2i_ep->conn = bnx2i_conn; 1417 bnx2i_conn->ep = bnx2i_ep; 1418 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; 1419 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; 1420 1421 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, 1422 bnx2i_ep->ep_iscsi_cid); 1423 1424 /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710 1425 * driver needs to explicitly replenish RQ index during setup. 
	 */
	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		bnx2i_put_rq_buf(bnx2i_conn, 0);

	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
	return ret_code;
}


/**
 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
 * @cls_conn: pointer to iscsi cls conn
 *
 * Destroy an iSCSI connection instance and release memory resources held by
 * this connection
 */
static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;

	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
	hba = iscsi_host_priv(shost);

	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
	iscsi_conn_teardown(cls_conn);
}


/**
 * bnx2i_conn_get_param - return iscsi connection parameter to caller
 * @cls_conn: pointer to iscsi cls conn
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * returns iSCSI connection parameters
 */
static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
				enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	int len = 0;

	if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
		goto out;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
		if (bnx2i_conn->ep->cm_sk)
			len = sprintf(buf, "%hu\n",
				      bnx2i_conn->ep->cm_sk->dst_port);
		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
		if (bnx2i_conn->ep->cm_sk)
			len = sprintf(buf, "%pI4\n",
				      &bnx2i_conn->ep->cm_sk->dst_ip);
		mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}
out:
	return len;
}

/**
 * bnx2i_host_get_param - returns host (adapter) related parameters
 * @shost: scsi host pointer
 * @param: parameter type identifier
 * @buf: buffer pointer
 */
static int bnx2i_host_get_param(struct Scsi_Host *shost,
				enum iscsi_host_param param, char *buf)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	int len = 0;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", hba->netdev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS: {
		struct list_head *active_list = &hba->ep_active_list;

		read_lock_bh(&hba->ep_rdwr_lock);
		if (!list_empty(&hba->ep_active_list)) {
			struct bnx2i_endpoint *bnx2i_ep;
			struct cnic_sock *csk;

			bnx2i_ep = list_first_entry(active_list,
						    struct bnx2i_endpoint,
						    link);
			csk = bnx2i_ep->cm_sk;
			if (test_bit(SK_F_IPV6, &csk->flags))
				len = sprintf(buf, "%pI6\n", csk->src_ip);
			else
				len = sprintf(buf, "%pI4\n", csk->src_ip);
		}
		read_unlock_bh(&hba->ep_rdwr_lock);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}
	return len;
}

/**
 * bnx2i_conn_start - completes iscsi connection migration to FFP
 * @cls_conn: pointer to iscsi cls conn
 *
 * last call in FFP migration to handover iscsi conn to the driver
 */
static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;

	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
	bnx2i_update_iscsi_conn(conn);

	/*
	 * this should normally not sleep for a long time so it should
	 * not disrupt the caller.
	 */
	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
	add_timer(&bnx2i_conn->ep->ofld_timer);
	/* update iSCSI context for this conn, wait for CNIC to complete */
	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_conn->ep->ofld_timer);

	iscsi_conn_start(cls_conn);
	return 0;
}


/**
 * bnx2i_conn_get_stats - returns iSCSI stats
 * @cls_conn: pointer to iscsi cls conn
 * @stats: pointer to iscsi statistic struct
 */
static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				 struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
	stats->custom_length = 1;
}


/**
 * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
 * @dst_addr: target IP address
 *
 * check if route resolves to BNX2 device
 */
static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic = NULL;

	hba = get_adapter_list_head();
	if (hba && hba->cnic)
		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
	if (!cnic) {
		printk(KERN_ALERT "bnx2i: no route, "
				  "can't connect using cnic\n");
		goto no_nx2_route;
	}
	hba = bnx2i_find_hba_for_cnic(cnic);
	if (!hba)
		goto no_nx2_route;

	if (bnx2i_adapter_ready(hba)) {
		printk(KERN_ALERT "bnx2i: check route, hba not found\n");
		goto no_nx2_route;
	}
	if (hba->netdev->mtu > hba->mtu_supported) {
		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
		       hba->netdev->name, hba->netdev->mtu);
		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
		       hba->mtu_supported);
		goto no_nx2_route;
	}
	return hba;
no_nx2_route:
	return NULL;
}


/**
 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
 * @hba: pointer to adapter instance
 * @ep: endpoint (transport identifier) structure
 *
 * destroys cm_sock structure and on-chip iscsi context
 */
static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
				struct bnx2i_endpoint *ep)
{
	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
		hba->cnic->cm_destroy(ep->cm_sk);

	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
		if (ep->conn && ep->conn->cls_conn &&
		    ep->conn->cls_conn->dd_data) {
			struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;

			/* Must suspend all rx queue activity for this ep */
			set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
		}
		/* CONN_DISCONNECT timeout may or may not be an issue depending
		 * on what transpired in the TCP layer, different targets
		 * behave differently
		 */
		printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
				  "please submit GRC Dump, NW/PCIe trace, "
				  "driver msgs to developers for analysis\n",
				  hba->netdev->name);
	}

	ep->state = EP_STATE_CLEANUP_START;
	init_timer(&ep->ofld_timer);
	ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	ep->ofld_timer.data = (unsigned long) ep;
	add_timer(&ep->ofld_timer);

	bnx2i_ep_destroy_list_add(hba, ep);

	/* destroy iSCSI context, wait for it to complete */
	if (bnx2i_send_conn_destroy(hba, ep))
		ep->state = EP_STATE_CLEANUP_CMPL;

	wait_event_interruptible(ep->ofld_wait,
				 (ep->state != EP_STATE_CLEANUP_START));

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&ep->ofld_timer);

	bnx2i_ep_destroy_list_del(hba, ep);

	if (ep->state != EP_STATE_CLEANUP_CMPL)
		/* should never happen */
		printk(KERN_ALERT "bnx2i - conn destroy failed\n");

	return 0;
}


/**
 * bnx2i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * this routine initiates the TCP/IP connection by invoking Option-2 i/f
 * with l5_core and the CNIC.
 * This is a multi-step process of resolving route to target, creating an
 * iscsi connection context, handshaking with CNIC module to
 * create/initialize the socket struct and finally sending down option-2
 * request to complete TCP 3-way handshake
 */
static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
					       struct sockaddr *dst_addr,
					       int non_blocking)
{
	u32 iscsi_cid = BNX2I_CID_RESERVED;
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct sockaddr_in6 *desti6;
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic;
	struct cnic_sockaddr saddr;
	struct iscsi_endpoint *ep;
	int rc = 0;

	if (shost) {
		/* driver is given scsi host to work with */
		hba = iscsi_host_priv(shost);
	} else
		/*
		 * check if the given destination can be reached through
		 * an iscsi capable NetXtreme2 device
		 */
		hba = bnx2i_check_route(dst_addr);

	if (!hba) {
		rc = -EINVAL;
		goto nohba;
	}
	mutex_lock(&hba->net_dev_lock);

	if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
		rc = -EPERM;
		goto check_busy;
	}
	cnic = hba->cnic;
	ep = bnx2i_alloc_ep(hba);
	if (!ep) {
		rc = -ENOMEM;
		goto check_busy;
	}
	bnx2i_ep = ep->dd_data;

	bnx2i_ep->num_active_cmds = 0;
	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
	if (iscsi_cid == -1) {
		printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
			"iscsi cid\n", hba->netdev->name);
		rc = -ENOMEM;
		bnx2i_free_ep(ep);
		goto check_busy;
	}
	bnx2i_ep->hba_age = hba->age;

	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
	if (rc != 0) {
		printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
			"\n", hba->netdev->name);
		rc = -ENOMEM;
		goto qp_resc_err;
	}

	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
	bnx2i_ep->state = EP_STATE_OFLD_START;
	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);

	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
			rc = -EBUSY;
		} else
			rc = -ENOSPC;
		printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
			"\n", hba->netdev->name);
		bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
		goto conn_failed;
	}

	/* Wait for CNIC hardware to setup conn context and return 'cid' */
	wait_event_interruptible(bnx2i_ep->ofld_wait,
				 bnx2i_ep->state != EP_STATE_OFLD_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);

	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);

	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
			rc = -EBUSY;
		} else
			rc = -ENOSPC;
		goto conn_failed;
	}

	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
	if (rc) {
		rc = -EINVAL;
		/* Need to terminate and cleanup the connection */
		goto release_ep;
	}

	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);

	memset(&saddr, 0, sizeof(saddr));
	if (dst_addr->sa_family == AF_INET) {
		desti = (struct sockaddr_in *) dst_addr;
		saddr.remote.v4 = *desti;
		saddr.local.v4.sin_family = desti->sin_family;
	} else if (dst_addr->sa_family == AF_INET6) {
		desti6 = (struct sockaddr_in6 *) dst_addr;
		saddr.remote.v6 = *desti6;
		saddr.local.v6.sin6_family = desti6->sin6_family;
	}

	bnx2i_ep->timestamp = jiffies;
	bnx2i_ep->state = EP_STATE_CONNECT_START;
	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		rc = -EINVAL;
		goto conn_failed;
	} else
		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
	if (rc)
		goto release_ep;

	bnx2i_ep_active_list_add(hba, bnx2i_ep);

	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
		goto del_active_ep;

	mutex_unlock(&hba->net_dev_lock);
	return ep;

del_active_ep:
	bnx2i_ep_active_list_del(hba, bnx2i_ep);
release_ep:
	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return ERR_PTR(rc);
	}
conn_failed:
	bnx2i_free_qp_resc(hba, bnx2i_ep);
qp_resc_err:
	bnx2i_free_ep(ep);
check_busy:
	mutex_unlock(&hba->net_dev_lock);
nohba:
	return ERR_PTR(rc);
}


/**
 * bnx2i_ep_poll - polls for TCP connection establishment
 * @ep: TCP connection (endpoint) handle
 * @timeout_ms: timeout value in milliseconds
 *
 * polls for TCP connect request to complete
 */
static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct bnx2i_endpoint *bnx2i_ep;
	int rc = 0;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
		return -1;
	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
		return 1;

	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
					      ((bnx2i_ep->state ==
						EP_STATE_OFLD_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_COMPL)),
					      msecs_to_jiffies(timeout_ms));
	if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
		rc = -1;

	if (rc > 0)
		return 1;
	else if (!rc)
		return 0;	/* timeout */
	else
		return rc;
}


/**
 * bnx2i_ep_tcp_conn_active - check EP state transition
 * @bnx2i_ep: endpoint pointer
 *
 * check if underlying TCP connection is active
 */
static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
{
	int ret;
	int cnic_dev_10g = 0;

	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		cnic_dev_10g = 1;

	switch (bnx2i_ep->state) {
	case EP_STATE_CONNECT_FAILED:
	case EP_STATE_CLEANUP_FAILED:
	case EP_STATE_OFLD_FAILED:
	case EP_STATE_DISCONN_TIMEDOUT:
		ret = 0;
		break;
	case EP_STATE_CONNECT_START:
	case EP_STATE_CONNECT_COMPL:
	case EP_STATE_ULP_UPDATE_START:
	case EP_STATE_ULP_UPDATE_COMPL:
	case EP_STATE_TCP_FIN_RCVD:
	case EP_STATE_LOGOUT_SENT:
	case EP_STATE_LOGOUT_RESP_RCVD:
	case EP_STATE_ULP_UPDATE_FAILED:
		ret = 1;
		break;
	case EP_STATE_TCP_RST_RCVD:
EP_STATE_TCP_RST_RCVD: 1955 if (cnic_dev_10g) 1956 ret = 0; 1957 else 1958 ret = 1; 1959 break; 1960 default: 1961 ret = 0; 1962 } 1963 1964 return ret; 1965 } 1966 1967 1968 /* 1969 * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw 1970 * @ep: TCP connection (bnx2i endpoint) handle 1971 * 1972 * executes TCP connection teardown process 1973 */ 1974 int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) 1975 { 1976 struct bnx2i_hba *hba = bnx2i_ep->hba; 1977 struct cnic_dev *cnic; 1978 struct iscsi_session *session = NULL; 1979 struct iscsi_conn *conn = NULL; 1980 int ret = 0; 1981 int close = 0; 1982 int close_ret = 0; 1983 1984 if (!hba) 1985 return 0; 1986 1987 cnic = hba->cnic; 1988 if (!cnic) 1989 return 0; 1990 1991 if (bnx2i_ep->state == EP_STATE_IDLE || 1992 bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) 1993 return 0; 1994 1995 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) 1996 goto destroy_conn; 1997 1998 if (bnx2i_ep->conn) { 1999 conn = bnx2i_ep->conn->cls_conn->dd_data; 2000 session = conn->session; 2001 } 2002 2003 init_timer(&bnx2i_ep->ofld_timer); 2004 bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies; 2005 bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; 2006 bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; 2007 add_timer(&bnx2i_ep->ofld_timer); 2008 2009 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) 2010 goto out; 2011 2012 if (session) { 2013 spin_lock_bh(&session->lock); 2014 if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) { 2015 if (session->state == ISCSI_STATE_LOGGING_OUT) { 2016 if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { 2017 /* Logout sent, but no resp */ 2018 printk(KERN_ALERT "bnx2i (%s): WARNING" 2019 " logout response was not " 2020 "received!\n", 2021 bnx2i_ep->hba->netdev->name); 2022 } else if (bnx2i_ep->state == 2023 EP_STATE_LOGOUT_RESP_RCVD) 2024 close = 1; 2025 } 2026 } else 2027 close = 1; 2028 2029 spin_unlock_bh(&session->lock); 2030 } 2031 2032 bnx2i_ep->state = EP_STATE_DISCONN_START; 2033 2034 if (close) 2035 close_ret = cnic->cm_close(bnx2i_ep->cm_sk); 2036 else 2037 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); 2038 2039 if (close_ret) 2040 printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n", 2041 bnx2i_ep->hba->netdev->name, close, close_ret); 2042 else 2043 /* wait for option-2 conn teardown */ 2044 wait_event_interruptible(bnx2i_ep->ofld_wait, 2045 bnx2i_ep->state != EP_STATE_DISCONN_START); 2046 2047 if (signal_pending(current)) 2048 flush_signals(current); 2049 del_timer_sync(&bnx2i_ep->ofld_timer); 2050 2051 destroy_conn: 2052 bnx2i_ep_active_list_del(hba, bnx2i_ep); 2053 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) 2054 return -EINVAL; 2055 out: 2056 bnx2i_ep->state = EP_STATE_IDLE; 2057 return ret; 2058 } 2059 2060 2061 /** 2062 * bnx2i_ep_disconnect - executes TCP connection teardown process 2063 * @ep: TCP connection (iscsi endpoint) handle 2064 * 2065 * executes TCP connection teardown process 2066 */ 2067 static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) 2068 { 2069 struct bnx2i_endpoint *bnx2i_ep; 2070 struct bnx2i_conn *bnx2i_conn = NULL; 2071 struct iscsi_conn *conn = NULL; 2072 struct bnx2i_hba *hba; 2073 2074 bnx2i_ep = ep->dd_data; 2075 2076 /* driver should not attempt connection cleanup until TCP_CONNECT 2077 * completes either successfully or fails. 
The TCP connect is
	 * expected to time out within 9 seconds, so allow up to 12 seconds
	 * here for it to complete.
	 */
	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
		msleep(250);

	if (bnx2i_ep->conn) {
		bnx2i_conn = bnx2i_ep->conn;
		conn = bnx2i_conn->cls_conn->dd_data;
		iscsi_suspend_queue(conn);
	}
	hba = bnx2i_ep->hba;

	mutex_lock(&hba->net_dev_lock);

	if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
		goto out;

	if (bnx2i_ep->state == EP_STATE_IDLE)
		goto free_resc;

	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    (bnx2i_ep->hba_age != hba->age)) {
		bnx2i_ep_active_list_del(hba, bnx2i_ep);
		goto free_resc;
	}

	/* Do all chip cleanup here */
	if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return;
	}
free_resc:
	bnx2i_free_qp_resc(hba, bnx2i_ep);

	if (bnx2i_conn)
		bnx2i_conn->ep = NULL;

	bnx2i_free_ep(ep);
out:
	mutex_unlock(&hba->net_dev_lock);

	wake_up_interruptible(&hba->eh_wait);
}


/**
 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
 * @shost: scsi host pointer
 * @params: pointer to the iscsi path message
 *
 * handles the iscsi path update request by passing it on to the cnic driver
 */
static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	char *buf = (char *) params;
	u16 len = sizeof(*params);

	/* handled by cnic driver */
	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
				     len);

	return 0;
}


/*
 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
 * used while registering with the scsi host and iSCSI transport module.
 */
static struct scsi_host_template bnx2i_host_template = {
	.module			= THIS_MODULE,
	.name			= "Broadcom Offload iSCSI Initiator",
	.proc_name		= "bnx2i",
	.queuecommand		= iscsi_queuecommand,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.change_queue_depth	= iscsi_change_queue_depth,
	.can_queue		= 1024,
	.max_sectors		= 127,
	.cmd_per_lun		= 32,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
	.shost_attrs		= bnx2i_dev_attributes,
};

struct iscsi_transport bnx2i_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "bnx2i",
	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
				  CAP_MULTI_R2T | CAP_DATADGST |
				  CAP_DATA_PATH_OFFLOAD,
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
				  ISCSI_CONN_ADDRESS |
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
				  ISCSI_TARGET_NAME | ISCSI_TPGT |
				  ISCSI_USERNAME | ISCSI_PASSWORD |
				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
				  ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				  ISCSI_HOST_NETDEV_NAME,
	.create_session		= bnx2i_session_create,
	.destroy_session	= bnx2i_session_destroy,
	.create_conn		= bnx2i_conn_create,
	.bind_conn		= bnx2i_conn_bind,
	.destroy_conn		= bnx2i_conn_destroy,
	.set_param		= iscsi_set_param,
	.get_conn_param		= bnx2i_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.get_host_param		= bnx2i_host_get_param,
	.start_conn		= bnx2i_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= bnx2i_task_xmit,
	.get_stats		= bnx2i_conn_get_stats,
	/* TCP connect - disconnect - option-2 interface calls */
	.ep_connect		= bnx2i_ep_connect,
	.ep_poll		= bnx2i_ep_poll,
	.ep_disconnect		= bnx2i_ep_disconnect,
	.set_path		= bnx2i_nl_set_path,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.cleanup_task		= bnx2i_cleanup_task,
};
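
/*
 * Illustrative sketch only, kept out of the build with "#if 0": the
 * transport template above is typically registered with the iSCSI
 * transport class at module load time and unregistered on exit.  In
 * bnx2i this registration is expected to live in the driver's module
 * init/exit code rather than in this file; the example_* function
 * names below are hypothetical placeholders.
 */
#if 0
static int __init example_bnx2i_register_transport(void)
{
	/*
	 * iscsi_register_transport() returns the scsi transport template,
	 * which the driver keeps in bnx2i_scsi_xport_template for use when
	 * SCSI hosts are created later.
	 */
	bnx2i_scsi_xport_template =
			iscsi_register_transport(&bnx2i_iscsi_transport);
	if (!bnx2i_scsi_xport_template) {
		printk(KERN_ERR "bnx2i: iscsi transport registration failed\n");
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_bnx2i_unregister_transport(void)
{
	iscsi_unregister_transport(&bnx2i_iscsi_transport);
}
#endif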