/*
 *  linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include "esas2r.h"

/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */

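/*
 * Flow sketch (descriptive summary of the handlers below):
 * handle_smp_ioctl(), handle_csmi_ioctl() and handle_hba_ioctl() each fill
 * out a struct esas2r_buffered_ioctl and pass it to
 * handle_buffered_ioctl(), which serializes callers on
 * buffered_ioctl_semaphore, (re)allocates the shared DMA buffer if it is
 * missing or too small, copies the caller's data in, invokes bi->callback
 * to build and start the firmware request, waits for completion when
 * needed, and copies the result back out of the DMA buffer.
 */
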
u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
                                       struct esas2r_request *,
                                       struct esas2r_sg_context *,
                                       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
                                             struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
        struct esas2r_adapter *a;
        void *ioctl;
        u32 length;
        u32 control_code;
        u32 offset;
        BUFFERED_IOCTL_CALLBACK callback;
        void *context;
        BUFFERED_IOCTL_DONE_CALLBACK done_callback;
        void *done_context;
};

static void complete_fm_api_req(struct esas2r_adapter *a,
                                struct esas2r_request *rq)
{
        a->fm_api_command_done = 1;
        wake_up_interruptible(&a->fm_api_waiter);
}

/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
        struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
        int offset = sgc->cur_offset - a->save_offset;

        (*addr) = a->firmware.phys + offset;
        return a->firmware.orig_len - offset;
}

static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
        struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
        int offset = sgc->cur_offset - a->save_offset;

        (*addr) = a->firmware.header_buff_phys + offset;
        return sizeof(struct esas2r_flash_img) - offset;
}

/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
        struct esas2r_request *rq;

        if (down_interruptible(&a->fm_api_semaphore)) {
                fi->status = FI_STAT_BUSY;
                return;
        }

        rq = esas2r_alloc_request(a);
        if (rq == NULL) {
                fi->status = FI_STAT_BUSY;
                goto free_sem;
        }

        if (fi == &a->firmware.header) {
                a->firmware.header_buff =
                        dma_alloc_coherent(&a->pcid->dev,
                                           sizeof(struct esas2r_flash_img),
                                           (dma_addr_t *)&a->firmware.header_buff_phys,
                                           GFP_KERNEL);

                if (a->firmware.header_buff == NULL) {
                        esas2r_debug("failed to allocate header buffer!");
                        fi->status = FI_STAT_BUSY;
                        goto free_req;
                }

                memcpy(a->firmware.header_buff, fi,
                       sizeof(struct esas2r_flash_img));
                a->save_offset = a->firmware.header_buff;
                a->fm_api_sgc.get_phys_addr =
                        (PGETPHYSADDR)get_physaddr_fm_api_header;
        } else {
                a->save_offset = (u8 *)fi;
                a->fm_api_sgc.get_phys_addr =
                        (PGETPHYSADDR)get_physaddr_fm_api;
        }

        rq->comp_cb = complete_fm_api_req;
        a->fm_api_command_done = 0;
        a->fm_api_sgc.cur_offset = a->save_offset;

        if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
                           &a->fm_api_sgc))
                goto all_done;

        /* Now wait around for it to complete. */
        while (!a->fm_api_command_done)
                wait_event_interruptible(a->fm_api_waiter,
                                         a->fm_api_command_done);
all_done:
        if (fi == &a->firmware.header) {
                memcpy(fi, a->firmware.header_buff,
                       sizeof(struct esas2r_flash_img));

                dma_free_coherent(&a->pcid->dev,
                                  sizeof(struct esas2r_flash_img),
                                  a->firmware.header_buff,
                                  (dma_addr_t)a->firmware.header_buff_phys);
        }
free_req:
        esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
        up(&a->fm_api_semaphore);
}

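/*
 * Note on the completion pattern used throughout this file: each
 * complete_*_req() callback sets a *_command_done flag and wakes the
 * matching wait queue, while the issuing path loops on
 * wait_event_interruptible() until the flag is set.  The loop guards
 * against a signal waking the sleeper before the firmware request has
 * actually completed.
 */
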
static void complete_nvr_req(struct esas2r_adapter *a,
                             struct esas2r_request *rq)
{
        a->nvram_command_done = 1;
        wake_up_interruptible(&a->nvram_waiter);
}

/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
                                       u64 *addr)
{
        int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

        (*addr) = esas2r_buffered_ioctl_addr + offset;
        return esas2r_buffered_ioctl_size - offset;
}

static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
                                        struct esas2r_request *rq)
{
        a->buffered_ioctl_done = 1;
        wake_up_interruptible(&a->buffered_ioctl_waiter);
}

static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
        struct esas2r_adapter *a = bi->a;
        struct esas2r_request *rq;
        struct esas2r_sg_context sgc;
        u8 result = IOCTL_SUCCESS;

        if (down_interruptible(&buffered_ioctl_semaphore))
                return IOCTL_OUT_OF_RESOURCES;

        /* allocate a buffer or use the existing buffer. */
        if (esas2r_buffered_ioctl) {
                if (esas2r_buffered_ioctl_size < bi->length) {
                        /* free the too-small buffer and get a new one */
                        dma_free_coherent(&a->pcid->dev,
                                          (size_t)esas2r_buffered_ioctl_size,
                                          esas2r_buffered_ioctl,
                                          esas2r_buffered_ioctl_addr);

                        goto allocate_buffer;
                }
        } else {
allocate_buffer:
                esas2r_buffered_ioctl_size = bi->length;
                esas2r_buffered_ioctl_pcid = a->pcid;
                esas2r_buffered_ioctl =
                        dma_alloc_coherent(&a->pcid->dev,
                                           (size_t)esas2r_buffered_ioctl_size,
                                           &esas2r_buffered_ioctl_addr,
                                           GFP_KERNEL);
        }

        if (!esas2r_buffered_ioctl) {
                esas2r_log(ESAS2R_LOG_CRIT,
                           "could not allocate %d bytes of consistent memory "
                           "for a buffered ioctl!",
                           bi->length);

                esas2r_debug("buffered ioctl alloc failure");
                result = IOCTL_OUT_OF_RESOURCES;
                goto exit_cleanly;
        }

        memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

        rq = esas2r_alloc_request(a);
        if (rq == NULL) {
                esas2r_log(ESAS2R_LOG_CRIT,
                           "could not allocate an internal request");

                result = IOCTL_OUT_OF_RESOURCES;
                esas2r_debug("buffered ioctl - no requests");
                goto exit_cleanly;
        }

        a->buffered_ioctl_done = 0;
        rq->comp_cb = complete_buffered_ioctl_req;
        sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
        sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
        sgc.length = esas2r_buffered_ioctl_size;

        if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
                /* completed immediately, no need to wait */
                a->buffered_ioctl_done = 0;
                goto free_andexit_cleanly;
        }

        /* Now wait around for it to complete. */
        while (!a->buffered_ioctl_done)
                wait_event_interruptible(a->buffered_ioctl_waiter,
                                         a->buffered_ioctl_done);

free_andexit_cleanly:
        if (result == IOCTL_SUCCESS && bi->done_callback)
                (*bi->done_callback)(a, rq, bi->done_context);

        esas2r_free_request(a, rq);

exit_cleanly:
        if (result == IOCTL_SUCCESS)
                memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

        up(&buffered_ioctl_semaphore);
        return result;
}

/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
                              struct esas2r_request *rq,
                              struct esas2r_sg_context *sgc, void *context)
{
        struct atto_ioctl_smp *si =
                (struct atto_ioctl_smp *)esas2r_buffered_ioctl;

        esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
        esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

        if (!esas2r_build_sg_list(a, rq, sgc)) {
                si->status = ATTO_STS_OUT_OF_RSRC;
                return false;
        }

        esas2r_start_request(a, rq);
        return true;
}

static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
        struct esas2r_buffered_ioctl bi;

        memset(&bi, 0, sizeof(bi));

        bi.a = a;
        bi.ioctl = si;
        bi.length = sizeof(struct atto_ioctl_smp)
                    + le32_to_cpu(si->req_length)
                    + le32_to_cpu(si->rsp_length);
        bi.offset = 0;
        bi.callback = smp_ioctl_callback;
        return handle_buffered_ioctl(&bi);
}

/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
        rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
        rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

        /* Now call the original completion callback. */
        (*rq->aux_req_cb)(a, rq);
}

/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
                              union atto_ioctl_csmi *ci,
                              struct esas2r_request *rq,
                              struct esas2r_sg_context *sgc,
                              u32 ctrl_code,
                              u16 target_id)
{
        struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

        if (test_bit(AF_DEGRADED_MODE, &a->flags))
                return false;

        esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
        esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
        ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
        ioctl->csmi.target_id = cpu_to_le16(target_id);
        ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

        /*
         * Always usurp the completion callback since the interrupt callback
         * mechanism may be used.
         */
        rq->aux_req_cx = ci;
        rq->aux_req_cb = rq->comp_cb;
        rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

        if (!esas2r_build_sg_list(a, rq, sgc))
                return false;

        esas2r_start_request(a, rq);
        return true;
}

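/*
 * check_lun() accepts only single-level LUNs that fit in one byte: every
 * byte of the 8-byte SCSI LUN must be zero except byte 1, which carries
 * the LUN number itself.  For example, LUN 5 arrives as
 * { 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } and passes the check.
 */
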
static bool check_lun(struct scsi_lun lun)
{
        bool result;

        result = ((lun.scsi_lun[7] == 0) &&
                  (lun.scsi_lun[6] == 0) &&
                  (lun.scsi_lun[5] == 0) &&
                  (lun.scsi_lun[4] == 0) &&
                  (lun.scsi_lun[3] == 0) &&
                  (lun.scsi_lun[2] == 0) &&
                  /* Byte 1 is intentionally skipped */
                  (lun.scsi_lun[0] == 0));

        return result;
}

static int csmi_ioctl_callback(struct esas2r_adapter *a,
                               struct esas2r_request *rq,
                               struct esas2r_sg_context *sgc, void *context)
{
        struct atto_csmi *ci = (struct atto_csmi *)context;
        union atto_ioctl_csmi *ioctl_csmi =
                (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
        u8 path = 0;
        u8 tid = 0;
        u8 lun = 0;
        u32 sts = CSMI_STS_SUCCESS;
        struct esas2r_target *t;
        unsigned long flags;

        if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
                struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

                path = gda->path_id;
                tid = gda->target_id;
                lun = gda->lun;
        } else if (ci->control_code == CSMI_CC_TASK_MGT) {
                struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

                path = tm->path_id;
                tid = tm->target_id;
                lun = tm->lun;
        }

        if (path > 0) {
                rq->func_rsp.ioctl_rsp.csmi.csmi_status =
                        cpu_to_le32(CSMI_STS_INV_PARAM);
                return false;
        }

        rq->target_id = tid;
        rq->vrq->scsi.flags |= cpu_to_le32(lun);

        switch (ci->control_code) {
        case CSMI_CC_GET_DRVR_INFO:
        {
                struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

                strcpy(gdi->description, esas2r_get_model_name(a));
                gdi->csmi_major_rev = CSMI_MAJOR_REV;
                gdi->csmi_minor_rev = CSMI_MINOR_REV;
                break;
        }

        case CSMI_CC_GET_CNTLR_CFG:
        {
                struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

                gcc->base_io_addr = 0;
                pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
                                      &gcc->base_memaddr_lo);
                pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
                                      &gcc->base_memaddr_hi);
                gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
                                          a->pcid->subsystem_vendor);
                gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
                gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
                gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
                gcc->pci_addr.bus_num = a->pcid->bus->number;
                gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
                gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

                memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

                gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
                gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
                gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
                gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
                gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
                gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
                gcc->bios_build_rev = LOWORD(a->flash_ver);

                if (test_bit(AF2_THUNDERLINK, &a->flags2))
                        gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
                                           | CSMI_CNTLRF_SATA_HBA;
                else
                        gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
                                           | CSMI_CNTLRF_SATA_RAID;

                gcc->rrom_major_rev = 0;
                gcc->rrom_minor_rev = 0;
                gcc->rrom_build_rev = 0;
                gcc->rrom_release_rev = 0;
                gcc->rrom_biosmajor_rev = 0;
                gcc->rrom_biosminor_rev = 0;
                gcc->rrom_biosbuild_rev = 0;
                gcc->rrom_biosrelease_rev = 0;
                break;
        }

        case CSMI_CC_GET_CNTLR_STS:
        {
                struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

                if (test_bit(AF_DEGRADED_MODE, &a->flags))
                        gcs->status = CSMI_CNTLR_STS_FAILED;
                else
                        gcs->status = CSMI_CNTLR_STS_GOOD;

                gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
                break;
        }

        case CSMI_CC_FW_DOWNLOAD:
        case CSMI_CC_GET_RAID_INFO:
        case CSMI_CC_GET_RAID_CFG:

                sts = CSMI_STS_BAD_CTRL_CODE;
                break;

        case CSMI_CC_SMP_PASSTHRU:
        case CSMI_CC_SSP_PASSTHRU:
        case CSMI_CC_STP_PASSTHRU:
        case CSMI_CC_GET_PHY_INFO:
        case CSMI_CC_SET_PHY_INFO:
        case CSMI_CC_GET_LINK_ERRORS:
        case CSMI_CC_GET_SATA_SIG:
        case CSMI_CC_GET_CONN_INFO:
        case CSMI_CC_PHY_CTRL:

                if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
                                       ci->control_code,
                                       ESAS2R_TARG_ID_INV)) {
                        sts = CSMI_STS_FAILED;
                        break;
                }

                return true;

        case CSMI_CC_GET_SCSI_ADDR:
        {
                struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

                struct scsi_lun lun;

                memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

                if (!check_lun(lun)) {
                        sts = CSMI_STS_NO_SCSI_ADDR;
                        break;
                }

                /* make sure the device is present */
                spin_lock_irqsave(&a->mem_lock, flags);
                t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
                spin_unlock_irqrestore(&a->mem_lock, flags);

                if (t == NULL) {
                        sts = CSMI_STS_NO_SCSI_ADDR;
                        break;
                }

                gsa->host_index = 0xFF;
                gsa->lun = gsa->sas_lun[1];
                rq->target_id = esas2r_targ_get_id(t, a);
                break;
        }

        case CSMI_CC_GET_DEV_ADDR:
        {
                struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

                /* make sure the target is present */
                t = a->targetdb + rq->target_id;

                if (t >= a->targetdb_end
                    || t->target_state != TS_PRESENT
                    || t->sas_addr == 0) {
                        sts = CSMI_STS_NO_DEV_ADDR;
                        break;
                }

                /* fill in the result */
                *(u64 *)gda->sas_addr = t->sas_addr;
                memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
                gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
                break;
        }

        case CSMI_CC_TASK_MGT:

                /* make sure the target is present */
                t = a->targetdb + rq->target_id;

                if (t >= a->targetdb_end
                    || t->target_state != TS_PRESENT
                    || !(t->flags & TF_PASS_THRU)) {
                        sts = CSMI_STS_NO_DEV_ADDR;
                        break;
                }

                if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
                                       ci->control_code,
                                       t->phys_targ_id)) {
                        sts = CSMI_STS_FAILED;
                        break;
                }

                return true;

        default:

                sts = CSMI_STS_BAD_CTRL_CODE;
                break;
        }

        rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

        return false;
}

static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
                                     struct esas2r_request *rq, void *context)
{
        struct atto_csmi *ci = (struct atto_csmi *)context;
        union atto_ioctl_csmi *ioctl_csmi =
                (union atto_ioctl_csmi *)esas2r_buffered_ioctl;

        switch (ci->control_code) {
        case CSMI_CC_GET_DRVR_INFO:
        {
                struct atto_csmi_get_driver_info *gdi =
                        &ioctl_csmi->drvr_info;

                strcpy(gdi->name, ESAS2R_VERSION_STR);

                gdi->major_rev = ESAS2R_MAJOR_REV;
                gdi->minor_rev = ESAS2R_MINOR_REV;
                gdi->build_rev = 0;
                gdi->release_rev = 0;
                break;
        }

        case CSMI_CC_GET_SCSI_ADDR:
        {
                struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

                if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
                    CSMI_STS_SUCCESS) {
                        gsa->target_id = rq->target_id;
                        gsa->path_id = 0;
                }

                break;
        }
        }

        ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}

static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
        struct esas2r_buffered_ioctl bi;

        memset(&bi, 0, sizeof(bi));

        bi.a = a;
        bi.ioctl = &ci->data;
        bi.length = sizeof(union atto_ioctl_csmi);
        bi.offset = 0;
        bi.callback = csmi_ioctl_callback;
        bi.context = ci;
        bi.done_callback = csmi_ioctl_done_callback;
        bi.done_context = ci;

        return handle_buffered_ioctl(&bi);
}

/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
                             struct atto_ioctl *hi,
                             struct esas2r_request *rq,
                             struct esas2r_sg_context *sgc)
{
        esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

        esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

        if (!esas2r_build_sg_list(a, rq, sgc)) {
                hi->status = ATTO_STS_OUT_OF_RSRC;

                return false;
        }

        esas2r_start_request(a, rq);

        return true;
}

static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
                                  struct esas2r_request *rq)
{
        struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
        struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
        u8 sts = ATTO_SPT_RS_FAILED;

        spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
        spt->sense_length = rq->sense_len;
        spt->residual_length =
                le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

        switch (rq->req_stat) {
        case RS_SUCCESS:
        case RS_SCSI_ERROR:
                sts = ATTO_SPT_RS_SUCCESS;
                break;
        case RS_UNDERRUN:
                sts = ATTO_SPT_RS_UNDERRUN;
                break;
        case RS_OVERRUN:
                sts = ATTO_SPT_RS_OVERRUN;
                break;
        case RS_SEL:
        case RS_SEL2:
                sts = ATTO_SPT_RS_NO_DEVICE;
                break;
        case RS_NO_LUN:
                sts = ATTO_SPT_RS_NO_LUN;
                break;
        case RS_TIMEOUT:
                sts = ATTO_SPT_RS_TIMEOUT;
                break;
        case RS_DEGRADED:
                sts = ATTO_SPT_RS_DEGRADED;
                break;
        case RS_BUSY:
                sts = ATTO_SPT_RS_BUSY;
                break;
        case RS_ABORTED:
                sts = ATTO_SPT_RS_ABORTED;
                break;
        case RS_RESET:
                sts = ATTO_SPT_RS_BUS_RESET;
                break;
        }

        spt->req_status = sts;

        /* Update the target ID to the next one present. */
        spt->target_id =
                esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

        /* Done, call the completion callback. */
        (*rq->aux_req_cb)(a, rq);
}

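/*
 * Like the other BUFFERED_IOCTL_CALLBACK implementations in this file,
 * hba_ioctl_callback() returns true when it has started a firmware
 * request (handle_buffered_ioctl() then waits for the completion
 * callback) and false when the ioctl completed inline, in which case the
 * outcome is already recorded in hi->status.
 */
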
static int hba_ioctl_callback(struct esas2r_adapter *a,
                              struct esas2r_request *rq,
                              struct esas2r_sg_context *sgc,
                              void *context)
{
        struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

        hi->status = ATTO_STS_SUCCESS;

        switch (hi->function) {
        case ATTO_FUNC_GET_ADAP_INFO:
        {
                u8 *class_code = (u8 *)&a->pcid->class;

                struct atto_hba_get_adapter_info *gai =
                        &hi->data.get_adap_info;
                int pcie_cap_reg;

                if (hi->flags & HBAF_TUNNEL) {
                        hi->status = ATTO_STS_UNSUPPORTED;
                        break;
                }

                if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
                        hi->status = ATTO_STS_INV_VERSION;
                        hi->version = ATTO_VER_GET_ADAP_INFO0;
                        break;
                }

                memset(gai, 0, sizeof(*gai));

                gai->pci.vendor_id = a->pcid->vendor;
                gai->pci.device_id = a->pcid->device;
                gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
                gai->pci.ss_device_id = a->pcid->subsystem_device;
                gai->pci.class_code[0] = class_code[0];
                gai->pci.class_code[1] = class_code[1];
                gai->pci.class_code[2] = class_code[2];
                gai->pci.rev_id = a->pcid->revision;
                gai->pci.bus_num = a->pcid->bus->number;
                gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
                gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

                pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
                if (pcie_cap_reg) {
                        u16 stat;
                        u32 caps;

                        pci_read_config_word(a->pcid,
                                             pcie_cap_reg + PCI_EXP_LNKSTA,
                                             &stat);
                        pci_read_config_dword(a->pcid,
                                              pcie_cap_reg + PCI_EXP_LNKCAP,
                                              &caps);

                        gai->pci.link_speed_curr =
                                (u8)(stat & PCI_EXP_LNKSTA_CLS);
                        gai->pci.link_speed_max =
                                (u8)(caps & PCI_EXP_LNKCAP_SLS);
                        gai->pci.link_width_curr =
                                (u8)((stat & PCI_EXP_LNKSTA_NLW)
                                     >> PCI_EXP_LNKSTA_NLW_SHIFT);
                        gai->pci.link_width_max =
                                (u8)((caps & PCI_EXP_LNKCAP_MLW)
                                     >> 4);
                }

                gai->pci.msi_vector_cnt = 1;

                if (a->pcid->msix_enabled)
                        gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
                else if (a->pcid->msi_enabled)
                        gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
                else
                        gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

                gai->adap_type = ATTO_GAI_AT_ESASRAID2;

                if (test_bit(AF2_THUNDERLINK, &a->flags2))
                        gai->adap_type = ATTO_GAI_AT_TLSASHBA;

                if (test_bit(AF_DEGRADED_MODE, &a->flags))
                        gai->adap_flags |= ATTO_GAI_AF_DEGRADED;

                gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
                                   ATTO_GAI_AF_DEVADDR_SUPP;

                if (a->pcid->subsystem_device == ATTO_ESAS_R60F
                    || a->pcid->subsystem_device == ATTO_ESAS_R608
                    || a->pcid->subsystem_device == ATTO_ESAS_R644
                    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
                        gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;

                gai->num_ports = ESAS2R_NUM_PHYS;
                gai->num_phys = ESAS2R_NUM_PHYS;

                strcpy(gai->firmware_rev, a->fw_rev);
                strcpy(gai->flash_rev, a->flash_rev);
                strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
                strcpy(gai->model_name, esas2r_get_model_name(a));

                gai->num_targets = ESAS2R_MAX_TARGETS;

                gai->num_busses = 1;
                gai->num_targsper_bus = gai->num_targets;
                gai->num_lunsper_targ = 256;

                if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
                    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
                        gai->num_connectors = 4;
                else
                        gai->num_connectors = 2;

                gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;

                gai->num_targets_backend = a->num_targets_backend;

                gai->tunnel_flags = a->ioctl_tunnel
                                    & (ATTO_GAI_TF_MEM_RW
                                       | ATTO_GAI_TF_TRACE
                                       | ATTO_GAI_TF_SCSI_PASS_THRU
                                       | ATTO_GAI_TF_GET_DEV_ADDR
                                       | ATTO_GAI_TF_PHY_CTRL
                                       | ATTO_GAI_TF_CONN_CTRL
                                       | ATTO_GAI_TF_GET_DEV_INFO);
                break;
        }

        case ATTO_FUNC_GET_ADAP_ADDR:
        {
                struct atto_hba_get_adapter_address *gaa =
                        &hi->data.get_adap_addr;

                if (hi->flags & HBAF_TUNNEL) {
                        hi->status = ATTO_STS_UNSUPPORTED;
                        break;
                }

                if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
                        hi->status = ATTO_STS_INV_VERSION;
                        hi->version = ATTO_VER_GET_ADAP_ADDR0;
                } else if (gaa->addr_type == ATTO_GAA_AT_PORT
                           || gaa->addr_type == ATTO_GAA_AT_NODE) {
                        if (gaa->addr_type == ATTO_GAA_AT_PORT
                            && gaa->port_id >= ESAS2R_NUM_PHYS) {
                                hi->status = ATTO_STS_NOT_APPL;
                        } else {
                                memcpy((u64 *)gaa->address,
                                       &a->nvram->sas_addr[0], sizeof(u64));
                                gaa->addr_len = sizeof(u64);
                        }
                } else {
                        hi->status = ATTO_STS_INV_PARAM;
                }

                break;
        }

        case ATTO_FUNC_MEM_RW:
        {
                if (hi->flags & HBAF_TUNNEL) {
                        if (hba_ioctl_tunnel(a, hi, rq, sgc))
                                return true;

                        break;
                }

                hi->status = ATTO_STS_UNSUPPORTED;

                break;
        }

        case ATTO_FUNC_TRACE:
        {
                struct atto_hba_trace *trc = &hi->data.trace;

                if (hi->flags & HBAF_TUNNEL) {
                        if (hba_ioctl_tunnel(a, hi, rq, sgc))
                                return true;

                        break;
                }

                if (hi->version > ATTO_VER_TRACE1) {
                        hi->status = ATTO_STS_INV_VERSION;
                        hi->version = ATTO_VER_TRACE1;
                        break;
                }

                if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
                    && hi->version >= ATTO_VER_TRACE1) {
                        if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
                                u32 len = hi->data_length;
                                u32 offset = trc->current_offset;
                                u32 total_len = ESAS2R_FWCOREDUMP_SZ;

                                /* Size is zero if a core dump isn't present */
                                if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
                                        total_len = 0;

                                if (len > total_len)
                                        len = total_len;

                                if (offset >= total_len
                                    || offset + len > total_len
                                    || len == 0) {
                                        hi->status = ATTO_STS_INV_PARAM;
                                        break;
                                }

                                memcpy(trc + 1,
                                       a->fw_coredump_buff + offset,
                                       len);

                                hi->data_length = len;
                        } else if (trc->trace_func == ATTO_TRC_TF_RESET) {
                                memset(a->fw_coredump_buff, 0,
                                       ESAS2R_FWCOREDUMP_SZ);

                                clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
                        } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
                                hi->status = ATTO_STS_UNSUPPORTED;
                                break;
                        }

                        /* Always return all the info we can. */
                        trc->trace_mask = 0;
                        trc->current_offset = 0;
                        trc->total_length = ESAS2R_FWCOREDUMP_SZ;

                        /* Return zero length buffer if core dump not present */
                        if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
                                trc->total_length = 0;
                } else {
                        hi->status = ATTO_STS_UNSUPPORTED;
                }

                break;
        }

        case ATTO_FUNC_SCSI_PASS_THRU:
        {
                struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
                struct scsi_lun lun;

                memcpy(&lun, spt->lun, sizeof(struct scsi_lun));

                if (hi->flags & HBAF_TUNNEL) {
                        if (hba_ioctl_tunnel(a, hi, rq, sgc))
                                return true;

                        break;
                }

                if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
                        hi->status = ATTO_STS_INV_VERSION;
                        hi->version = ATTO_VER_SCSI_PASS_THRU0;
                        break;
                }

                if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
                        hi->status = ATTO_STS_INV_PARAM;
                        break;
                }

                esas2r_sgc_init(sgc, a, rq, NULL);

                sgc->length = hi->data_length;
                sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
                                   + sizeof(struct atto_hba_scsi_pass_thru);

                /* Finish request initialization */
                rq->target_id = (u16)spt->target_id;
                rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
                memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
                rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
                rq->sense_len = spt->sense_length;
                rq->sense_buf = (u8 *)spt->sense_data;
                /* NOTE: we ignore spt->timeout */

                /*
                 * Always usurp the completion callback since the interrupt
                 * callback mechanism may be used.
                 */
                rq->aux_req_cx = hi;
                rq->aux_req_cb = rq->comp_cb;
                rq->comp_cb = scsi_passthru_comp_cb;

                if (spt->flags & ATTO_SPTF_DATA_IN) {
                        rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
                } else if (spt->flags & ATTO_SPTF_DATA_OUT) {
                        rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
                } else {
                        if (sgc->length) {
                                hi->status = ATTO_STS_INV_PARAM;
                                break;
                        }
                }

                if (spt->flags & ATTO_SPTF_ORDERED_Q)
                        rq->vrq->scsi.flags |=
                                cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
                else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
                        rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);

                if (!esas2r_build_sg_list(a, rq, sgc)) {
                        hi->status = ATTO_STS_OUT_OF_RSRC;
                        break;
                }

                esas2r_start_request(a, rq);

                return true;
        }

        case ATTO_FUNC_GET_DEV_ADDR:
        {
                struct atto_hba_get_device_address *gda =
                        &hi->data.get_dev_addr;
                struct esas2r_target *t;

                if (hi->flags & HBAF_TUNNEL) {
                        if (hba_ioctl_tunnel(a, hi, rq, sgc))
                                return true;

                        break;
                }

                if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
                        hi->status = ATTO_STS_INV_VERSION;
                        hi->version = ATTO_VER_GET_DEV_ADDR0;
                        break;
                }

                if (gda->target_id >= ESAS2R_MAX_TARGETS) {
                        hi->status = ATTO_STS_INV_PARAM;
                        break;
                }

                t = a->targetdb + (u16)gda->target_id;

                if (t->target_state != TS_PRESENT) {
                        hi->status = ATTO_STS_FAILED;
                } else if (gda->addr_type == ATTO_GDA_AT_PORT) {
                        if (t->sas_addr == 0) {
                                hi->status = ATTO_STS_UNSUPPORTED;
                        } else {
                                *(u64 *)gda->address = t->sas_addr;

                                gda->addr_len = sizeof(u64);
                        }
                } else if (gda->addr_type == ATTO_GDA_AT_NODE) {
                        hi->status = ATTO_STS_NOT_APPL;
                } else {
                        hi->status = ATTO_STS_INV_PARAM;
                }

                /* update the target ID to the next one present. */
                gda->target_id =
                        esas2r_targ_db_find_next_present(a,
                                                         (u16)gda->target_id);
                break;
        }

        case ATTO_FUNC_PHY_CTRL:
        case ATTO_FUNC_CONN_CTRL:
        {
                if (hba_ioctl_tunnel(a, hi, rq, sgc))
                        return true;

                break;
        }

        case ATTO_FUNC_ADAP_CTRL:
        {
                struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;

                if (hi->flags & HBAF_TUNNEL) {
                        hi->status = ATTO_STS_UNSUPPORTED;
                        break;
                }

                if (hi->version > ATTO_VER_ADAP_CTRL0) {
                        hi->status = ATTO_STS_INV_VERSION;
                        hi->version = ATTO_VER_ADAP_CTRL0;
                        break;
                }

                if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
                        esas2r_reset_adapter(a);
                } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
                        hi->status = ATTO_STS_UNSUPPORTED;
                        break;
                }

                if (test_bit(AF_CHPRST_NEEDED, &a->flags))
                        ac->adap_state = ATTO_AC_AS_RST_SCHED;
                else if (test_bit(AF_CHPRST_PENDING, &a->flags))
                        ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
                else if (test_bit(AF_DISC_PENDING, &a->flags))
                        ac->adap_state = ATTO_AC_AS_RST_DISC;
                else if (test_bit(AF_DISABLED, &a->flags))
                        ac->adap_state = ATTO_AC_AS_DISABLED;
                else if (test_bit(AF_DEGRADED_MODE, &a->flags))
                        ac->adap_state = ATTO_AC_AS_DEGRADED;
                else
                        ac->adap_state = ATTO_AC_AS_OK;

                break;
        }

        case ATTO_FUNC_GET_DEV_INFO:
        {
                struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
                struct esas2r_target *t;

                if (hi->flags & HBAF_TUNNEL) {
                        if (hba_ioctl_tunnel(a, hi, rq, sgc))
                                return true;

                        break;
                }

                if (hi->version > ATTO_VER_GET_DEV_INFO0) {
                        hi->status = ATTO_STS_INV_VERSION;
                        hi->version = ATTO_VER_GET_DEV_INFO0;
                        break;
                }

                if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
                        hi->status = ATTO_STS_INV_PARAM;
                        break;
                }

                t = a->targetdb + (u16)gdi->target_id;

                /* update the target ID to the next one present. */
                gdi->target_id =
                        esas2r_targ_db_find_next_present(a,
                                                         (u16)gdi->target_id);

                if (t->target_state != TS_PRESENT) {
                        hi->status = ATTO_STS_FAILED;
                        break;
                }

                hi->status = ATTO_STS_UNSUPPORTED;
                break;
        }

        default:

                hi->status = ATTO_STS_INV_FUNC;
                break;
        }

        return false;
}

static void hba_ioctl_done_callback(struct esas2r_adapter *a,
                                    struct esas2r_request *rq, void *context)
{
        struct atto_ioctl *ioctl_hba =
                (struct atto_ioctl *)esas2r_buffered_ioctl;

        esas2r_debug("hba_ioctl_done_callback %d", a->index);

        if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
                struct atto_hba_get_adapter_info *gai =
                        &ioctl_hba->data.get_adap_info;

                esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");

                gai->drvr_rev_major = ESAS2R_MAJOR_REV;
                gai->drvr_rev_minor = ESAS2R_MINOR_REV;

                strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
                strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);

                gai->num_busses = 1;
                gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
                gai->num_lunsper_targ = 1;
        }
}

u8 handle_hba_ioctl(struct esas2r_adapter *a,
                    struct atto_ioctl *ioctl_hba)
{
        struct esas2r_buffered_ioctl bi;

        memset(&bi, 0, sizeof(bi));

        bi.a = a;
        bi.ioctl = ioctl_hba;
        bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
        bi.callback = hba_ioctl_callback;
        bi.context = NULL;
        bi.done_callback = hba_ioctl_done_callback;
        bi.done_context = NULL;
        bi.offset = 0;

        return handle_buffered_ioctl(&bi);
}

int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
                        struct esas2r_sas_nvram *data)
{
        int result = 0;

        a->nvram_command_done = 0;
        rq->comp_cb = complete_nvr_req;

        if (esas2r_nvram_write(a, rq, data)) {
                /* now wait around for it to complete. */
                while (!a->nvram_command_done)
                        wait_event_interruptible(a->nvram_waiter,
                                                 a->nvram_command_done);

                /* done, check the status. */
                if (rq->req_stat == RS_SUCCESS)
                        result = 1;
        }
        return result;
}

/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
{
        struct atto_express_ioctl *ioctl = NULL;
        struct esas2r_adapter *a;
        struct esas2r_request *rq;
        u16 code;
        int err;

        esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);

        if ((arg == NULL)
            || (cmd < EXPRESS_IOCTL_MIN)
            || (cmd > EXPRESS_IOCTL_MAX))
                return -ENOTSUPP;

        if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
                esas2r_log(ESAS2R_LOG_WARN,
                           "ioctl_handler access_ok failed for cmd %d, "
                           "address %p", cmd,
                           arg);
                return -EFAULT;
        }

        /* allocate a kernel memory buffer for the IOCTL data */
        ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
        if (ioctl == NULL) {
                esas2r_log(ESAS2R_LOG_WARN,
                           "ioctl_handler kzalloc failed for %zu bytes",
                           sizeof(struct atto_express_ioctl));
                return -ENOMEM;
        }

        err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
        if (err != 0) {
                esas2r_log(ESAS2R_LOG_WARN,
                           "copy_from_user didn't copy everything (err %d, cmd %d)",
                           err,
                           cmd);
                kfree(ioctl);

                return -EFAULT;
        }

        /* verify the signature */

        if (memcmp(ioctl->header.signature,
                   EXPRESS_IOCTL_SIGNATURE,
                   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
                esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
                kfree(ioctl);

                return -ENOTSUPP;
        }

        /* assume success */

        ioctl->header.return_code = IOCTL_SUCCESS;
        err = 0;

        /*
         * handle EXPRESS_IOCTL_GET_CHANNELS
         * without paying attention to channel
         */

        if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
                int i = 0, k = 0;

                ioctl->data.chanlist.num_channels = 0;

                while (i < MAX_ADAPTERS) {
                        if (esas2r_adapters[i]) {
                                ioctl->data.chanlist.num_channels++;
                                ioctl->data.chanlist.channel[k] = i;
                                k++;
                        }
                        i++;
                }

                goto ioctl_done;
        }

        /* get the channel */

        if (ioctl->header.channel == 0xFF) {
                a = (struct esas2r_adapter *)hostdata;
        } else {
                if (ioctl->header.channel >= MAX_ADAPTERS ||
                    esas2r_adapters[ioctl->header.channel] == NULL) {
                        ioctl->header.return_code = IOCTL_BAD_CHANNEL;
                        esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
                        kfree(ioctl);

                        return -ENOTSUPP;
                }
                a = esas2r_adapters[ioctl->header.channel];
        }

        switch (cmd) {
        case EXPRESS_IOCTL_RW_FIRMWARE:

                if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
                        err = esas2r_write_fw(a,
                                              (char *)ioctl->data.fwrw.image,
                                              0,
                                              sizeof(struct atto_express_ioctl));

                        if (err >= 0) {
                                err = esas2r_read_fw(a,
                                                     (char *)ioctl->data.fwrw.image,
                                                     0,
                                                     sizeof(struct atto_express_ioctl));
                        }
                } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
                        err = esas2r_write_fs(a,
                                              (char *)ioctl->data.fwrw.image,
                                              0,
                                              sizeof(struct atto_express_ioctl));

                        if (err >= 0) {
                                err = esas2r_read_fs(a,
                                                     (char *)ioctl->data.fwrw.image,
                                                     0,
                                                     sizeof(struct atto_express_ioctl));
                        }
                } else {
                        ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
                }

                break;

        case EXPRESS_IOCTL_READ_PARAMS:

                memcpy(ioctl->data.prw.data_buffer, a->nvram,
                       sizeof(struct esas2r_sas_nvram));
                ioctl->data.prw.code = 1;
                break;

        case EXPRESS_IOCTL_WRITE_PARAMS:

                rq = esas2r_alloc_request(a);
                if (rq == NULL) {
                        kfree(ioctl);
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "could not allocate an internal request");
                        return -ENOMEM;
                }

                code = esas2r_write_params(a, rq,
                                           (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
                ioctl->data.prw.code = code;

                esas2r_free_request(a, rq);

                break;

        case EXPRESS_IOCTL_DEFAULT_PARAMS:

                esas2r_nvram_get_defaults(a,
                                          (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
                ioctl->data.prw.code = 1;
                break;

        case EXPRESS_IOCTL_CHAN_INFO:

                ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
                ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
                ioctl->data.chaninfo.IRQ = a->pcid->irq;
                ioctl->data.chaninfo.device_id = a->pcid->device;
                ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
                ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
                ioctl->data.chaninfo.revision_id = a->pcid->revision;
                ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
                ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
                ioctl->data.chaninfo.core_rev = 0;
                ioctl->data.chaninfo.host_no = a->host->host_no;
                ioctl->data.chaninfo.hbaapi_rev = 0;
                break;

        case EXPRESS_IOCTL_SMP:
                ioctl->header.return_code =
                        handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
                break;

        case EXPRESS_CSMI:
                ioctl->header.return_code =
                        handle_csmi_ioctl(a, &ioctl->data.csmi);
                break;

        case EXPRESS_IOCTL_HBA:
                ioctl->header.return_code =
                        handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
                break;

        case EXPRESS_IOCTL_VDA:
                err = esas2r_write_vda(a,
                                       (char *)&ioctl->data.ioctl_vda,
                                       0,
                                       sizeof(struct atto_ioctl_vda) +
                                       ioctl->data.ioctl_vda.data_length);

                if (err >= 0) {
                        err = esas2r_read_vda(a,
                                              (char *)&ioctl->data.ioctl_vda,
                                              0,
                                              sizeof(struct atto_ioctl_vda) +
                                              ioctl->data.ioctl_vda.data_length);
                }

                break;

        case EXPRESS_IOCTL_GET_MOD_INFO:

                ioctl->data.modinfo.adapter = a;
                ioctl->data.modinfo.pci_dev = a->pcid;
                ioctl->data.modinfo.scsi_host = a->host;
                ioctl->data.modinfo.host_no = a->host->host_no;

                break;

        default:
                esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
                ioctl->header.return_code = IOCTL_ERR_INVCMD;
        }

ioctl_done:

        if (err < 0) {
                esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
                           cmd);

                switch (err) {
                case -ENOMEM:
                case -EBUSY:
                        ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
                        break;

                case -ENOSYS:
                case -EINVAL:
                        ioctl->header.return_code = IOCTL_INVALID_PARAM;
                        break;

                default:
                        ioctl->header.return_code = IOCTL_GENERAL_ERROR;
                        break;
                }
        }

        /* Always copy the buffer back, if only to pick up the status */
        err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
        if (err != 0) {
                esas2r_log(ESAS2R_LOG_WARN,
                           "ioctl_handler copy_to_user didn't copy "
                           "everything (err %d, cmd %d)", err,
                           cmd);
                kfree(ioctl);

                return -EFAULT;
        }

        kfree(ioctl);

        return 0;
}

int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
{
        return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
}

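/*
 * Userspace usage sketch (illustrative only, not part of the driver):
 * assuming the ATTO ioctl definitions from the companion headers
 * (struct atto_express_ioctl, EXPRESS_IOCTL_SIGNATURE and the
 * EXPRESS_IOCTL_* command codes) and "/dev/sgN" standing in for a SCSI
 * device node bound to this driver, a caller might do:
 *
 *	struct atto_express_ioctl ei = { };
 *	int fd = open("/dev/sgN", O_RDWR);
 *
 *	memcpy(ei.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	ei.header.channel = 0xFF;	(use the adapter behind this node)
 *
 *	if (ioctl(fd, EXPRESS_IOCTL_GET_CHANNELS, &ei) == 0)
 *		printf("%u channels\n", ei.data.chanlist.num_channels);
 *
 * esas2r_ioctl_handler() validates the signature, dispatches on the
 * command code, and always copies the structure back so the caller can
 * inspect ei.header.return_code.
 */
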
1606 */ 1607 1608 if (a->firmware.state == FW_COMMAND_ST) { 1609 u32 length = a->firmware.header.length; 1610 esas2r_trace_exit(); 1611 1612 esas2r_debug("esas2r_read_fw: COMMAND length %d off %d", 1613 length, 1614 off); 1615 1616 if (off == 0) { 1617 if (a->firmware.header.action == FI_ACT_UP) { 1618 if (!allocate_fw_buffers(a, length)) 1619 return -ENOMEM; 1620 1621 1622 /* copy header over */ 1623 1624 memcpy(a->firmware.data, 1625 &a->firmware.header, 1626 sizeof(a->firmware.header)); 1627 1628 do_fm_api(a, 1629 (struct esas2r_flash_img *)a->firmware.data); 1630 } else if (a->firmware.header.action == FI_ACT_UPSZ) { 1631 int size = 1632 min((int)count, 1633 (int)sizeof(a->firmware.header)); 1634 do_fm_api(a, &a->firmware.header); 1635 memcpy(buf, &a->firmware.header, size); 1636 esas2r_debug("FI_ACT_UPSZ size %d", size); 1637 return size; 1638 } else { 1639 esas2r_debug("invalid action %d", 1640 a->firmware.header.action); 1641 return -ENOSYS; 1642 } 1643 } 1644 1645 if (count + off > length) 1646 count = length - off; 1647 1648 if (count < 0) 1649 return 0; 1650 1651 if (!a->firmware.data) { 1652 esas2r_debug( 1653 "read: nonzero offset but no buffer available!"); 1654 return -ENOMEM; 1655 } 1656 1657 esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off, 1658 count, 1659 length); 1660 1661 memcpy(buf, &a->firmware.data[off], count); 1662 1663 /* when done, release the buffer */ 1664 1665 if (length <= off + count) { 1666 esas2r_debug("esas2r_read_fw: freeing buffer!"); 1667 1668 free_fw_buffers(a); 1669 } 1670 1671 return count; 1672 } 1673 1674 esas2r_trace_exit(); 1675 esas2r_debug("esas2r_read_fw: invalid firmware state %d", 1676 a->firmware.state); 1677 1678 return -EINVAL; 1679 } 1680 1681 /* Handle a call to write firmware. */ 1682 int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off, 1683 int count) 1684 { 1685 u32 length; 1686 1687 if (off == 0) { 1688 struct esas2r_flash_img *header = 1689 (struct esas2r_flash_img *)buf; 1690 1691 /* assume version 0 flash image */ 1692 1693 int min_size = sizeof(struct esas2r_flash_img_v0); 1694 1695 a->firmware.state = FW_INVALID_ST; 1696 1697 /* validate the version field first */ 1698 1699 if (count < 4 1700 || header->fi_version > FI_VERSION_1) { 1701 esas2r_debug( 1702 "esas2r_write_fw: short header or invalid version"); 1703 return -EINVAL; 1704 } 1705 1706 /* See if its a version 1 flash image */ 1707 1708 if (header->fi_version == FI_VERSION_1) 1709 min_size = sizeof(struct esas2r_flash_img); 1710 1711 /* If this is the start, the header must be full and valid. */ 1712 if (count < min_size) { 1713 esas2r_debug("esas2r_write_fw: short header, aborting"); 1714 return -EINVAL; 1715 } 1716 1717 /* Make sure the size is reasonable. */ 1718 length = header->length; 1719 1720 if (length > 1024 * 1024) { 1721 esas2r_debug( 1722 "esas2r_write_fw: hosed, length %d fi_version %d", 1723 length, header->fi_version); 1724 return -EINVAL; 1725 } 1726 1727 /* 1728 * If this is a write command, allocate memory because 1729 * we have to cache everything. otherwise, just cache 1730 * the header, because the read op will do the command. 1731 */ 1732 1733 if (header->action == FI_ACT_DOWN) { 1734 if (!allocate_fw_buffers(a, length)) 1735 return -ENOMEM; 1736 1737 /* 1738 * Store the command, so there is context on subsequent 1739 * calls. 
1740 */ 1741 memcpy(&a->firmware.header, 1742 buf, 1743 sizeof(*header)); 1744 } else if (header->action == FI_ACT_UP 1745 || header->action == FI_ACT_UPSZ) { 1746 /* Save the command, result will be picked up on read */ 1747 memcpy(&a->firmware.header, 1748 buf, 1749 sizeof(*header)); 1750 1751 a->firmware.state = FW_COMMAND_ST; 1752 1753 esas2r_debug( 1754 "esas2r_write_fw: COMMAND, count %d, action %d ", 1755 count, header->action); 1756 1757 /* 1758 * Pretend we took the whole buffer, 1759 * so we don't get bothered again. 1760 */ 1761 1762 return count; 1763 } else { 1764 esas2r_debug("esas2r_write_fw: invalid action %d ", 1765 a->firmware.header.action); 1766 return -ENOSYS; 1767 } 1768 } else { 1769 length = a->firmware.header.length; 1770 } 1771 1772 /* 1773 * We only get here on a download command, regardless of offset. 1774 * the chunks written by the system need to be cached, and when 1775 * the final one arrives, issue the fmapi command. 1776 */ 1777 1778 if (off + count > length) 1779 count = length - off; 1780 1781 if (count > 0) { 1782 esas2r_debug("esas2r_write_fw: off %d count %d length %d", off, 1783 count, 1784 length); 1785 1786 /* 1787 * On a full upload, the system tries sending the whole buffer. 1788 * there's nothing to do with it, so just drop it here, before 1789 * trying to copy over into unallocated memory! 1790 */ 1791 if (a->firmware.header.action == FI_ACT_UP) 1792 return count; 1793 1794 if (!a->firmware.data) { 1795 esas2r_debug( 1796 "write: nonzero offset but no buffer available!"); 1797 return -ENOMEM; 1798 } 1799 1800 memcpy(&a->firmware.data[off], buf, count); 1801 1802 if (length == off + count) { 1803 do_fm_api(a, 1804 (struct esas2r_flash_img *)a->firmware.data); 1805 1806 /* 1807 * Now copy the header result to be picked up by the 1808 * next read 1809 */ 1810 memcpy(&a->firmware.header, 1811 a->firmware.data, 1812 sizeof(a->firmware.header)); 1813 1814 a->firmware.state = FW_STATUS_ST; 1815 1816 esas2r_debug("write completed"); 1817 1818 /* 1819 * Since the system has the data buffered, the only way 1820 * this can leak is if a root user writes a program 1821 * that writes a shorter buffer than it claims, and the 1822 * copyin fails. 1823 */ 1824 free_fw_buffers(a); 1825 } 1826 } 1827 1828 return count; 1829 } 1830 1831 /* Callback for the completion of a VDA request. */ 1832 static void vda_complete_req(struct esas2r_adapter *a, 1833 struct esas2r_request *rq) 1834 { 1835 a->vda_command_done = 1; 1836 wake_up_interruptible(&a->vda_waiter); 1837 } 1838 1839 /* Scatter/gather callback for VDA requests */ 1840 static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr) 1841 { 1842 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; 1843 int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer; 1844 1845 (*addr) = a->ppvda_buffer + offset; 1846 return VDA_MAX_BUFFER_SIZE - offset; 1847 } 1848 1849 /* Handle a call to read a VDA command. */ 1850 int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count) 1851 { 1852 if (!a->vda_buffer) 1853 return -ENOMEM; 1854 1855 if (off == 0) { 1856 struct esas2r_request *rq; 1857 struct atto_ioctl_vda *vi = 1858 (struct atto_ioctl_vda *)a->vda_buffer; 1859 struct esas2r_sg_context sgc; 1860 bool wait_for_completion; 1861 1862 /* 1863 * Presumeably, someone has already written to the vda_buffer, 1864 * and now they are reading the node the response, so now we 1865 * will actually issue the request to the chip and reply. 
1866 */ 1867 1868 /* allocate a request */ 1869 rq = esas2r_alloc_request(a); 1870 if (rq == NULL) { 1871 esas2r_debug("esas2r_read_vda: out of requestss"); 1872 return -EBUSY; 1873 } 1874 1875 rq->comp_cb = vda_complete_req; 1876 1877 sgc.first_req = rq; 1878 sgc.adapter = a; 1879 sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ; 1880 sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda; 1881 1882 a->vda_command_done = 0; 1883 1884 wait_for_completion = 1885 esas2r_process_vda_ioctl(a, vi, rq, &sgc); 1886 1887 if (wait_for_completion) { 1888 /* now wait around for it to complete. */ 1889 1890 while (!a->vda_command_done) 1891 wait_event_interruptible(a->vda_waiter, 1892 a->vda_command_done); 1893 } 1894 1895 esas2r_free_request(a, (struct esas2r_request *)rq); 1896 } 1897 1898 if (off > VDA_MAX_BUFFER_SIZE) 1899 return 0; 1900 1901 if (count + off > VDA_MAX_BUFFER_SIZE) 1902 count = VDA_MAX_BUFFER_SIZE - off; 1903 1904 if (count < 0) 1905 return 0; 1906 1907 memcpy(buf, a->vda_buffer + off, count); 1908 1909 return count; 1910 } 1911 1912 /* Handle a call to write a VDA command. */ 1913 int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off, 1914 int count) 1915 { 1916 /* 1917 * allocate memory for it, if not already done. once allocated, 1918 * we will keep it around until the driver is unloaded. 1919 */ 1920 1921 if (!a->vda_buffer) { 1922 dma_addr_t dma_addr; 1923 a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev, 1924 (size_t) 1925 VDA_MAX_BUFFER_SIZE, 1926 &dma_addr, 1927 GFP_KERNEL); 1928 1929 a->ppvda_buffer = dma_addr; 1930 } 1931 1932 if (!a->vda_buffer) 1933 return -ENOMEM; 1934 1935 if (off > VDA_MAX_BUFFER_SIZE) 1936 return 0; 1937 1938 if (count + off > VDA_MAX_BUFFER_SIZE) 1939 count = VDA_MAX_BUFFER_SIZE - off; 1940 1941 if (count < 1) 1942 return 0; 1943 1944 memcpy(a->vda_buffer + off, buf, count); 1945 1946 return count; 1947 } 1948 1949 /* Callback for the completion of an FS_API request.*/ 1950 static void fs_api_complete_req(struct esas2r_adapter *a, 1951 struct esas2r_request *rq) 1952 { 1953 a->fs_api_command_done = 1; 1954 1955 wake_up_interruptible(&a->fs_api_waiter); 1956 } 1957 1958 /* Scatter/gather callback for VDA requests */ 1959 static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr) 1960 { 1961 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter; 1962 struct esas2r_ioctl_fs *fs = 1963 (struct esas2r_ioctl_fs *)a->fs_api_buffer; 1964 u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs; 1965 1966 (*addr) = a->ppfs_api_buffer + offset; 1967 1968 return a->fs_api_buffer_size - offset; 1969 } 1970 1971 /* Handle a call to read firmware via FS_API. */ 1972 int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count) 1973 { 1974 if (!a->fs_api_buffer) 1975 return -ENOMEM; 1976 1977 if (off == 0) { 1978 struct esas2r_request *rq; 1979 struct esas2r_sg_context sgc; 1980 struct esas2r_ioctl_fs *fs = 1981 (struct esas2r_ioctl_fs *)a->fs_api_buffer; 1982 1983 /* If another flash request is already in progress, return. */ 1984 if (down_interruptible(&a->fs_api_semaphore)) { 1985 busy: 1986 fs->status = ATTO_STS_OUT_OF_RSRC; 1987 return -EBUSY; 1988 } 1989 1990 /* 1991 * Presumeably, someone has already written to the 1992 * fs_api_buffer, and now they are reading the node the 1993 * response, so now we will actually issue the request to the 1994 * chip and reply. 
Allocate a request 1995 */ 1996 1997 rq = esas2r_alloc_request(a); 1998 if (rq == NULL) { 1999 esas2r_debug("esas2r_read_fs: out of requests"); 2000 up(&a->fs_api_semaphore); 2001 goto busy; 2002 } 2003 2004 rq->comp_cb = fs_api_complete_req; 2005 2006 /* Set up the SGCONTEXT for to build the s/g table */ 2007 2008 sgc.cur_offset = fs->data; 2009 sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api; 2010 2011 a->fs_api_command_done = 0; 2012 2013 if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) { 2014 if (fs->status == ATTO_STS_OUT_OF_RSRC) 2015 count = -EBUSY; 2016 2017 goto dont_wait; 2018 } 2019 2020 /* Now wait around for it to complete. */ 2021 2022 while (!a->fs_api_command_done) 2023 wait_event_interruptible(a->fs_api_waiter, 2024 a->fs_api_command_done); 2025 ; 2026 dont_wait: 2027 /* Free the request and keep going */ 2028 up(&a->fs_api_semaphore); 2029 esas2r_free_request(a, (struct esas2r_request *)rq); 2030 2031 /* Pick up possible error code from above */ 2032 if (count < 0) 2033 return count; 2034 } 2035 2036 if (off > a->fs_api_buffer_size) 2037 return 0; 2038 2039 if (count + off > a->fs_api_buffer_size) 2040 count = a->fs_api_buffer_size - off; 2041 2042 if (count < 0) 2043 return 0; 2044 2045 memcpy(buf, a->fs_api_buffer + off, count); 2046 2047 return count; 2048 } 2049 2050 /* Handle a call to write firmware via FS_API. */ 2051 int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off, 2052 int count) 2053 { 2054 if (off == 0) { 2055 struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf; 2056 u32 length = fs->command.length + offsetof( 2057 struct esas2r_ioctl_fs, 2058 data); 2059 2060 /* 2061 * Special case, for BEGIN commands, the length field 2062 * is lying to us, so just get enough for the header. 2063 */ 2064 2065 if (fs->command.command == ESAS2R_FS_CMD_BEGINW) 2066 length = offsetof(struct esas2r_ioctl_fs, data); 2067 2068 /* 2069 * Beginning a command. We assume we'll get at least 2070 * enough in the first write so we can look at the 2071 * header and see how much we need to alloc. 2072 */ 2073 2074 if (count < offsetof(struct esas2r_ioctl_fs, data)) 2075 return -EINVAL; 2076 2077 /* Allocate a buffer or use the existing buffer. */ 2078 if (a->fs_api_buffer) { 2079 if (a->fs_api_buffer_size < length) { 2080 /* Free too-small buffer and get a new one */ 2081 dma_free_coherent(&a->pcid->dev, 2082 (size_t)a->fs_api_buffer_size, 2083 a->fs_api_buffer, 2084 (dma_addr_t)a->ppfs_api_buffer); 2085 2086 goto re_allocate_buffer; 2087 } 2088 } else { 2089 re_allocate_buffer: 2090 a->fs_api_buffer_size = length; 2091 2092 a->fs_api_buffer = (u8 *)dma_alloc_coherent( 2093 &a->pcid->dev, 2094 (size_t)a->fs_api_buffer_size, 2095 (dma_addr_t *)&a->ppfs_api_buffer, 2096 GFP_KERNEL); 2097 } 2098 } 2099 2100 if (!a->fs_api_buffer) 2101 return -ENOMEM; 2102 2103 if (off > a->fs_api_buffer_size) 2104 return 0; 2105 2106 if (count + off > a->fs_api_buffer_size) 2107 count = a->fs_api_buffer_size - off; 2108 2109 if (count < 1) 2110 return 0; 2111 2112 memcpy(a->fs_api_buffer + off, buf, count); 2113 2114 return count; 2115 } 2116