/*
 * Management Module Support for MPT (Message Passing Technology) based
 * controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/poll.h>

#include <linux/io.h>
#include <linux/uaccess.h>

#include "mpt3sas_base.h"
#include "mpt3sas_ctl.h"


static struct fasync_struct *async_queue;
static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);


/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking
 * @BLOCKING: blocking
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, so they probably require sleep.
 */
enum block_state {
	NON_BLOCKING,
	BLOCKING,
};
/**
 * _ctl_sas_device_find_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_device_lock
 *
 * This searches for sas_device based on handle, then returns the
 * sas_device object.
 */
static struct _sas_device *
_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device, *r;

	r = NULL;
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (sas_device->handle != handle)
			continue;
		r = sas_device;
		goto out;
	}

 out:
	return r;
}

/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string pass from calling function
 * @mpi_reply: reply message frame
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.
 */
static void
_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	}

	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
	    ioc->name, calling_function_name, desc, smid);

	if (!mpi_reply)
		return;
	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		pr_info(MPT3SAS_FMT
		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		unsigned long flags;

		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = _ctl_sas_device_find_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
			    ioc->name, (unsigned long long)
			    sas_device->sas_address, sas_device->phy);
			pr_warn(MPT3SAS_FMT
			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
			    ioc->name, (unsigned long long)
			    sas_device->enclosure_logical_id, sas_device->slot);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			pr_info(MPT3SAS_FMT
			    "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
			    ioc->name,
			    scsi_reply->SCSIState,
			    scsi_reply->SCSIStatus);
	}
}

/**
 * mpt3sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	const void *sense_data;
	u32 sz;

	if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt3sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
	}
	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
	ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->ctl_cmds.done);
	return 1;
}
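
/*
 * Illustration (worked example, not from the original source): the event
 * filter below keeps one bit per firmware event spread across
 * MPI2_EVENT_NOTIFY_EVENTMASK_WORDS 32-bit words.  Event 33 (0x21), for
 * instance, lands in word 33 / 32 = 1, bit 33 % 32 = 1, so
 * _ctl_check_event_type() returns non-zero for it only when an application
 * has set that bit through the MPT3EVENTENABLE ioctl.
 */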
/**
 * _ctl_check_event_type - determines when an event needs logging
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The bitmask in ioc->event_type[] indicates which events should be
 * saved in the driver event_log.  This bitmask is set by application.
 *
 * Returns 1 when event should be captured, or zero means no match.
 */
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u16 i;
	u32 desired_event;

	if (event >= 128 || !event || !ioc->event_log)
		return 0;

	desired_event = (1 << (event % 32));
	if (!desired_event)
		desired_event = 1;
	i = event / 32;
	return desired_event & ioc->event_type[i];
}

/**
 * mpt3sas_ctl_add_to_event_log - add event
 * @ioc: per adapter object
 * @mpi_reply: reply message frame
 *
 * Return nothing.
 */
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT3_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}

/**
 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply)
		mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
	return 1;
}

/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc: per adapter object
 * @iocpp: The ioc pointer is returned in this.
 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
 *		 MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
 *
 * Return (-1) means error, else ioc_number.
 */
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
	int mpi_version)
{
	struct MPT3SAS_ADAPTER *ioc;
	int version = 0;
	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->id != ioc_number)
			continue;
		/* Check whether this ioctl command is from the right
		 * ioctl device or not; if not, continue the search.
		 */
		version = ioc->hba_mpi_version_belonged;
		/* MPI25_VERSION and MPI26_VERSION use the same ioctl
		 * device.
		 */
		if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
			if ((version == MPI25_VERSION) ||
			    (version == MPI26_VERSION))
				goto out;
			else
				continue;
		} else {
			if (version != mpi_version)
				continue;
		}
 out:
		spin_unlock(&gioc_lock);
		*iocpp = ioc;
		return ioc_number;
	}
	spin_unlock(&gioc_lock);
	*iocpp = NULL;
	return -1;
}

/**
 * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 */
void
mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	int i;
	u8 issue_reset;

	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			mpt3sas_send_diag_release(ioc, i, &issue_reset);
		}
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
			ioc->ctl_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
			complete(&ioc->ctl_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));

		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			ioc->diag_buffer_status[i] |=
			    MPT3_DIAG_BUFFER_IS_DIAG_RESET;
		}
		break;
	}
}

/**
 * _ctl_fasync -
 * @fd -
 * @filep -
 * @mode -
 *
 * Called when an application requests the fasync callback handler.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}

/**
 * _ctl_poll -
 * @file -
 * @wait -
 *
 */
static unsigned int
_ctl_poll(struct file *filep, poll_table *wait)
{
	struct MPT3SAS_ADAPTER *ioc;

	poll_wait(filep, &ctl_poll_wait, wait);

	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->aen_event_read_flag) {
			spin_unlock(&gioc_lock);
			return POLLIN | POLLRDNORM;
		}
	}
	spin_unlock(&gioc_lock);
	return 0;
}
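
/*
 * Illustrative sketch (assumption, not part of the original source): a
 * user-space monitor would typically open the control node exposed by this
 * driver, poll() it until _ctl_poll() above reports POLLIN, and then issue
 * MPT3EVENTREPORT to drain the event log; that handler clears
 * aen_event_read_flag so the next logged event can wake the monitor again.
 */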
/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg - (struct mpt3_ioctl_command)
 * @tm_request - pointer to mf from user space
 *
 * Returns 0 when a smid is found, else fail.
 * During failure, the reply frame is filled.
 */
static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	u8 found = 0;
	u16 i;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT3SAS_DEVICE *priv_data;
	unsigned long flags;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = ioc->scsiio_depth; i && !found; i--) {
		scmd = ioc->scsi_lookup[i - 1].scmd;
		if (scmd == NULL || scmd->device == NULL ||
		    scmd->device->hostdata == NULL)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
		found = 1;
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!found) {
		dctlprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: handle(0x%04x), lun(%d), no active mid!!\n",
		    ioc->name,
		    desc, le16_to_cpu(tm_request->DevHandle), lun));
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
	    desc, le16_to_cpu(tm_request->DevHandle), lun,
	    le16_to_cpu(tm_request->TaskMID)));
	return 0;
}

/**
 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
 * @ioc: per adapter object
 * @karg - (struct mpt3_ioctl_command)
 * @mf - pointer to mf in user space
 */
static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
	void __user *mf)
{
	MPI2RequestHeader_t *mpi_request = NULL, *request;
	MPI2DefaultReply_t *mpi_reply;
	u32 ioc_state;
	u16 smid;
	unsigned long timeout;
	u8 issue_reset;
	u32 sz;
	void *psge;
	void *data_out = NULL;
	dma_addr_t data_out_dma = 0;
	size_t data_out_sz = 0;
	void *data_in = NULL;
	dma_addr_t data_in_dma = 0;
	size_t data_in_sz = 0;
	long ret;
	u16 wait_state_count;

	issue_reset = 0;

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		ret = -EAGAIN;
		goto out;
	}

	wait_state_count = 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			pr_err(MPT3SAS_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			ret = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		pr_info(MPT3SAS_FMT
		    "%s: waiting for operational state(count=%d)\n",
		    ioc->name,
		    __func__, wait_state_count);
	}
	if (wait_state_count)
		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
	if (!mpi_request) {
		pr_err(MPT3SAS_FMT
		    "%s: failed obtaining a memory for mpi_request\n",
		    ioc->name, __func__);
		ret = -ENOMEM;
		goto out;
	}

	/* Check for overflow and wraparound */
	if (karg.data_sge_offset * 4 > ioc->request_sz ||
	    karg.data_sge_offset > (UINT_MAX / 4)) {
		ret = -EINVAL;
		goto out;
	}

	/* copy in request message frame from user */
	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
		    __func__);
		ret = -EFAULT;
		goto out;
	}

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
		if (!smid) {
			pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			    ioc->name, __func__);
			ret = -EAGAIN;
			goto out;
		}
	} else {

		smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
		if (!smid) {
			pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			    ioc->name, __func__);
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	memcpy(request, mpi_request, karg.data_sge_offset*4);
	ioc->ctl_cmds.smid = smid;
	data_out_sz = karg.data_out_size;
	data_in_sz = karg.data_in_size;

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
		    le16_to_cpu(mpi_request->FunctionDependent1) >
		    ioc->facts.MaxDevHandle) {
			ret = -EINVAL;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	/* obtain dma-able memory for data transfer */
	if (data_out_sz) /* WRITE */ {
		data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
		    &data_out_dma);
		if (!data_out) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
		if (copy_from_user(data_out, karg.data_out_buf_ptr,
		    data_out_sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -EFAULT;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	if (data_in_sz) /* READ */ {
		data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
		    &data_in_dma);
		if (!data_in) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	psge = (void *)request + (karg.data_sge_offset*4);

	/* send command to firmware */
	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);

	init_completion(&ioc->ctl_cmds.done);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsiio_request =
		    (Mpi2SCSIIORequest_t *)request;
		scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
		scsiio_request->SenseBufferLowAddress =
		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);

		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
			mpt3sas_base_put_smid_scsi_io(ioc, smid,
			    le16_to_cpu(mpi_request->FunctionDependent1));
		else
			mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
	{
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)request;

		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
		    ioc->name,
		    le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));

		if (tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
		    tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
				mpt3sas_base_free_smid(ioc, smid);
				goto out;
			}
		}

		mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
		break;
	}
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
	{
		Mpi2SmpPassthroughRequest_t *smp_request =
		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
		u8 *data;

		/* ioc determines which port to use */
		smp_request->PhysicalPort = 0xFF;
		if (smp_request->PassthroughFlags &
		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
			data = (u8 *)&smp_request->SGL;
		else {
			if (unlikely(data_out == NULL)) {
				pr_err("failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				mpt3sas_base_free_smid(ioc, smid);
				ret = -EINVAL;
				goto out;
			}
			data = data_out;
		}

		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
	case MPI2_FUNCTION_FW_DOWNLOAD:
	case MPI2_FUNCTION_FW_UPLOAD:
	{
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_TOOLBOX:
	{
		Mpi2ToolboxCleanRequest_t *toolbox_request =
		    (Mpi2ToolboxCleanRequest_t *)mpi_request;

		if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
			ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		} else {
			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		}
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
	{
		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;

		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
		    || sasiounit_request->Operation ==
		    MPI2_SAS_OP_PHY_LINK_RESET) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		/* drop to default case for posting the request */
	}
	default:
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}

	if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
	else
		timeout = karg.timeout;
	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
		mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
	    ioc->ioc_link_reset_in_progress) {
		ioc->ioc_link_reset_in_progress = 0;
		ioc->ignore_loginfos = 0;
	}
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request, karg.data_sge_offset);
		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	mpi_reply = ioc->ctl_cmds.reply;

	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
	    (ioc->logging_level & MPT_DEBUG_TM)) {
		Mpi2SCSITaskManagementReply_t *tm_reply =
		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;

		pr_info(MPT3SAS_FMT "TASK_MGMT: " \
		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
		    "TerminationCount(0x%08x)\n", ioc->name,
		    le16_to_cpu(tm_reply->IOCStatus),
		    le32_to_cpu(tm_reply->IOCLogInfo),
		    le32_to_cpu(tm_reply->TerminationCount));
	}

	/* copy out xdata to user */
	if (data_in_sz) {
		if (copy_to_user(karg.data_in_buf_ptr, data_in,
		    data_in_sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out reply message frame to user */
	if (karg.max_reply_bytes) {
		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out sense to user */
	if (karg.max_sense_bytes && (mpi_request->Function ==
	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
		sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

 issue_host_reset:
	if (issue_reset) {
		ret = -ENODATA;
		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_request->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
			pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
			    ioc->name,
			    le16_to_cpu(mpi_request->FunctionDependent1));
			mpt3sas_halt_firmware(ioc);
			mpt3sas_scsih_issue_locked_tm(ioc,
			    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
			    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
		} else
			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	}

 out:

	/* free memory associated with sg buffers */
	if (data_in)
		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
		    data_in_dma);

	if (data_out)
		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
		    data_out_dma);

	kfree(mpi_request);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return ret;
}
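
/*
 * Usage sketch (assumption, not part of the original source): an application
 * driving _ctl_do_mpt_command() above fills a struct mpt3_ioctl_command and
 * places the raw MPI request frame behind it, e.g.
 *
 *	struct mpt3_ioctl_command karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = 0;		/- adapter instance -/
 *	karg.timeout = 30;			/- seconds; a minimum is enforced -/
 *	karg.data_sge_offset = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
 *	karg.max_reply_bytes = sizeof(reply_buf);
 *	karg.reply_frame_buf_ptr = reply_buf;
 *	/- data_in/data_out pointers and sizes describe the payload buffers -/
 *	ioctl(fd, MPT3COMMAND, &karg);
 *
 * The exact user buffer layout is defined in mpt3sas_ctl.h; the fragment
 * above (with /- -/ standing in for nested comments) is only meant to show
 * which fields this handler consumes.
 */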
/**
 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_iocinfo karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	memset(&karg, 0, sizeof(karg));
	if (ioc->pfacts)
		karg.port_number = ioc->pfacts[0].PortNumber;
	karg.hw_rev = ioc->pdev->revision;
	karg.pci_id = ioc->pdev->device;
	karg.subsystem_device = ioc->pdev->subsystem_device;
	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
	karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
	karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
	karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
	karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
	karg.firmware_version = ioc->facts.FWVersion.Word;
	strcpy(karg.driver_version, ioc->driver_name);
	strcat(karg.driver_version, "-");
	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		if (ioc->is_warpdrive)
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
		else
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
		strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
		strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
		break;
	}
	karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventquery karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
	memcpy(karg.event_types, ioc->event_type,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventenable karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	memcpy(ioc->event_type, karg.event_types,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
	mpt3sas_base_validate_event_type(ioc, ioc->event_type);

	if (ioc->event_log)
		return 0;
	/* initialize event_log */
	ioc->event_context = 0;
	ioc->aen_event_read_flag = 0;
	ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
	    sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
	if (!ioc->event_log) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}
	return 0;
}

/**
 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventreport karg;
	u32 number_bytes, max_events, max;
	struct mpt3_ioctl_eventreport __user *uarg = arg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	number_bytes = karg.hdr.max_data_size -
	    sizeof(struct mpt3_ioctl_header);
	max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
	max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);

	/* If fewer than 1 event is requested, there must have
	 * been some type of error.
	 */
	if (!max || !ioc->event_log)
		return -ENODATA;

	number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* reset flag so SIGIO can restart */
	ioc->aen_event_read_flag = 0;
	return 0;
}
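
/*
 * Flow sketch (assumption, not part of the original source): the three event
 * handlers above are normally used together.  An application issues
 * MPT3EVENTENABLE with the desired bitmask (which also allocates
 * ioc->event_log), optionally reads the current mask back via MPT3EVENTQUERY,
 * then waits with poll()/SIGIO and drains the circular log with
 * MPT3EVENTREPORT, which re-arms notification by clearing aen_event_read_flag.
 */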
"SUCCESS" : "FAILED")); 1221 return 0; 1222 } 1223 1224 /** 1225 * _ctl_btdh_search_sas_device - searching for sas device 1226 * @ioc: per adapter object 1227 * @btdh: btdh ioctl payload 1228 */ 1229 static int 1230 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, 1231 struct mpt3_ioctl_btdh_mapping *btdh) 1232 { 1233 struct _sas_device *sas_device; 1234 unsigned long flags; 1235 int rc = 0; 1236 1237 if (list_empty(&ioc->sas_device_list)) 1238 return rc; 1239 1240 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1241 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 1242 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1243 btdh->handle == sas_device->handle) { 1244 btdh->bus = sas_device->channel; 1245 btdh->id = sas_device->id; 1246 rc = 1; 1247 goto out; 1248 } else if (btdh->bus == sas_device->channel && btdh->id == 1249 sas_device->id && btdh->handle == 0xFFFF) { 1250 btdh->handle = sas_device->handle; 1251 rc = 1; 1252 goto out; 1253 } 1254 } 1255 out: 1256 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1257 return rc; 1258 } 1259 1260 /** 1261 * _ctl_btdh_search_raid_device - searching for raid device 1262 * @ioc: per adapter object 1263 * @btdh: btdh ioctl payload 1264 */ 1265 static int 1266 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, 1267 struct mpt3_ioctl_btdh_mapping *btdh) 1268 { 1269 struct _raid_device *raid_device; 1270 unsigned long flags; 1271 int rc = 0; 1272 1273 if (list_empty(&ioc->raid_device_list)) 1274 return rc; 1275 1276 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1277 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1278 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1279 btdh->handle == raid_device->handle) { 1280 btdh->bus = raid_device->channel; 1281 btdh->id = raid_device->id; 1282 rc = 1; 1283 goto out; 1284 } else if (btdh->bus == raid_device->channel && btdh->id == 1285 raid_device->id && btdh->handle == 0xFFFF) { 1286 btdh->handle = raid_device->handle; 1287 rc = 1; 1288 goto out; 1289 } 1290 } 1291 out: 1292 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1293 return rc; 1294 } 1295 1296 /** 1297 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode 1298 * @ioc: per adapter object 1299 * @arg - user space buffer containing ioctl content 1300 */ 1301 static long 1302 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1303 { 1304 struct mpt3_ioctl_btdh_mapping karg; 1305 int rc; 1306 1307 if (copy_from_user(&karg, arg, sizeof(karg))) { 1308 pr_err("failure at %s:%d/%s()!\n", 1309 __FILE__, __LINE__, __func__); 1310 return -EFAULT; 1311 } 1312 1313 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1314 __func__)); 1315 1316 rc = _ctl_btdh_search_sas_device(ioc, &karg); 1317 if (!rc) 1318 _ctl_btdh_search_raid_device(ioc, &karg); 1319 1320 if (copy_to_user(arg, &karg, sizeof(karg))) { 1321 pr_err("failure at %s:%d/%s()!\n", 1322 __FILE__, __LINE__, __func__); 1323 return -EFAULT; 1324 } 1325 return 0; 1326 } 1327 1328 /** 1329 * _ctl_diag_capability - return diag buffer capability 1330 * @ioc: per adapter object 1331 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED 1332 * 1333 * returns 1 when diag buffer support is enabled in firmware 1334 */ 1335 static u8 1336 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) 1337 { 1338 u8 rc = 0; 1339 1340 switch (buffer_type) { 1341 case MPI2_DIAG_BUF_TYPE_TRACE: 1342 if (ioc->facts.IOCCapabilities & 1343 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) 1344 rc = 1; 1345 break; 
/**
 * _ctl_diag_capability - return diag buffer capability
 * @ioc: per adapter object
 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
 *
 * returns 1 when diag buffer support is enabled in firmware
 */
static u8
_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
{
	u8 rc = 0;

	switch (buffer_type) {
	case MPI2_DIAG_BUF_TYPE_TRACE:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_EXTENDED:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			rc = 1;
	}

	return rc;
}


/**
 * _ctl_diag_register_2 - wrapper for registering diag buffer support
 * @ioc: per adapter object
 * @diag_register: the diag_register struct passed in from user space
 *
 */
static long
_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_diag_register *diag_register)
{
	int rc, i;
	void *request_data = NULL;
	dma_addr_t request_data_dma;
	u32 request_data_sz = 0;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	u8 buffer_type;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	u8 issue_reset = 0;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		pr_err(MPT3SAS_FMT
		    "%s: failed due to ioc not operational\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	buffer_type = diag_register->buffer_type;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
		pr_err(MPT3SAS_FMT
		    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
		    ioc->name, __func__,
		    buffer_type);
		return -EINVAL;
	}

	if (diag_register->requested_buffer_size % 4) {
		pr_err(MPT3SAS_FMT
		    "%s: the requested_buffer_size is not 4 byte aligned\n",
		    ioc->name, __func__);
		return -EINVAL;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
	ioc->diag_buffer_status[buffer_type] = 0;
	memcpy(ioc->product_specific[buffer_type],
	    diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;

	if (request_data) {
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
			pci_free_consistent(ioc->pdev,
			    ioc->diag_buffer_sz[buffer_type],
			    request_data, request_data_dma);
			request_data = NULL;
		}
	}

	if (request_data == NULL) {
		ioc->diag_buffer_sz[buffer_type] = 0;
		ioc->diag_buffer_dma[buffer_type] = 0;
		request_data = pci_alloc_consistent(
			ioc->pdev, request_data_sz, &request_data_dma);
		if (request_data == NULL) {
			pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
			    " for diag buffers, requested size(%d)\n",
			    ioc->name, __func__, request_data_sz);
			mpt3sas_base_free_smid(ioc, smid);
			return -ENOMEM;
		}
		ioc->diag_buffer[buffer_type] = request_data;
		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
	}

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = diag_register->buffer_type;
	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
	    ioc->name, __func__, request_data,
	    (unsigned long long)request_data_dma,
	    le32_to_cpu(mpi_request->BufferLength)));

	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);

	init_completion(&ioc->ctl_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		pr_err(MPT3SAS_FMT "%s: no reply message\n",
		    ioc->name, __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
		    ioc->name, __func__));
	} else {
		pr_info(MPT3SAS_FMT
		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
		    ioc->name, __func__,
		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

 out:

	if (rc && request_data)
		pci_free_consistent(ioc->pdev, request_data_sz,
		    request_data, request_data_dma);

	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
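
/*
 * Lifecycle sketch (assumption, not part of the original source): the usual
 * diag buffer sequence handled by this file is register (DIAG_BUFFER_POST,
 * above), let firmware fill the buffer, release (DIAG_RELEASE, further down)
 * so firmware stops writing, copy the contents out with the read-buffer
 * ioctl, and finally unregister to free the DMA memory.
 * mpt3sas_enable_diag_buffer() below shows a minimal registration:
 * buffer_type, a 4-byte aligned requested_buffer_size and a unique_id are
 * the only fields it fills in.
 */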
/**
 * mpt3sas_enable_diag_buffer - enabling diag_buffers support at driver load time
 * @ioc: per adapter object
 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
 *
 * This is called when command line option diag_buffer_enable is enabled
 * at driver load time.
 */
void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
	struct mpt3_diag_register diag_register;

	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));

	if (bits_to_register & 1) {
		pr_info(MPT3SAS_FMT "registering trace buffer support\n",
		    ioc->name);
		ioc->diag_trigger_master.MasterData =
		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075900;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 2) {
		pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 4) {
		pr_info(MPT3SAS_FMT "registering extended buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}
}

/**
 * _ctl_diag_register - application register with driver
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * This will allow the driver to set up any required buffers that will be
 * needed by firmware to communicate with the driver.
 */
static long
_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_register karg;
	long rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	rc = _ctl_diag_register_2(ioc, &karg);
	return rc;
}

/**
 * _ctl_diag_unregister - application unregister with driver
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * This will allow the driver to clean up any memory allocated for diag
 * messages and to free up any resources.
 */
static long
_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_unregister karg;
	void *request_data;
	dma_addr_t request_data_dma;
	u32 request_data_sz;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	buffer_type = karg.unique_id & 0x000000ff;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		pr_err(MPT3SAS_FMT
		    "%s: buffer_type(0x%02x) is not registered\n",
		    ioc->name, __func__, buffer_type);
		return -EINVAL;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		pr_err(MPT3SAS_FMT
		    "%s: buffer_type(0x%02x) has not been released\n",
		    ioc->name, __func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		pr_err(MPT3SAS_FMT
		    "%s: unique_id(0x%08x) is not registered\n",
		    ioc->name, __func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	request_data_sz = ioc->diag_buffer_sz[buffer_type];
	request_data_dma = ioc->diag_buffer_dma[buffer_type];
	pci_free_consistent(ioc->pdev, request_data_sz,
	    request_data, request_data_dma);
	ioc->diag_buffer[buffer_type] = NULL;
	ioc->diag_buffer_status[buffer_type] = 0;
	return 0;
}

/**
 * _ctl_diag_query - query relevant info associated with diag buffers
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * The application will send only buffer_type and unique_id.  Driver will
 * inspect unique_id first, if valid, fill in all the info.  If unique_id is
 * 0x00, the driver will return info specified by Buffer Type.
 */
static long
_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_query karg;
	void *request_data;
	int i;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	karg.application_flags = 0;
	buffer_type = karg.buffer_type;

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		pr_err(MPT3SAS_FMT
		    "%s: buffer_type(0x%02x) is not registered\n",
		    ioc->name, __func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id & 0xffffff00) {
		if (karg.unique_id != ioc->unique_id[buffer_type]) {
			pr_err(MPT3SAS_FMT
			    "%s: unique_id(0x%08x) is not registered\n",
			    ioc->name, __func__, karg.unique_id);
			return -EINVAL;
		}
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have buffer for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
		karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
		    MPT3_APP_FLAGS_BUFFER_VALID);
	else
		karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
		    MPT3_APP_FLAGS_BUFFER_VALID |
		    MPT3_APP_FLAGS_FW_BUFFER_ACCESS);

	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		karg.product_specific[i] =
		    ioc->product_specific[buffer_type][i];

	karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
	karg.driver_added_buffer_size = 0;
	karg.unique_id = ioc->unique_id[buffer_type];
	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];

	if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
		pr_err(MPT3SAS_FMT
		    "%s: unable to write mpt3_diag_query data @ %p\n",
		    ioc->name, __func__, arg);
		return -EFAULT;
	}
	return 0;
}

/**
 * mpt3sas_send_diag_release - Diag Release Message
 * @ioc: per adapter object
 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
 * @issue_reset - specifies whether host reset is required.
 *
 */
int
mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
	u8 *issue_reset)
{
	Mpi2DiagReleaseRequest_t *mpi_request;
	Mpi2DiagReleaseReply_t *mpi_reply;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	int rc;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	rc = 0;
	*issue_reset = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (ioc->diag_buffer_status[buffer_type] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED)
			ioc->diag_buffer_status[buffer_type] |=
			    MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: skipping due to FAULT state\n", ioc->name,
		    __func__));
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
	mpi_request->BufferType = buffer_type;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	init_completion(&ioc->ctl_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagReleaseRequest_t)/4);
		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
			*issue_reset = 1;
		rc = -EFAULT;
		goto out;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		pr_err(MPT3SAS_FMT "%s: no reply message\n",
		    ioc->name, __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
		    ioc->name, __func__));
	} else {
		pr_info(MPT3SAS_FMT
		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
		    ioc->name, __func__,
		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 out:
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
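
/*
 * Note (worked example, not from the original source): the ioctl handlers
 * below derive the buffer type from the low byte of unique_id, i.e.
 * buffer_type = unique_id & 0x000000ff.  The trace buffer registered by
 * mpt3sas_enable_diag_buffer() uses unique_id 0x7075900, whose low byte 0x00
 * selects MPI2_DIAG_BUF_TYPE_TRACE.
 */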
1892 */ 1893 static long 1894 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1895 { 1896 struct mpt3_diag_release karg; 1897 void *request_data; 1898 int rc; 1899 u8 buffer_type; 1900 u8 issue_reset = 0; 1901 1902 if (copy_from_user(&karg, arg, sizeof(karg))) { 1903 pr_err("failure at %s:%d/%s()!\n", 1904 __FILE__, __LINE__, __func__); 1905 return -EFAULT; 1906 } 1907 1908 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1909 __func__)); 1910 1911 buffer_type = karg.unique_id & 0x000000ff; 1912 if (!_ctl_diag_capability(ioc, buffer_type)) { 1913 pr_err(MPT3SAS_FMT 1914 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1915 ioc->name, __func__, buffer_type); 1916 return -EPERM; 1917 } 1918 1919 if ((ioc->diag_buffer_status[buffer_type] & 1920 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1921 pr_err(MPT3SAS_FMT 1922 "%s: buffer_type(0x%02x) is not registered\n", 1923 ioc->name, __func__, buffer_type); 1924 return -EINVAL; 1925 } 1926 1927 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1928 pr_err(MPT3SAS_FMT 1929 "%s: unique_id(0x%08x) is not registered\n", 1930 ioc->name, __func__, karg.unique_id); 1931 return -EINVAL; 1932 } 1933 1934 if (ioc->diag_buffer_status[buffer_type] & 1935 MPT3_DIAG_BUFFER_IS_RELEASED) { 1936 pr_err(MPT3SAS_FMT 1937 "%s: buffer_type(0x%02x) is already released\n", 1938 ioc->name, __func__, 1939 buffer_type); 1940 return 0; 1941 } 1942 1943 request_data = ioc->diag_buffer[buffer_type]; 1944 1945 if (!request_data) { 1946 pr_err(MPT3SAS_FMT 1947 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", 1948 ioc->name, __func__, buffer_type); 1949 return -ENOMEM; 1950 } 1951 1952 /* buffers were released by due to host reset */ 1953 if ((ioc->diag_buffer_status[buffer_type] & 1954 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) { 1955 ioc->diag_buffer_status[buffer_type] |= 1956 MPT3_DIAG_BUFFER_IS_RELEASED; 1957 ioc->diag_buffer_status[buffer_type] &= 1958 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; 1959 pr_err(MPT3SAS_FMT 1960 "%s: buffer_type(0x%02x) was released due to host reset\n", 1961 ioc->name, __func__, buffer_type); 1962 return 0; 1963 } 1964 1965 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); 1966 1967 if (issue_reset) 1968 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 1969 1970 return rc; 1971 } 1972 1973 /** 1974 * _ctl_diag_read_buffer - request for copy of the diag buffer 1975 * @ioc: per adapter object 1976 * @arg - user space buffer containing ioctl content 1977 */ 1978 static long 1979 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1980 { 1981 struct mpt3_diag_read_buffer karg; 1982 struct mpt3_diag_read_buffer __user *uarg = arg; 1983 void *request_data, *diag_data; 1984 Mpi2DiagBufferPostRequest_t *mpi_request; 1985 Mpi2DiagBufferPostReply_t *mpi_reply; 1986 int rc, i; 1987 u8 buffer_type; 1988 unsigned long request_size, copy_size; 1989 u16 smid; 1990 u16 ioc_status; 1991 u8 issue_reset = 0; 1992 1993 if (copy_from_user(&karg, arg, sizeof(karg))) { 1994 pr_err("failure at %s:%d/%s()!\n", 1995 __FILE__, __LINE__, __func__); 1996 return -EFAULT; 1997 } 1998 1999 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2000 __func__)); 2001 2002 buffer_type = karg.unique_id & 0x000000ff; 2003 if (!_ctl_diag_capability(ioc, buffer_type)) { 2004 pr_err(MPT3SAS_FMT 2005 "%s: doesn't have capability for buffer_type(0x%02x)\n", 2006 ioc->name, __func__, buffer_type); 2007 return -EPERM; 2008 } 2009 2010 if (karg.unique_id != ioc->unique_id[buffer_type]) { 2011 pr_err(MPT3SAS_FMT 2012 "%s: 
unique_id(0x%08x) is not registered\n", 2013 ioc->name, __func__, karg.unique_id); 2014 return -EINVAL; 2015 } 2016 2017 request_data = ioc->diag_buffer[buffer_type]; 2018 if (!request_data) { 2019 pr_err(MPT3SAS_FMT 2020 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 2021 ioc->name, __func__, buffer_type); 2022 return -ENOMEM; 2023 } 2024 2025 request_size = ioc->diag_buffer_sz[buffer_type]; 2026 2027 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { 2028 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \ 2029 "or bytes_to_read are not 4 byte aligned\n", ioc->name, 2030 __func__); 2031 return -EINVAL; 2032 } 2033 2034 if (karg.starting_offset > request_size) 2035 return -EINVAL; 2036 2037 diag_data = (void *)(request_data + karg.starting_offset); 2038 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2039 "%s: diag_buffer(%p), offset(%d), sz(%d)\n", 2040 ioc->name, __func__, 2041 diag_data, karg.starting_offset, karg.bytes_to_read)); 2042 2043 /* Truncate data on requests that are too large */ 2044 if ((diag_data + karg.bytes_to_read < diag_data) || 2045 (diag_data + karg.bytes_to_read > request_data + request_size)) 2046 copy_size = request_size - karg.starting_offset; 2047 else 2048 copy_size = karg.bytes_to_read; 2049 2050 if (copy_to_user((void __user *)uarg->diagnostic_data, 2051 diag_data, copy_size)) { 2052 pr_err(MPT3SAS_FMT 2053 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", 2054 ioc->name, __func__, diag_data); 2055 return -EFAULT; 2056 } 2057 2058 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) 2059 return 0; 2060 2061 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2062 "%s: Reregister buffer_type(0x%02x)\n", 2063 ioc->name, __func__, buffer_type)); 2064 if ((ioc->diag_buffer_status[buffer_type] & 2065 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { 2066 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2067 "%s: buffer_type(0x%02x) is still registered\n", 2068 ioc->name, __func__, buffer_type)); 2069 return 0; 2070 } 2071 /* Get a free request frame and save the message context. 
2072 */ 2073 2074 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 2075 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 2076 ioc->name, __func__); 2077 rc = -EAGAIN; 2078 goto out; 2079 } 2080 2081 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 2082 if (!smid) { 2083 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 2084 ioc->name, __func__); 2085 rc = -EAGAIN; 2086 goto out; 2087 } 2088 2089 rc = 0; 2090 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 2091 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 2092 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2093 ioc->ctl_cmds.smid = smid; 2094 2095 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 2096 mpi_request->BufferType = buffer_type; 2097 mpi_request->BufferLength = 2098 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); 2099 mpi_request->BufferAddress = 2100 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); 2101 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 2102 mpi_request->ProductSpecific[i] = 2103 cpu_to_le32(ioc->product_specific[buffer_type][i]); 2104 mpi_request->VF_ID = 0; /* TODO */ 2105 mpi_request->VP_ID = 0; 2106 2107 init_completion(&ioc->ctl_cmds.done); 2108 mpt3sas_base_put_smid_default(ioc, smid); 2109 wait_for_completion_timeout(&ioc->ctl_cmds.done, 2110 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 2111 2112 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 2113 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 2114 __func__); 2115 _debug_dump_mf(mpi_request, 2116 sizeof(Mpi2DiagBufferPostRequest_t)/4); 2117 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 2118 issue_reset = 1; 2119 goto issue_host_reset; 2120 } 2121 2122 /* process the completed Reply Message Frame */ 2123 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 2124 pr_err(MPT3SAS_FMT "%s: no reply message\n", 2125 ioc->name, __func__); 2126 rc = -EFAULT; 2127 goto out; 2128 } 2129 2130 mpi_reply = ioc->ctl_cmds.reply; 2131 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 2132 2133 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 2134 ioc->diag_buffer_status[buffer_type] |= 2135 MPT3_DIAG_BUFFER_IS_REGISTERED; 2136 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 2137 ioc->name, __func__)); 2138 } else { 2139 pr_info(MPT3SAS_FMT 2140 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 2141 ioc->name, __func__, 2142 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 2143 rc = -EFAULT; 2144 } 2145 2146 issue_host_reset: 2147 if (issue_reset) 2148 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 2149 2150 out: 2151 2152 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 2153 return rc; 2154 } 2155 2156 2157 2158 #ifdef CONFIG_COMPAT 2159 /** 2160 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. 2161 * @ioc: per adapter object 2162 * @cmd - ioctl opcode 2163 * @arg - (struct mpt3_ioctl_command32) 2164 * 2165 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. 
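 *
 * The 32-bit variant of the command structure carries 32-bit user
 * pointers; compat_ptr() widens each of them to a proper 64-bit
 * void __user * before the request is handed to _ctl_do_mpt_command().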
2166 */ 2167 static long 2168 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, 2169 void __user *arg) 2170 { 2171 struct mpt3_ioctl_command32 karg32; 2172 struct mpt3_ioctl_command32 __user *uarg; 2173 struct mpt3_ioctl_command karg; 2174 2175 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) 2176 return -EINVAL; 2177 2178 uarg = (struct mpt3_ioctl_command32 __user *) arg; 2179 2180 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { 2181 pr_err("failure at %s:%d/%s()!\n", 2182 __FILE__, __LINE__, __func__); 2183 return -EFAULT; 2184 } 2185 2186 memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); 2187 karg.hdr.ioc_number = karg32.hdr.ioc_number; 2188 karg.hdr.port_number = karg32.hdr.port_number; 2189 karg.hdr.max_data_size = karg32.hdr.max_data_size; 2190 karg.timeout = karg32.timeout; 2191 karg.max_reply_bytes = karg32.max_reply_bytes; 2192 karg.data_in_size = karg32.data_in_size; 2193 karg.data_out_size = karg32.data_out_size; 2194 karg.max_sense_bytes = karg32.max_sense_bytes; 2195 karg.data_sge_offset = karg32.data_sge_offset; 2196 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); 2197 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); 2198 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); 2199 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); 2200 return _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2201 } 2202 #endif 2203 2204 /** 2205 * _ctl_ioctl_main - main ioctl entry point 2206 * @file - (struct file) 2207 * @cmd - ioctl opcode 2208 * @arg - user space data buffer 2209 * @compat - handles 32 bit applications in 64bit os 2210 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & 2211 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. 2212 */ 2213 static long 2214 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, 2215 u8 compat, u16 mpi_version) 2216 { 2217 struct MPT3SAS_ADAPTER *ioc; 2218 struct mpt3_ioctl_header ioctl_header; 2219 enum block_state state; 2220 long ret = -EINVAL; 2221 2222 /* get IOCTL header */ 2223 if (copy_from_user(&ioctl_header, (char __user *)arg, 2224 sizeof(struct mpt3_ioctl_header))) { 2225 pr_err("failure at %s:%d/%s()!\n", 2226 __FILE__, __LINE__, __func__); 2227 return -EFAULT; 2228 } 2229 2230 if (_ctl_verify_adapter(ioctl_header.ioc_number, 2231 &ioc, mpi_version) == -1 || !ioc) 2232 return -ENODEV; 2233 2234 /* pci_access_mutex lock acquired by ioctl path */ 2235 mutex_lock(&ioc->pci_access_mutex); 2236 2237 if (ioc->shost_recovery || ioc->pci_error_recovery || 2238 ioc->is_driver_loading || ioc->remove_host) { 2239 ret = -EAGAIN; 2240 goto out_unlock_pciaccess; 2241 } 2242 2243 state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; 2244 if (state == NON_BLOCKING) { 2245 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { 2246 ret = -EAGAIN; 2247 goto out_unlock_pciaccess; 2248 } 2249 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { 2250 ret = -ERESTARTSYS; 2251 goto out_unlock_pciaccess; 2252 } 2253 2254 2255 switch (cmd) { 2256 case MPT3IOCINFO: 2257 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) 2258 ret = _ctl_getiocinfo(ioc, arg); 2259 break; 2260 #ifdef CONFIG_COMPAT 2261 case MPT3COMMAND32: 2262 #endif 2263 case MPT3COMMAND: 2264 { 2265 struct mpt3_ioctl_command __user *uarg; 2266 struct mpt3_ioctl_command karg; 2267 2268 #ifdef CONFIG_COMPAT 2269 if (compat) { 2270 ret = _ctl_compat_mpt_command(ioc, cmd, arg); 2271 break; 2272 } 2273 #endif 2274 if (copy_from_user(&karg, arg, sizeof(karg))) { 2275 pr_err("failure at %s:%d/%s()!\n", 2276 __FILE__, __LINE__, __func__); 2277 ret = -EFAULT; 2278 break; 2279 } 2280 2281 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { 2282 uarg = arg; 2283 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2284 } 2285 break; 2286 } 2287 case MPT3EVENTQUERY: 2288 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) 2289 ret = _ctl_eventquery(ioc, arg); 2290 break; 2291 case MPT3EVENTENABLE: 2292 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) 2293 ret = _ctl_eventenable(ioc, arg); 2294 break; 2295 case MPT3EVENTREPORT: 2296 ret = _ctl_eventreport(ioc, arg); 2297 break; 2298 case MPT3HARDRESET: 2299 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) 2300 ret = _ctl_do_reset(ioc, arg); 2301 break; 2302 case MPT3BTDHMAPPING: 2303 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) 2304 ret = _ctl_btdh_mapping(ioc, arg); 2305 break; 2306 case MPT3DIAGREGISTER: 2307 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) 2308 ret = _ctl_diag_register(ioc, arg); 2309 break; 2310 case MPT3DIAGUNREGISTER: 2311 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) 2312 ret = _ctl_diag_unregister(ioc, arg); 2313 break; 2314 case MPT3DIAGQUERY: 2315 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) 2316 ret = _ctl_diag_query(ioc, arg); 2317 break; 2318 case MPT3DIAGRELEASE: 2319 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) 2320 ret = _ctl_diag_release(ioc, arg); 2321 break; 2322 case MPT3DIAGREADBUFFER: 2323 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) 2324 ret = _ctl_diag_read_buffer(ioc, arg); 2325 break; 2326 default: 2327 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2328 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); 2329 break; 2330 } 2331 2332 mutex_unlock(&ioc->ctl_cmds.mutex); 2333 out_unlock_pciaccess: 2334 mutex_unlock(&ioc->pci_access_mutex); 2335 return ret; 2336 } 2337 2338 /** 2339 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) 2340 * @file - (struct file) 2341 * @cmd - ioctl opcode 2342 * @arg - 2343 */ 2344 static long 2345 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2346 { 2347 long ret; 2348 2349 /* pass MPI25_VERSION | MPI26_VERSION value, 2350 * to indicate that this ioctl cmd 2351 * came from mpt3ctl ioctl device. 
2352 */ 2353 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, 2354 MPI25_VERSION | MPI26_VERSION); 2355 return ret; 2356 } 2357 2358 /** 2359 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) 2360 * @file - (struct file) 2361 * @cmd - ioctl opcode 2362 * @arg - 2363 */ 2364 static long 2365 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2366 { 2367 long ret; 2368 2369 /* pass MPI2_VERSION value, to indicate that this ioctl cmd 2370 * came from mpt2ctl ioctl device. 2371 */ 2372 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); 2373 return ret; 2374 } 2375 #ifdef CONFIG_COMPAT 2376 /** 2377 *_ ctl_ioctl_compat - main ioctl entry point (compat) 2378 * @file - 2379 * @cmd - 2380 * @arg - 2381 * 2382 * This routine handles 32 bit applications in 64bit os. 2383 */ 2384 static long 2385 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2386 { 2387 long ret; 2388 2389 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, 2390 MPI25_VERSION | MPI26_VERSION); 2391 return ret; 2392 } 2393 2394 /** 2395 *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) 2396 * @file - 2397 * @cmd - 2398 * @arg - 2399 * 2400 * This routine handles 32 bit applications in 64bit os. 2401 */ 2402 static long 2403 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2404 { 2405 long ret; 2406 2407 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); 2408 return ret; 2409 } 2410 #endif 2411 2412 /* scsi host attributes */ 2413 /** 2414 * _ctl_version_fw_show - firmware version 2415 * @cdev - pointer to embedded class device 2416 * @buf - the buffer returned 2417 * 2418 * A sysfs 'read-only' shost attribute. 2419 */ 2420 static ssize_t 2421 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, 2422 char *buf) 2423 { 2424 struct Scsi_Host *shost = class_to_shost(cdev); 2425 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2426 2427 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2428 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 2429 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 2430 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 2431 ioc->facts.FWVersion.Word & 0x000000FF); 2432 } 2433 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); 2434 2435 /** 2436 * _ctl_version_bios_show - bios version 2437 * @cdev - pointer to embedded class device 2438 * @buf - the buffer returned 2439 * 2440 * A sysfs 'read-only' shost attribute. 2441 */ 2442 static ssize_t 2443 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, 2444 char *buf) 2445 { 2446 struct Scsi_Host *shost = class_to_shost(cdev); 2447 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2448 2449 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 2450 2451 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2452 (version & 0xFF000000) >> 24, 2453 (version & 0x00FF0000) >> 16, 2454 (version & 0x0000FF00) >> 8, 2455 version & 0x000000FF); 2456 } 2457 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); 2458 2459 /** 2460 * _ctl_version_mpi_show - MPI (message passing interface) version 2461 * @cdev - pointer to embedded class device 2462 * @buf - the buffer returned 2463 * 2464 * A sysfs 'read-only' shost attribute. 
2465 */ 2466 static ssize_t 2467 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, 2468 char *buf) 2469 { 2470 struct Scsi_Host *shost = class_to_shost(cdev); 2471 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2472 2473 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", 2474 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); 2475 } 2476 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); 2477 2478 /** 2479 * _ctl_version_product_show - product name 2480 * @cdev - pointer to embedded class device 2481 * @buf - the buffer returned 2482 * 2483 * A sysfs 'read-only' shost attribute. 2484 */ 2485 static ssize_t 2486 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr, 2487 char *buf) 2488 { 2489 struct Scsi_Host *shost = class_to_shost(cdev); 2490 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2491 2492 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); 2493 } 2494 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); 2495 2496 /** 2497 * _ctl_version_nvdata_persistent_show - ndvata persistent version 2498 * @cdev - pointer to embedded class device 2499 * @buf - the buffer returned 2500 * 2501 * A sysfs 'read-only' shost attribute. 2502 */ 2503 static ssize_t 2504 _ctl_version_nvdata_persistent_show(struct device *cdev, 2505 struct device_attribute *attr, char *buf) 2506 { 2507 struct Scsi_Host *shost = class_to_shost(cdev); 2508 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2509 2510 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2511 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); 2512 } 2513 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, 2514 _ctl_version_nvdata_persistent_show, NULL); 2515 2516 /** 2517 * _ctl_version_nvdata_default_show - nvdata default version 2518 * @cdev - pointer to embedded class device 2519 * @buf - the buffer returned 2520 * 2521 * A sysfs 'read-only' shost attribute. 2522 */ 2523 static ssize_t 2524 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute 2525 *attr, char *buf) 2526 { 2527 struct Scsi_Host *shost = class_to_shost(cdev); 2528 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2529 2530 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2531 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); 2532 } 2533 static DEVICE_ATTR(version_nvdata_default, S_IRUGO, 2534 _ctl_version_nvdata_default_show, NULL); 2535 2536 /** 2537 * _ctl_board_name_show - board name 2538 * @cdev - pointer to embedded class device 2539 * @buf - the buffer returned 2540 * 2541 * A sysfs 'read-only' shost attribute. 2542 */ 2543 static ssize_t 2544 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr, 2545 char *buf) 2546 { 2547 struct Scsi_Host *shost = class_to_shost(cdev); 2548 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2549 2550 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); 2551 } 2552 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); 2553 2554 /** 2555 * _ctl_board_assembly_show - board assembly name 2556 * @cdev - pointer to embedded class device 2557 * @buf - the buffer returned 2558 * 2559 * A sysfs 'read-only' shost attribute. 
 */
static ssize_t
_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
}
static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);

/**
 * _ctl_board_tracer_show - board tracer number
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
}
static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);

/**
 * _ctl_io_delay_show - io missing delay
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
}
static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);

/**
 * _ctl_device_delay_show - device missing delay
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);

/**
 * _ctl_fw_queue_depth_show - global credits
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the firmware queue depth limit.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
}
static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);

/**
 * _ctl_host_sas_address_show - sas address
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the controller sas address.
 *
 * A sysfs 'read-only' shost attribute.
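 *
 * Illustrative sketch (not part of the driver): being a shost attribute,
 * this value is typically exported as
 * /sys/class/scsi_host/host<N>/host_sas_address, so a monitoring tool
 * can pick it up with plain file I/O (host_no below is an assumed
 * variable holding the SCSI host number):
 *
 *	char path[64], addr[32];
 *	FILE *f;
 *
 *	snprintf(path, sizeof(path),
 *	    "/sys/class/scsi_host/host%d/host_sas_address", host_no);
 *	f = fopen(path, "r");
 *	if (f && fgets(addr, sizeof(addr), f))
 *		printf("controller sas address: %s", addr);
 *	if (f)
 *		fclose(f);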
 */
static ssize_t
_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
	char *buf)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    (unsigned long long)ioc->sas_hba.sas_address);
}
static DEVICE_ATTR(host_sas_address, S_IRUGO,
	_ctl_host_sas_address_show, NULL);

/**
 * _ctl_logging_level_show - logging level
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
}
static ssize_t
_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%x", &val) != 1)
		return -EINVAL;

	ioc->logging_level = val;
	pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
	    ioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
	_ctl_logging_level_store);

/**
 * _ctl_fwfault_debug_show - show/store fwfault_debug
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * mpt3sas_fwfault_debug is a command line option.
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
}
static ssize_t
_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	ioc->fwfault_debug = val;
	pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
	    ioc->fwfault_debug);
	return strlen(buf);
}
static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
	_ctl_fwfault_debug_show, _ctl_fwfault_debug_store);

/**
 * _ctl_ioc_reset_count_show - ioc reset count
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the number of times the IOC has been reset.
 *
 * A sysfs 'read-only' shost attribute.
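 *
 * The count is bumped by the base driver each time the hard (diag)
 * reset handler runs, so it reflects how many host resets this
 * adapter has gone through since load.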
2754 */ 2755 static ssize_t 2756 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, 2757 char *buf) 2758 { 2759 struct Scsi_Host *shost = class_to_shost(cdev); 2760 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2761 2762 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); 2763 } 2764 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); 2765 2766 /** 2767 * _ctl_ioc_reply_queue_count_show - number of reply queues 2768 * @cdev - pointer to embedded class device 2769 * @buf - the buffer returned 2770 * 2771 * This is number of reply queues 2772 * 2773 * A sysfs 'read-only' shost attribute. 2774 */ 2775 static ssize_t 2776 _ctl_ioc_reply_queue_count_show(struct device *cdev, 2777 struct device_attribute *attr, char *buf) 2778 { 2779 u8 reply_queue_count; 2780 struct Scsi_Host *shost = class_to_shost(cdev); 2781 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2782 2783 if ((ioc->facts.IOCCapabilities & 2784 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) 2785 reply_queue_count = ioc->reply_queue_count; 2786 else 2787 reply_queue_count = 1; 2788 2789 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); 2790 } 2791 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, 2792 NULL); 2793 2794 /** 2795 * _ctl_BRM_status_show - Backup Rail Monitor Status 2796 * @cdev - pointer to embedded class device 2797 * @buf - the buffer returned 2798 * 2799 * This is number of reply queues 2800 * 2801 * A sysfs 'read-only' shost attribute. 2802 */ 2803 static ssize_t 2804 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, 2805 char *buf) 2806 { 2807 struct Scsi_Host *shost = class_to_shost(cdev); 2808 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2809 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; 2810 Mpi2ConfigReply_t mpi_reply; 2811 u16 backup_rail_monitor_status = 0; 2812 u16 ioc_status; 2813 int sz; 2814 ssize_t rc = 0; 2815 2816 if (!ioc->is_warpdrive) { 2817 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for" 2818 " warpdrive\n", ioc->name, __func__); 2819 goto out; 2820 } 2821 /* pci_access_mutex lock acquired by sysfs show path */ 2822 mutex_lock(&ioc->pci_access_mutex); 2823 if (ioc->pci_error_recovery || ioc->remove_host) { 2824 mutex_unlock(&ioc->pci_access_mutex); 2825 return 0; 2826 } 2827 2828 /* allocate upto GPIOVal 36 entries */ 2829 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); 2830 io_unit_pg3 = kzalloc(sz, GFP_KERNEL); 2831 if (!io_unit_pg3) { 2832 pr_err(MPT3SAS_FMT "%s: failed allocating memory " 2833 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz); 2834 goto out; 2835 } 2836 2837 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != 2838 0) { 2839 pr_err(MPT3SAS_FMT 2840 "%s: failed reading iounit_pg3\n", ioc->name, 2841 __func__); 2842 goto out; 2843 } 2844 2845 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 2846 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2847 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with " 2848 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status); 2849 goto out; 2850 } 2851 2852 if (io_unit_pg3->GPIOCount < 25) { 2853 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than " 2854 "25 entries, detected (%d) entries\n", ioc->name, __func__, 2855 io_unit_pg3->GPIOCount); 2856 goto out; 2857 } 2858 2859 /* BRM status is in bit zero of GPIOVal[24] */ 2860 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); 2861 rc = snprintf(buf, PAGE_SIZE, "%d\n", 
(backup_rail_monitor_status & 1)); 2862 2863 out: 2864 kfree(io_unit_pg3); 2865 mutex_unlock(&ioc->pci_access_mutex); 2866 return rc; 2867 } 2868 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); 2869 2870 struct DIAG_BUFFER_START { 2871 __le32 Size; 2872 __le32 DiagVersion; 2873 u8 BufferType; 2874 u8 Reserved[3]; 2875 __le32 Reserved1; 2876 __le32 Reserved2; 2877 __le32 Reserved3; 2878 }; 2879 2880 /** 2881 * _ctl_host_trace_buffer_size_show - host buffer size (trace only) 2882 * @cdev - pointer to embedded class device 2883 * @buf - the buffer returned 2884 * 2885 * A sysfs 'read-only' shost attribute. 2886 */ 2887 static ssize_t 2888 _ctl_host_trace_buffer_size_show(struct device *cdev, 2889 struct device_attribute *attr, char *buf) 2890 { 2891 struct Scsi_Host *shost = class_to_shost(cdev); 2892 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2893 u32 size = 0; 2894 struct DIAG_BUFFER_START *request_data; 2895 2896 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 2897 pr_err(MPT3SAS_FMT 2898 "%s: host_trace_buffer is not registered\n", 2899 ioc->name, __func__); 2900 return 0; 2901 } 2902 2903 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 2904 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2905 pr_err(MPT3SAS_FMT 2906 "%s: host_trace_buffer is not registered\n", 2907 ioc->name, __func__); 2908 return 0; 2909 } 2910 2911 request_data = (struct DIAG_BUFFER_START *) 2912 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; 2913 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || 2914 le32_to_cpu(request_data->DiagVersion) == 0x01000000 || 2915 le32_to_cpu(request_data->DiagVersion) == 0x01010000) && 2916 le32_to_cpu(request_data->Reserved3) == 0x4742444c) 2917 size = le32_to_cpu(request_data->Size); 2918 2919 ioc->ring_buffer_sz = size; 2920 return snprintf(buf, PAGE_SIZE, "%d\n", size); 2921 } 2922 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, 2923 _ctl_host_trace_buffer_size_show, NULL); 2924 2925 /** 2926 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) 2927 * @cdev - pointer to embedded class device 2928 * @buf - the buffer returned 2929 * 2930 * A sysfs 'read/write' shost attribute. 2931 * 2932 * You will only be able to read 4k bytes of ring buffer at a time. 2933 * In order to read beyond 4k bytes, you will have to write out the 2934 * offset to the same attribute, it will move the pointer. 2935 */ 2936 static ssize_t 2937 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, 2938 char *buf) 2939 { 2940 struct Scsi_Host *shost = class_to_shost(cdev); 2941 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2942 void *request_data; 2943 u32 size; 2944 2945 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 2946 pr_err(MPT3SAS_FMT 2947 "%s: host_trace_buffer is not registered\n", 2948 ioc->name, __func__); 2949 return 0; 2950 } 2951 2952 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 2953 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2954 pr_err(MPT3SAS_FMT 2955 "%s: host_trace_buffer is not registered\n", 2956 ioc->name, __func__); 2957 return 0; 2958 } 2959 2960 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) 2961 return 0; 2962 2963 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; 2964 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 2965 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; 2966 memcpy(buf, request_data, size); 2967 return size; 2968 } 2969 2970 static ssize_t 2971 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, 2972 const char *buf, size_t count) 2973 { 2974 struct Scsi_Host *shost = class_to_shost(cdev); 2975 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2976 int val = 0; 2977 2978 if (sscanf(buf, "%d", &val) != 1) 2979 return -EINVAL; 2980 2981 ioc->ring_buffer_offset = val; 2982 return strlen(buf); 2983 } 2984 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, 2985 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); 2986 2987 2988 /*****************************************/ 2989 2990 /** 2991 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) 2992 * @cdev - pointer to embedded class device 2993 * @buf - the buffer returned 2994 * 2995 * A sysfs 'read/write' shost attribute. 2996 * 2997 * This is a mechnism to post/release host_trace_buffers 2998 */ 2999 static ssize_t 3000 _ctl_host_trace_buffer_enable_show(struct device *cdev, 3001 struct device_attribute *attr, char *buf) 3002 { 3003 struct Scsi_Host *shost = class_to_shost(cdev); 3004 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3005 3006 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || 3007 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3008 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)) 3009 return snprintf(buf, PAGE_SIZE, "off\n"); 3010 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3011 MPT3_DIAG_BUFFER_IS_RELEASED)) 3012 return snprintf(buf, PAGE_SIZE, "release\n"); 3013 else 3014 return snprintf(buf, PAGE_SIZE, "post\n"); 3015 } 3016 3017 static ssize_t 3018 _ctl_host_trace_buffer_enable_store(struct device *cdev, 3019 struct device_attribute *attr, const char *buf, size_t count) 3020 { 3021 struct Scsi_Host *shost = class_to_shost(cdev); 3022 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3023 char str[10] = ""; 3024 struct mpt3_diag_register diag_register; 3025 u8 issue_reset = 0; 3026 3027 /* don't allow post/release occurr while recovery is active */ 3028 if (ioc->shost_recovery || ioc->remove_host || 3029 ioc->pci_error_recovery || ioc->is_driver_loading) 3030 return -EBUSY; 3031 3032 if (sscanf(buf, "%9s", str) != 1) 3033 return -EINVAL; 3034 3035 if (!strcmp(str, "post")) { 3036 /* exit out if host buffers are already posted */ 3037 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && 3038 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3039 MPT3_DIAG_BUFFER_IS_REGISTERED) && 3040 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3041 MPT3_DIAG_BUFFER_IS_RELEASED) == 0)) 3042 goto out; 3043 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 3044 pr_info(MPT3SAS_FMT "posting host trace buffers\n", 3045 ioc->name); 3046 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 3047 diag_register.requested_buffer_size = (1024 * 1024); 3048 diag_register.unique_id = 0x7075900; 3049 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; 3050 _ctl_diag_register_2(ioc, &diag_register); 3051 } else if (!strcmp(str, "release")) { 3052 /* exit out if host buffers are already released */ 3053 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) 3054 goto out; 3055 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3056 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) 3057 goto out; 3058 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3059 MPT3_DIAG_BUFFER_IS_RELEASED)) 3060 goto out; 3061 
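		/* all guards passed: ask firmware to hand the trace
		 * buffer back to the host (same helper used by the
		 * MPT3DIAGRELEASE ioctl path above)
		 */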
pr_info(MPT3SAS_FMT "releasing host trace buffer\n", 3062 ioc->name); 3063 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, 3064 &issue_reset); 3065 } 3066 3067 out: 3068 return strlen(buf); 3069 } 3070 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, 3071 _ctl_host_trace_buffer_enable_show, 3072 _ctl_host_trace_buffer_enable_store); 3073 3074 /*********** diagnostic trigger suppport *********************************/ 3075 3076 /** 3077 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute 3078 * @cdev - pointer to embedded class device 3079 * @buf - the buffer returned 3080 * 3081 * A sysfs 'read/write' shost attribute. 3082 */ 3083 static ssize_t 3084 _ctl_diag_trigger_master_show(struct device *cdev, 3085 struct device_attribute *attr, char *buf) 3086 3087 { 3088 struct Scsi_Host *shost = class_to_shost(cdev); 3089 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3090 unsigned long flags; 3091 ssize_t rc; 3092 3093 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3094 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); 3095 memcpy(buf, &ioc->diag_trigger_master, rc); 3096 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3097 return rc; 3098 } 3099 3100 /** 3101 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute 3102 * @cdev - pointer to embedded class device 3103 * @buf - the buffer returned 3104 * 3105 * A sysfs 'read/write' shost attribute. 3106 */ 3107 static ssize_t 3108 _ctl_diag_trigger_master_store(struct device *cdev, 3109 struct device_attribute *attr, const char *buf, size_t count) 3110 3111 { 3112 struct Scsi_Host *shost = class_to_shost(cdev); 3113 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3114 unsigned long flags; 3115 ssize_t rc; 3116 3117 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3118 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); 3119 memset(&ioc->diag_trigger_master, 0, 3120 sizeof(struct SL_WH_MASTER_TRIGGER_T)); 3121 memcpy(&ioc->diag_trigger_master, buf, rc); 3122 ioc->diag_trigger_master.MasterData |= 3123 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 3124 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3125 return rc; 3126 } 3127 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, 3128 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); 3129 3130 3131 /** 3132 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute 3133 * @cdev - pointer to embedded class device 3134 * @buf - the buffer returned 3135 * 3136 * A sysfs 'read/write' shost attribute. 3137 */ 3138 static ssize_t 3139 _ctl_diag_trigger_event_show(struct device *cdev, 3140 struct device_attribute *attr, char *buf) 3141 { 3142 struct Scsi_Host *shost = class_to_shost(cdev); 3143 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3144 unsigned long flags; 3145 ssize_t rc; 3146 3147 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3148 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); 3149 memcpy(buf, &ioc->diag_trigger_event, rc); 3150 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3151 return rc; 3152 } 3153 3154 /** 3155 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute 3156 * @cdev - pointer to embedded class device 3157 * @buf - the buffer returned 3158 * 3159 * A sysfs 'read/write' shost attribute. 
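 *
 * The write is expected to be a raw binary image of
 * struct SL_WH_EVENT_TRIGGERS_T; at most NUM_VALID_ENTRIES entries are
 * honored, and any bytes beyond the size of the structure are ignored.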
 */
static ssize_t
_ctl_diag_trigger_event_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t sz;

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
	memset(&ioc->diag_trigger_event, 0,
	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
	memcpy(&ioc->diag_trigger_event, buf, sz);
	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
	_ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);


/**
 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_diag_trigger_scsi_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t rc;

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
	memcpy(buf, &ioc->diag_trigger_scsi, rc);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}

/**
 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_diag_trigger_scsi_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t sz;

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
	memset(&ioc->diag_trigger_scsi, 0,
	    sizeof(struct SL_WH_SCSI_TRIGGERS_T));
	memcpy(&ioc->diag_trigger_scsi, buf, sz);
	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
	_ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);


/**
 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
3244 */ 3245 static ssize_t 3246 _ctl_diag_trigger_mpi_show(struct device *cdev, 3247 struct device_attribute *attr, char *buf) 3248 { 3249 struct Scsi_Host *shost = class_to_shost(cdev); 3250 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3251 unsigned long flags; 3252 ssize_t rc; 3253 3254 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3255 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T); 3256 memcpy(buf, &ioc->diag_trigger_mpi, rc); 3257 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3258 return rc; 3259 } 3260 3261 /** 3262 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute 3263 * @cdev - pointer to embedded class device 3264 * @buf - the buffer returned 3265 * 3266 * A sysfs 'read/write' shost attribute. 3267 */ 3268 static ssize_t 3269 _ctl_diag_trigger_mpi_store(struct device *cdev, 3270 struct device_attribute *attr, const char *buf, size_t count) 3271 { 3272 struct Scsi_Host *shost = class_to_shost(cdev); 3273 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3274 unsigned long flags; 3275 ssize_t sz; 3276 3277 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3278 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); 3279 memset(&ioc->diag_trigger_mpi, 0, 3280 sizeof(ioc->diag_trigger_mpi)); 3281 memcpy(&ioc->diag_trigger_mpi, buf, sz); 3282 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) 3283 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; 3284 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3285 return sz; 3286 } 3287 3288 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, 3289 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); 3290 3291 /*********** diagnostic trigger suppport *** END ****************************/ 3292 3293 3294 3295 /*****************************************/ 3296 3297 struct device_attribute *mpt3sas_host_attrs[] = { 3298 &dev_attr_version_fw, 3299 &dev_attr_version_bios, 3300 &dev_attr_version_mpi, 3301 &dev_attr_version_product, 3302 &dev_attr_version_nvdata_persistent, 3303 &dev_attr_version_nvdata_default, 3304 &dev_attr_board_name, 3305 &dev_attr_board_assembly, 3306 &dev_attr_board_tracer, 3307 &dev_attr_io_delay, 3308 &dev_attr_device_delay, 3309 &dev_attr_logging_level, 3310 &dev_attr_fwfault_debug, 3311 &dev_attr_fw_queue_depth, 3312 &dev_attr_host_sas_address, 3313 &dev_attr_ioc_reset_count, 3314 &dev_attr_host_trace_buffer_size, 3315 &dev_attr_host_trace_buffer, 3316 &dev_attr_host_trace_buffer_enable, 3317 &dev_attr_reply_queue_count, 3318 &dev_attr_diag_trigger_master, 3319 &dev_attr_diag_trigger_event, 3320 &dev_attr_diag_trigger_scsi, 3321 &dev_attr_diag_trigger_mpi, 3322 &dev_attr_BRM_status, 3323 NULL, 3324 }; 3325 3326 /* device attributes */ 3327 3328 /** 3329 * _ctl_device_sas_address_show - sas address 3330 * @cdev - pointer to embedded class device 3331 * @buf - the buffer returned 3332 * 3333 * This is the sas address for the target 3334 * 3335 * A sysfs 'read-only' shost attribute. 
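 *
 * Being a per-device attribute, it typically shows up under the
 * corresponding scsi_device directory in sysfs (for example
 * /sys/class/scsi_device/<h:c:t:l>/device/sas_address) rather than
 * under the scsi_host object.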
 */
static ssize_t
_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    (unsigned long long)sas_device_priv_data->sas_target->sas_address);
}
static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);

/**
 * _ctl_device_handle_show - device handle
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the firmware assigned device handle.
 *
 * A sysfs 'read-only' sdev attribute.
 */
static ssize_t
_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;

	return snprintf(buf, PAGE_SIZE, "0x%04x\n",
	    sas_device_priv_data->sas_target->handle);
}
static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);

struct device_attribute *mpt3sas_dev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_sas_device_handle,
	NULL,
};

/* file operations table for mpt3ctl device */
static const struct file_operations ctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	.compat_ioctl = _ctl_ioctl_compat,
#endif
};

/* file operations table for mpt2ctl device */
static const struct file_operations ctl_gen2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_mpt2_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	.compat_ioctl = _ctl_mpt2_ioctl_compat,
#endif
};

static struct miscdevice ctl_dev = {
	.minor = MPT3SAS_MINOR,
	.name = MPT3SAS_DEV_NAME,
	.fops = &ctl_fops,
};

static struct miscdevice gen2_ctl_dev = {
	.minor = MPT2SAS_MINOR,
	.name = MPT2SAS_DEV_NAME,
	.fops = &ctl_gen2_fops,
};

/**
 * mpt3sas_ctl_init - main entry point for ctl.
 *
 */
void
mpt3sas_ctl_init(ushort hbas_to_enumerate)
{
	async_queue = NULL;

	/* Don't register mpt3ctl ioctl device if
	 * hbas_to_enumerate is one.
	 */
	if (hbas_to_enumerate != 1)
		if (misc_register(&ctl_dev) < 0)
			pr_err("%s can't register misc device [minor=%d]\n",
			    MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);

	/* Don't register mpt2ctl ioctl device if
	 * hbas_to_enumerate is two.
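	 * (hbas_to_enumerate is the driver's module parameter:
	 *  0 enumerates both SAS 2.0 and SAS 3.0 generation HBAs,
	 *  1 only SAS 2.0, 2 only SAS 3.0; the generation-specific
	 *  ioctl node is skipped when that generation is not enumerated.)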
3429 */ 3430 if (hbas_to_enumerate != 2) 3431 if (misc_register(&gen2_ctl_dev) < 0) 3432 pr_err("%s can't register misc device [minor=%d]\n", 3433 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); 3434 3435 init_waitqueue_head(&ctl_poll_wait); 3436 } 3437 3438 /** 3439 * mpt3sas_ctl_exit - exit point for ctl 3440 * 3441 */ 3442 void 3443 mpt3sas_ctl_exit(ushort hbas_to_enumerate) 3444 { 3445 struct MPT3SAS_ADAPTER *ioc; 3446 int i; 3447 3448 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 3449 3450 /* free memory associated to diag buffers */ 3451 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 3452 if (!ioc->diag_buffer[i]) 3453 continue; 3454 if (!(ioc->diag_buffer_status[i] & 3455 MPT3_DIAG_BUFFER_IS_REGISTERED)) 3456 continue; 3457 if ((ioc->diag_buffer_status[i] & 3458 MPT3_DIAG_BUFFER_IS_RELEASED)) 3459 continue; 3460 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], 3461 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); 3462 ioc->diag_buffer[i] = NULL; 3463 ioc->diag_buffer_status[i] = 0; 3464 } 3465 3466 kfree(ioc->event_log); 3467 } 3468 if (hbas_to_enumerate != 1) 3469 misc_deregister(&ctl_dev); 3470 if (hbas_to_enumerate != 2) 3471 misc_deregister(&gen2_ctl_dev); 3472 } 3473