/*
 * Management Module Support for MPT (Message Passing Technology) based
 * controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
 * Copyright (C) 2012-2014 LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/poll.h>

#include <linux/io.h>
#include <linux/uaccess.h>

#include "mpt3sas_base.h"
#include "mpt3sas_ctl.h"


static struct fasync_struct *async_queue;
static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);


/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking
 * @BLOCKING: blocking
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, so they probably require sleep.
 */
enum block_state {
	NON_BLOCKING,
	BLOCKING,
};

/**
 * _ctl_sas_device_find_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: Calling function should acquire ioc->sas_device_lock
 *
 * This searches for sas_device based on handle, then returns the
 * sas_device object.
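 *
 * Return: pointer to the matching _sas_device object, or NULL when no
 * device with @handle is present on ioc->sas_device_list.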
89 */ 90 static struct _sas_device * 91 _ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 92 { 93 struct _sas_device *sas_device, *r; 94 95 r = NULL; 96 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 97 if (sas_device->handle != handle) 98 continue; 99 r = sas_device; 100 goto out; 101 } 102 103 out: 104 return r; 105 } 106 107 /** 108 * _ctl_display_some_debug - debug routine 109 * @ioc: per adapter object 110 * @smid: system request message index 111 * @calling_function_name: string pass from calling function 112 * @mpi_reply: reply message frame 113 * Context: none. 114 * 115 * Function for displaying debug info helpful when debugging issues 116 * in this module. 117 */ 118 static void 119 _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, 120 char *calling_function_name, MPI2DefaultReply_t *mpi_reply) 121 { 122 Mpi2ConfigRequest_t *mpi_request; 123 char *desc = NULL; 124 125 if (!(ioc->logging_level & MPT_DEBUG_IOCTL)) 126 return; 127 128 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 129 switch (mpi_request->Function) { 130 case MPI2_FUNCTION_SCSI_IO_REQUEST: 131 { 132 Mpi2SCSIIORequest_t *scsi_request = 133 (Mpi2SCSIIORequest_t *)mpi_request; 134 135 snprintf(ioc->tmp_string, MPT_STRING_LENGTH, 136 "scsi_io, cmd(0x%02x), cdb_len(%d)", 137 scsi_request->CDB.CDB32[0], 138 le16_to_cpu(scsi_request->IoFlags) & 0xF); 139 desc = ioc->tmp_string; 140 break; 141 } 142 case MPI2_FUNCTION_SCSI_TASK_MGMT: 143 desc = "task_mgmt"; 144 break; 145 case MPI2_FUNCTION_IOC_INIT: 146 desc = "ioc_init"; 147 break; 148 case MPI2_FUNCTION_IOC_FACTS: 149 desc = "ioc_facts"; 150 break; 151 case MPI2_FUNCTION_CONFIG: 152 { 153 Mpi2ConfigRequest_t *config_request = 154 (Mpi2ConfigRequest_t *)mpi_request; 155 156 snprintf(ioc->tmp_string, MPT_STRING_LENGTH, 157 "config, type(0x%02x), ext_type(0x%02x), number(%d)", 158 (config_request->Header.PageType & 159 MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType, 160 config_request->Header.PageNumber); 161 desc = ioc->tmp_string; 162 break; 163 } 164 case MPI2_FUNCTION_PORT_FACTS: 165 desc = "port_facts"; 166 break; 167 case MPI2_FUNCTION_PORT_ENABLE: 168 desc = "port_enable"; 169 break; 170 case MPI2_FUNCTION_EVENT_NOTIFICATION: 171 desc = "event_notification"; 172 break; 173 case MPI2_FUNCTION_FW_DOWNLOAD: 174 desc = "fw_download"; 175 break; 176 case MPI2_FUNCTION_FW_UPLOAD: 177 desc = "fw_upload"; 178 break; 179 case MPI2_FUNCTION_RAID_ACTION: 180 desc = "raid_action"; 181 break; 182 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 183 { 184 Mpi2SCSIIORequest_t *scsi_request = 185 (Mpi2SCSIIORequest_t *)mpi_request; 186 187 snprintf(ioc->tmp_string, MPT_STRING_LENGTH, 188 "raid_pass, cmd(0x%02x), cdb_len(%d)", 189 scsi_request->CDB.CDB32[0], 190 le16_to_cpu(scsi_request->IoFlags) & 0xF); 191 desc = ioc->tmp_string; 192 break; 193 } 194 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: 195 desc = "sas_iounit_cntl"; 196 break; 197 case MPI2_FUNCTION_SATA_PASSTHROUGH: 198 desc = "sata_pass"; 199 break; 200 case MPI2_FUNCTION_DIAG_BUFFER_POST: 201 desc = "diag_buffer_post"; 202 break; 203 case MPI2_FUNCTION_DIAG_RELEASE: 204 desc = "diag_release"; 205 break; 206 case MPI2_FUNCTION_SMP_PASSTHROUGH: 207 desc = "smp_passthrough"; 208 break; 209 } 210 211 if (!desc) 212 return; 213 214 pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n", 215 ioc->name, calling_function_name, desc, smid); 216 217 if (!mpi_reply) 218 return; 219 220 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) 221 pr_info(MPT3SAS_FMT 222 "\tiocstatus(0x%04x), 
loginfo(0x%08x)\n", 223 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 224 le32_to_cpu(mpi_reply->IOCLogInfo)); 225 226 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 227 mpi_request->Function == 228 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 229 Mpi2SCSIIOReply_t *scsi_reply = 230 (Mpi2SCSIIOReply_t *)mpi_reply; 231 struct _sas_device *sas_device = NULL; 232 unsigned long flags; 233 234 spin_lock_irqsave(&ioc->sas_device_lock, flags); 235 sas_device = _ctl_sas_device_find_by_handle(ioc, 236 le16_to_cpu(scsi_reply->DevHandle)); 237 if (sas_device) { 238 pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n", 239 ioc->name, (unsigned long long) 240 sas_device->sas_address, sas_device->phy); 241 pr_warn(MPT3SAS_FMT 242 "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 243 ioc->name, (unsigned long long) 244 sas_device->enclosure_logical_id, sas_device->slot); 245 } 246 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 247 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) 248 pr_info(MPT3SAS_FMT 249 "\tscsi_state(0x%02x), scsi_status" 250 "(0x%02x)\n", ioc->name, 251 scsi_reply->SCSIState, 252 scsi_reply->SCSIStatus); 253 } 254 } 255 256 /** 257 * mpt3sas_ctl_done - ctl module completion routine 258 * @ioc: per adapter object 259 * @smid: system request message index 260 * @msix_index: MSIX table index supplied by the OS 261 * @reply: reply message frame(lower 32bit addr) 262 * Context: none. 263 * 264 * The callback handler when using ioc->ctl_cb_idx. 265 * 266 * Return 1 meaning mf should be freed from _base_interrupt 267 * 0 means the mf is freed from this function. 268 */ 269 u8 270 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 271 u32 reply) 272 { 273 MPI2DefaultReply_t *mpi_reply; 274 Mpi2SCSIIOReply_t *scsiio_reply; 275 const void *sense_data; 276 u32 sz; 277 278 if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED) 279 return 1; 280 if (ioc->ctl_cmds.smid != smid) 281 return 1; 282 ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE; 283 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 284 if (mpi_reply) { 285 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 286 ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID; 287 /* get sense data */ 288 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 289 mpi_reply->Function == 290 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 291 scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply; 292 if (scsiio_reply->SCSIState & 293 MPI2_SCSI_STATE_AUTOSENSE_VALID) { 294 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, 295 le32_to_cpu(scsiio_reply->SenseCount)); 296 sense_data = mpt3sas_base_get_sense_buffer(ioc, 297 smid); 298 memcpy(ioc->ctl_cmds.sense, sense_data, sz); 299 } 300 } 301 } 302 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); 303 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING; 304 complete(&ioc->ctl_cmds.done); 305 return 1; 306 } 307 308 /** 309 * _ctl_check_event_type - determines when an event needs logging 310 * @ioc: per adapter object 311 * @event: firmware event 312 * 313 * The bitmask in ioc->event_type[] indicates which events should be 314 * be saved in the driver event_log. This bitmask is set by application. 315 * 316 * Returns 1 when event should be captured, or zero means no match. 
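 *
 * The event number selects a word (event / 32) and a bit (event % 32) in
 * ioc->event_type[]; event 0 and events >= 128 are never logged.  As an
 * illustrative sketch (not part of this driver), an application that wants
 * event 0x21 logged would set bit 1 of word 1 in the mask it passes to the
 * MPT3EVENTENABLE ioctl:
 *
 *	event_types[0x21 / 32] |= 1 << (0x21 % 32);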
 */
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u16 i;
	u32 desired_event;

	if (event >= 128 || !event || !ioc->event_log)
		return 0;

	desired_event = (1 << (event % 32));
	if (!desired_event)
		desired_event = 1;
	i = event / 32;
	return desired_event & ioc->event_type[i];
}

/**
 * mpt3sas_ctl_add_to_event_log - add event
 * @ioc: per adapter object
 * @mpi_reply: reply message frame
 *
 * Return nothing.
 */
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT3_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}

/**
 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function adds the firmware event to the driver's ioctl event log
 * (see mpt3sas_ctl_add_to_event_log) so a polling application can read it.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
	return 1;
}

/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc_number: ioc number as passed in from the application
 * @iocpp: The ioc pointer is returned in this.
 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
 * MPI25_VERSION for mpt3ctl ioctl device.
 *
 * Return (-1) means error, else ioc_number.
 */
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
	int mpi_version)
{
	struct MPT3SAS_ADAPTER *ioc;
	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->id != ioc_number)
			continue;
		/* Check whether this ioctl command is from right
		 * ioctl device or not, if not continue the search.
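		 * A SAS2 controller is matched only when the request came in
		 * through the mpt2ctl node (MPI2_VERSION) and a SAS3
		 * controller only through mpt3ctl (MPI25_VERSION).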
		 */
		if (ioc->hba_mpi_version_belonged != mpi_version)
			continue;
		spin_unlock(&gioc_lock);
		*iocpp = ioc;
		return ioc_number;
	}
	spin_unlock(&gioc_lock);
	*iocpp = NULL;
	return -1;
}

/**
 * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 */
void
mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	int i;
	u8 issue_reset;

	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			mpt3sas_send_diag_release(ioc, i, &issue_reset);
		}
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
			ioc->ctl_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
			complete(&ioc->ctl_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));

		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			    MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			ioc->diag_buffer_status[i] |=
			    MPT3_DIAG_BUFFER_IS_DIAG_RESET;
		}
		break;
	}
}

/**
 * _ctl_fasync - async notification registration
 * @fd - file descriptor passed from user space
 * @filep - file object
 * @mode - flag indicating whether to add or remove the entry
 *
 * Called when an application requests the fasync callback handler.
 */
int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}

/**
 * _ctl_poll - poll for pending firmware events
 * @filep - file object
 * @wait - poll table passed in by the VFS
 *
 * Returns POLLIN | POLLRDNORM when an unread firmware event is pending on
 * any adapter, otherwise 0.
 */
unsigned int
_ctl_poll(struct file *filep, poll_table *wait)
{
	struct MPT3SAS_ADAPTER *ioc;

	poll_wait(filep, &ctl_poll_wait, wait);

	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->aen_event_read_flag) {
			spin_unlock(&gioc_lock);
			return POLLIN | POLLRDNORM;
		}
	}
	spin_unlock(&gioc_lock);
	return 0;
}

/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg - (struct mpt3_ioctl_command)
 * @tm_request - pointer to mf from user space
 *
 * Returns 0 when an smid is found, else 1 (failure);
 * during failure, the reply frame is filled.
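 *
 * The ioc->scsi_lookup table is scanned for an outstanding command whose
 * LUN and device handle match @tm_request; when one is found, its smid is
 * written into tm_request->TaskMID so firmware can address that task.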
545 */ 546 static int 547 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, 548 Mpi2SCSITaskManagementRequest_t *tm_request) 549 { 550 u8 found = 0; 551 u16 i; 552 u16 handle; 553 struct scsi_cmnd *scmd; 554 struct MPT3SAS_DEVICE *priv_data; 555 unsigned long flags; 556 Mpi2SCSITaskManagementReply_t *tm_reply; 557 u32 sz; 558 u32 lun; 559 char *desc = NULL; 560 561 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) 562 desc = "abort_task"; 563 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 564 desc = "query_task"; 565 else 566 return 0; 567 568 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 569 570 handle = le16_to_cpu(tm_request->DevHandle); 571 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 572 for (i = ioc->scsiio_depth; i && !found; i--) { 573 scmd = ioc->scsi_lookup[i - 1].scmd; 574 if (scmd == NULL || scmd->device == NULL || 575 scmd->device->hostdata == NULL) 576 continue; 577 if (lun != scmd->device->lun) 578 continue; 579 priv_data = scmd->device->hostdata; 580 if (priv_data->sas_target == NULL) 581 continue; 582 if (priv_data->sas_target->handle != handle) 583 continue; 584 tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid); 585 found = 1; 586 } 587 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 588 589 if (!found) { 590 dctlprintk(ioc, pr_info(MPT3SAS_FMT 591 "%s: handle(0x%04x), lun(%d), no active mid!!\n", 592 ioc->name, 593 desc, le16_to_cpu(tm_request->DevHandle), lun)); 594 tm_reply = ioc->ctl_cmds.reply; 595 tm_reply->DevHandle = tm_request->DevHandle; 596 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 597 tm_reply->TaskType = tm_request->TaskType; 598 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; 599 tm_reply->VP_ID = tm_request->VP_ID; 600 tm_reply->VF_ID = tm_request->VF_ID; 601 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); 602 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, 603 sz)) 604 pr_err("failure at %s:%d/%s()!\n", __FILE__, 605 __LINE__, __func__); 606 return 1; 607 } 608 609 dctlprintk(ioc, pr_info(MPT3SAS_FMT 610 "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, 611 desc, le16_to_cpu(tm_request->DevHandle), lun, 612 le16_to_cpu(tm_request->TaskMID))); 613 return 0; 614 } 615 616 /** 617 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode 618 * @ioc: per adapter object 619 * @karg - (struct mpt3_ioctl_command) 620 * @mf - pointer to mf in user space 621 */ 622 static long 623 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, 624 void __user *mf) 625 { 626 MPI2RequestHeader_t *mpi_request = NULL, *request; 627 MPI2DefaultReply_t *mpi_reply; 628 u32 ioc_state; 629 u16 ioc_status; 630 u16 smid; 631 unsigned long timeout, timeleft; 632 u8 issue_reset; 633 u32 sz; 634 void *psge; 635 void *data_out = NULL; 636 dma_addr_t data_out_dma = 0; 637 size_t data_out_sz = 0; 638 void *data_in = NULL; 639 dma_addr_t data_in_dma = 0; 640 size_t data_in_sz = 0; 641 long ret; 642 u16 wait_state_count; 643 644 issue_reset = 0; 645 646 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 647 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 648 ioc->name, __func__); 649 ret = -EAGAIN; 650 goto out; 651 } 652 653 wait_state_count = 0; 654 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 655 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 656 if (wait_state_count++ == 10) { 657 pr_err(MPT3SAS_FMT 658 "%s: failed due to ioc not operational\n", 659 ioc->name, __func__); 660 ret = -EFAULT; 661 goto 
out; 662 } 663 ssleep(1); 664 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 665 pr_info(MPT3SAS_FMT 666 "%s: waiting for operational state(count=%d)\n", 667 ioc->name, 668 __func__, wait_state_count); 669 } 670 if (wait_state_count) 671 pr_info(MPT3SAS_FMT "%s: ioc is operational\n", 672 ioc->name, __func__); 673 674 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); 675 if (!mpi_request) { 676 pr_err(MPT3SAS_FMT 677 "%s: failed obtaining a memory for mpi_request\n", 678 ioc->name, __func__); 679 ret = -ENOMEM; 680 goto out; 681 } 682 683 /* Check for overflow and wraparound */ 684 if (karg.data_sge_offset * 4 > ioc->request_sz || 685 karg.data_sge_offset > (UINT_MAX / 4)) { 686 ret = -EINVAL; 687 goto out; 688 } 689 690 /* copy in request message frame from user */ 691 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { 692 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, 693 __func__); 694 ret = -EFAULT; 695 goto out; 696 } 697 698 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 699 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); 700 if (!smid) { 701 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 702 ioc->name, __func__); 703 ret = -EAGAIN; 704 goto out; 705 } 706 } else { 707 708 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL); 709 if (!smid) { 710 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 711 ioc->name, __func__); 712 ret = -EAGAIN; 713 goto out; 714 } 715 } 716 717 ret = 0; 718 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 719 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 720 request = mpt3sas_base_get_msg_frame(ioc, smid); 721 memcpy(request, mpi_request, karg.data_sge_offset*4); 722 ioc->ctl_cmds.smid = smid; 723 data_out_sz = karg.data_out_size; 724 data_in_sz = karg.data_in_size; 725 726 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 727 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 728 if (!le16_to_cpu(mpi_request->FunctionDependent1) || 729 le16_to_cpu(mpi_request->FunctionDependent1) > 730 ioc->facts.MaxDevHandle) { 731 ret = -EINVAL; 732 mpt3sas_base_free_smid(ioc, smid); 733 goto out; 734 } 735 } 736 737 /* obtain dma-able memory for data transfer */ 738 if (data_out_sz) /* WRITE */ { 739 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, 740 &data_out_dma); 741 if (!data_out) { 742 pr_err("failure at %s:%d/%s()!\n", __FILE__, 743 __LINE__, __func__); 744 ret = -ENOMEM; 745 mpt3sas_base_free_smid(ioc, smid); 746 goto out; 747 } 748 if (copy_from_user(data_out, karg.data_out_buf_ptr, 749 data_out_sz)) { 750 pr_err("failure at %s:%d/%s()!\n", __FILE__, 751 __LINE__, __func__); 752 ret = -EFAULT; 753 mpt3sas_base_free_smid(ioc, smid); 754 goto out; 755 } 756 } 757 758 if (data_in_sz) /* READ */ { 759 data_in = pci_alloc_consistent(ioc->pdev, data_in_sz, 760 &data_in_dma); 761 if (!data_in) { 762 pr_err("failure at %s:%d/%s()!\n", __FILE__, 763 __LINE__, __func__); 764 ret = -ENOMEM; 765 mpt3sas_base_free_smid(ioc, smid); 766 goto out; 767 } 768 } 769 770 psge = (void *)request + (karg.data_sge_offset*4); 771 772 /* send command to firmware */ 773 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); 774 775 init_completion(&ioc->ctl_cmds.done); 776 switch (mpi_request->Function) { 777 case MPI2_FUNCTION_SCSI_IO_REQUEST: 778 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 779 { 780 Mpi2SCSIIORequest_t *scsiio_request = 781 (Mpi2SCSIIORequest_t *)request; 782 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 783 scsiio_request->SenseBufferLowAddress = 784 
mpt3sas_base_get_sense_buffer_dma(ioc, smid); 785 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); 786 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 787 data_in_dma, data_in_sz); 788 789 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) 790 mpt3sas_base_put_smid_scsi_io(ioc, smid, 791 le16_to_cpu(mpi_request->FunctionDependent1)); 792 else 793 mpt3sas_base_put_smid_default(ioc, smid); 794 break; 795 } 796 case MPI2_FUNCTION_SCSI_TASK_MGMT: 797 { 798 Mpi2SCSITaskManagementRequest_t *tm_request = 799 (Mpi2SCSITaskManagementRequest_t *)request; 800 801 dtmprintk(ioc, pr_info(MPT3SAS_FMT 802 "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", 803 ioc->name, 804 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); 805 806 if (tm_request->TaskType == 807 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || 808 tm_request->TaskType == 809 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { 810 if (_ctl_set_task_mid(ioc, &karg, tm_request)) { 811 mpt3sas_base_free_smid(ioc, smid); 812 goto out; 813 } 814 } 815 816 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( 817 tm_request->DevHandle)); 818 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 819 data_in_dma, data_in_sz); 820 mpt3sas_base_put_smid_hi_priority(ioc, smid); 821 break; 822 } 823 case MPI2_FUNCTION_SMP_PASSTHROUGH: 824 { 825 Mpi2SmpPassthroughRequest_t *smp_request = 826 (Mpi2SmpPassthroughRequest_t *)mpi_request; 827 u8 *data; 828 829 /* ioc determines which port to use */ 830 smp_request->PhysicalPort = 0xFF; 831 if (smp_request->PassthroughFlags & 832 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) 833 data = (u8 *)&smp_request->SGL; 834 else { 835 if (unlikely(data_out == NULL)) { 836 pr_err("failure at %s:%d/%s()!\n", 837 __FILE__, __LINE__, __func__); 838 mpt3sas_base_free_smid(ioc, smid); 839 ret = -EINVAL; 840 goto out; 841 } 842 data = data_out; 843 } 844 845 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { 846 ioc->ioc_link_reset_in_progress = 1; 847 ioc->ignore_loginfos = 1; 848 } 849 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 850 data_in_sz); 851 mpt3sas_base_put_smid_default(ioc, smid); 852 break; 853 } 854 case MPI2_FUNCTION_SATA_PASSTHROUGH: 855 case MPI2_FUNCTION_FW_DOWNLOAD: 856 case MPI2_FUNCTION_FW_UPLOAD: 857 { 858 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 859 data_in_sz); 860 mpt3sas_base_put_smid_default(ioc, smid); 861 break; 862 } 863 case MPI2_FUNCTION_TOOLBOX: 864 { 865 Mpi2ToolboxCleanRequest_t *toolbox_request = 866 (Mpi2ToolboxCleanRequest_t *)mpi_request; 867 868 if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) { 869 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 870 data_in_dma, data_in_sz); 871 } else { 872 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 873 data_in_dma, data_in_sz); 874 } 875 mpt3sas_base_put_smid_default(ioc, smid); 876 break; 877 } 878 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: 879 { 880 Mpi2SasIoUnitControlRequest_t *sasiounit_request = 881 (Mpi2SasIoUnitControlRequest_t *)mpi_request; 882 883 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET 884 || sasiounit_request->Operation == 885 MPI2_SAS_OP_PHY_LINK_RESET) { 886 ioc->ioc_link_reset_in_progress = 1; 887 ioc->ignore_loginfos = 1; 888 } 889 /* drop to default case for posting the request */ 890 } 891 default: 892 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 893 data_in_dma, data_in_sz); 894 mpt3sas_base_put_smid_default(ioc, smid); 895 break; 896 } 897 898 if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) 899 timeout = 
MPT3_IOCTL_DEFAULT_TIMEOUT; 900 else 901 timeout = karg.timeout; 902 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 903 timeout*HZ); 904 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 905 Mpi2SCSITaskManagementRequest_t *tm_request = 906 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 907 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu( 908 tm_request->DevHandle)); 909 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); 910 } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH || 911 mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) && 912 ioc->ioc_link_reset_in_progress) { 913 ioc->ioc_link_reset_in_progress = 0; 914 ioc->ignore_loginfos = 0; 915 } 916 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 917 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 918 __func__); 919 _debug_dump_mf(mpi_request, karg.data_sge_offset); 920 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 921 issue_reset = 1; 922 goto issue_host_reset; 923 } 924 925 mpi_reply = ioc->ctl_cmds.reply; 926 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 927 928 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && 929 (ioc->logging_level & MPT_DEBUG_TM)) { 930 Mpi2SCSITaskManagementReply_t *tm_reply = 931 (Mpi2SCSITaskManagementReply_t *)mpi_reply; 932 933 pr_info(MPT3SAS_FMT "TASK_MGMT: " \ 934 "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " 935 "TerminationCount(0x%08x)\n", ioc->name, 936 le16_to_cpu(tm_reply->IOCStatus), 937 le32_to_cpu(tm_reply->IOCLogInfo), 938 le32_to_cpu(tm_reply->TerminationCount)); 939 } 940 941 /* copy out xdata to user */ 942 if (data_in_sz) { 943 if (copy_to_user(karg.data_in_buf_ptr, data_in, 944 data_in_sz)) { 945 pr_err("failure at %s:%d/%s()!\n", __FILE__, 946 __LINE__, __func__); 947 ret = -ENODATA; 948 goto out; 949 } 950 } 951 952 /* copy out reply message frame to user */ 953 if (karg.max_reply_bytes) { 954 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); 955 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, 956 sz)) { 957 pr_err("failure at %s:%d/%s()!\n", __FILE__, 958 __LINE__, __func__); 959 ret = -ENODATA; 960 goto out; 961 } 962 } 963 964 /* copy out sense to user */ 965 if (karg.max_sense_bytes && (mpi_request->Function == 966 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == 967 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 968 sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE); 969 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, 970 sz)) { 971 pr_err("failure at %s:%d/%s()!\n", __FILE__, 972 __LINE__, __func__); 973 ret = -ENODATA; 974 goto out; 975 } 976 } 977 978 issue_host_reset: 979 if (issue_reset) { 980 ret = -ENODATA; 981 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 982 mpi_request->Function == 983 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || 984 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { 985 pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n", 986 ioc->name, 987 le16_to_cpu(mpi_request->FunctionDependent1)); 988 mpt3sas_halt_firmware(ioc); 989 mpt3sas_scsih_issue_tm(ioc, 990 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 991 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 992 TM_MUTEX_ON); 993 } else 994 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 995 FORCE_BIG_HAMMER); 996 } 997 998 out: 999 1000 /* free memory associated with sg buffers */ 1001 if (data_in) 1002 pci_free_consistent(ioc->pdev, data_in_sz, data_in, 1003 data_in_dma); 1004 1005 if (data_out) 1006 pci_free_consistent(ioc->pdev, 
data_out_sz, data_out, 1007 data_out_dma); 1008 1009 kfree(mpi_request); 1010 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 1011 return ret; 1012 } 1013 1014 /** 1015 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode 1016 * @ioc: per adapter object 1017 * @arg - user space buffer containing ioctl content 1018 */ 1019 static long 1020 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1021 { 1022 struct mpt3_ioctl_iocinfo karg; 1023 1024 if (copy_from_user(&karg, arg, sizeof(karg))) { 1025 pr_err("failure at %s:%d/%s()!\n", 1026 __FILE__, __LINE__, __func__); 1027 return -EFAULT; 1028 } 1029 1030 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1031 __func__)); 1032 1033 memset(&karg, 0 , sizeof(karg)); 1034 if (ioc->pfacts) 1035 karg.port_number = ioc->pfacts[0].PortNumber; 1036 karg.hw_rev = ioc->pdev->revision; 1037 karg.pci_id = ioc->pdev->device; 1038 karg.subsystem_device = ioc->pdev->subsystem_device; 1039 karg.subsystem_vendor = ioc->pdev->subsystem_vendor; 1040 karg.pci_information.u.bits.bus = ioc->pdev->bus->number; 1041 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); 1042 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); 1043 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); 1044 karg.firmware_version = ioc->facts.FWVersion.Word; 1045 strcpy(karg.driver_version, ioc->driver_name); 1046 strcat(karg.driver_version, "-"); 1047 switch (ioc->hba_mpi_version_belonged) { 1048 case MPI2_VERSION: 1049 if (ioc->is_warpdrive) 1050 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200; 1051 else 1052 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; 1053 strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); 1054 break; 1055 case MPI25_VERSION: 1056 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; 1057 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); 1058 break; 1059 } 1060 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 1061 1062 if (copy_to_user(arg, &karg, sizeof(karg))) { 1063 pr_err("failure at %s:%d/%s()!\n", 1064 __FILE__, __LINE__, __func__); 1065 return -EFAULT; 1066 } 1067 return 0; 1068 } 1069 1070 /** 1071 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode 1072 * @ioc: per adapter object 1073 * @arg - user space buffer containing ioctl content 1074 */ 1075 static long 1076 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1077 { 1078 struct mpt3_ioctl_eventquery karg; 1079 1080 if (copy_from_user(&karg, arg, sizeof(karg))) { 1081 pr_err("failure at %s:%d/%s()!\n", 1082 __FILE__, __LINE__, __func__); 1083 return -EFAULT; 1084 } 1085 1086 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1087 __func__)); 1088 1089 karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE; 1090 memcpy(karg.event_types, ioc->event_type, 1091 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); 1092 1093 if (copy_to_user(arg, &karg, sizeof(karg))) { 1094 pr_err("failure at %s:%d/%s()!\n", 1095 __FILE__, __LINE__, __func__); 1096 return -EFAULT; 1097 } 1098 return 0; 1099 } 1100 1101 /** 1102 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode 1103 * @ioc: per adapter object 1104 * @arg - user space buffer containing ioctl content 1105 */ 1106 static long 1107 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1108 { 1109 struct mpt3_ioctl_eventenable karg; 1110 1111 if (copy_from_user(&karg, arg, sizeof(karg))) { 1112 pr_err("failure at %s:%d/%s()!\n", 1113 __FILE__, __LINE__, __func__); 1114 return -EFAULT; 1115 } 1116 1117 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", 
ioc->name, 1118 __func__)); 1119 1120 memcpy(ioc->event_type, karg.event_types, 1121 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); 1122 mpt3sas_base_validate_event_type(ioc, ioc->event_type); 1123 1124 if (ioc->event_log) 1125 return 0; 1126 /* initialize event_log */ 1127 ioc->event_context = 0; 1128 ioc->aen_event_read_flag = 0; 1129 ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE, 1130 sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL); 1131 if (!ioc->event_log) { 1132 pr_err("failure at %s:%d/%s()!\n", 1133 __FILE__, __LINE__, __func__); 1134 return -ENOMEM; 1135 } 1136 return 0; 1137 } 1138 1139 /** 1140 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode 1141 * @ioc: per adapter object 1142 * @arg - user space buffer containing ioctl content 1143 */ 1144 static long 1145 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1146 { 1147 struct mpt3_ioctl_eventreport karg; 1148 u32 number_bytes, max_events, max; 1149 struct mpt3_ioctl_eventreport __user *uarg = arg; 1150 1151 if (copy_from_user(&karg, arg, sizeof(karg))) { 1152 pr_err("failure at %s:%d/%s()!\n", 1153 __FILE__, __LINE__, __func__); 1154 return -EFAULT; 1155 } 1156 1157 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1158 __func__)); 1159 1160 number_bytes = karg.hdr.max_data_size - 1161 sizeof(struct mpt3_ioctl_header); 1162 max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS); 1163 max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events); 1164 1165 /* If fewer than 1 event is requested, there must have 1166 * been some type of error. 1167 */ 1168 if (!max || !ioc->event_log) 1169 return -ENODATA; 1170 1171 number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS); 1172 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { 1173 pr_err("failure at %s:%d/%s()!\n", 1174 __FILE__, __LINE__, __func__); 1175 return -EFAULT; 1176 } 1177 1178 /* reset flag so SIGIO can restart */ 1179 ioc->aen_event_read_flag = 0; 1180 return 0; 1181 } 1182 1183 /** 1184 * _ctl_do_reset - main handler for MPT3HARDRESET opcode 1185 * @ioc: per adapter object 1186 * @arg - user space buffer containing ioctl content 1187 */ 1188 static long 1189 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1190 { 1191 struct mpt3_ioctl_diag_reset karg; 1192 int retval; 1193 1194 if (copy_from_user(&karg, arg, sizeof(karg))) { 1195 pr_err("failure at %s:%d/%s()!\n", 1196 __FILE__, __LINE__, __func__); 1197 return -EFAULT; 1198 } 1199 1200 if (ioc->shost_recovery || ioc->pci_error_recovery || 1201 ioc->is_driver_loading) 1202 return -EAGAIN; 1203 1204 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1205 __func__)); 1206 1207 retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1208 FORCE_BIG_HAMMER); 1209 pr_info(MPT3SAS_FMT "host reset: %s\n", 1210 ioc->name, ((!retval) ? 
"SUCCESS" : "FAILED")); 1211 return 0; 1212 } 1213 1214 /** 1215 * _ctl_btdh_search_sas_device - searching for sas device 1216 * @ioc: per adapter object 1217 * @btdh: btdh ioctl payload 1218 */ 1219 static int 1220 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, 1221 struct mpt3_ioctl_btdh_mapping *btdh) 1222 { 1223 struct _sas_device *sas_device; 1224 unsigned long flags; 1225 int rc = 0; 1226 1227 if (list_empty(&ioc->sas_device_list)) 1228 return rc; 1229 1230 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1231 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 1232 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1233 btdh->handle == sas_device->handle) { 1234 btdh->bus = sas_device->channel; 1235 btdh->id = sas_device->id; 1236 rc = 1; 1237 goto out; 1238 } else if (btdh->bus == sas_device->channel && btdh->id == 1239 sas_device->id && btdh->handle == 0xFFFF) { 1240 btdh->handle = sas_device->handle; 1241 rc = 1; 1242 goto out; 1243 } 1244 } 1245 out: 1246 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1247 return rc; 1248 } 1249 1250 /** 1251 * _ctl_btdh_search_raid_device - searching for raid device 1252 * @ioc: per adapter object 1253 * @btdh: btdh ioctl payload 1254 */ 1255 static int 1256 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, 1257 struct mpt3_ioctl_btdh_mapping *btdh) 1258 { 1259 struct _raid_device *raid_device; 1260 unsigned long flags; 1261 int rc = 0; 1262 1263 if (list_empty(&ioc->raid_device_list)) 1264 return rc; 1265 1266 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1267 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1268 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1269 btdh->handle == raid_device->handle) { 1270 btdh->bus = raid_device->channel; 1271 btdh->id = raid_device->id; 1272 rc = 1; 1273 goto out; 1274 } else if (btdh->bus == raid_device->channel && btdh->id == 1275 raid_device->id && btdh->handle == 0xFFFF) { 1276 btdh->handle = raid_device->handle; 1277 rc = 1; 1278 goto out; 1279 } 1280 } 1281 out: 1282 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1283 return rc; 1284 } 1285 1286 /** 1287 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode 1288 * @ioc: per adapter object 1289 * @arg - user space buffer containing ioctl content 1290 */ 1291 static long 1292 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1293 { 1294 struct mpt3_ioctl_btdh_mapping karg; 1295 int rc; 1296 1297 if (copy_from_user(&karg, arg, sizeof(karg))) { 1298 pr_err("failure at %s:%d/%s()!\n", 1299 __FILE__, __LINE__, __func__); 1300 return -EFAULT; 1301 } 1302 1303 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1304 __func__)); 1305 1306 rc = _ctl_btdh_search_sas_device(ioc, &karg); 1307 if (!rc) 1308 _ctl_btdh_search_raid_device(ioc, &karg); 1309 1310 if (copy_to_user(arg, &karg, sizeof(karg))) { 1311 pr_err("failure at %s:%d/%s()!\n", 1312 __FILE__, __LINE__, __func__); 1313 return -EFAULT; 1314 } 1315 return 0; 1316 } 1317 1318 /** 1319 * _ctl_diag_capability - return diag buffer capability 1320 * @ioc: per adapter object 1321 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED 1322 * 1323 * returns 1 when diag buffer support is enabled in firmware 1324 */ 1325 static u8 1326 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) 1327 { 1328 u8 rc = 0; 1329 1330 switch (buffer_type) { 1331 case MPI2_DIAG_BUF_TYPE_TRACE: 1332 if (ioc->facts.IOCCapabilities & 1333 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) 1334 rc = 1; 1335 break; 
1336 case MPI2_DIAG_BUF_TYPE_SNAPSHOT: 1337 if (ioc->facts.IOCCapabilities & 1338 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) 1339 rc = 1; 1340 break; 1341 case MPI2_DIAG_BUF_TYPE_EXTENDED: 1342 if (ioc->facts.IOCCapabilities & 1343 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) 1344 rc = 1; 1345 } 1346 1347 return rc; 1348 } 1349 1350 1351 /** 1352 * _ctl_diag_register_2 - wrapper for registering diag buffer support 1353 * @ioc: per adapter object 1354 * @diag_register: the diag_register struct passed in from user space 1355 * 1356 */ 1357 static long 1358 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, 1359 struct mpt3_diag_register *diag_register) 1360 { 1361 int rc, i; 1362 void *request_data = NULL; 1363 dma_addr_t request_data_dma; 1364 u32 request_data_sz = 0; 1365 Mpi2DiagBufferPostRequest_t *mpi_request; 1366 Mpi2DiagBufferPostReply_t *mpi_reply; 1367 u8 buffer_type; 1368 unsigned long timeleft; 1369 u16 smid; 1370 u16 ioc_status; 1371 u32 ioc_state; 1372 u8 issue_reset = 0; 1373 1374 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1375 __func__)); 1376 1377 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1378 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1379 pr_err(MPT3SAS_FMT 1380 "%s: failed due to ioc not operational\n", 1381 ioc->name, __func__); 1382 rc = -EAGAIN; 1383 goto out; 1384 } 1385 1386 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 1387 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 1388 ioc->name, __func__); 1389 rc = -EAGAIN; 1390 goto out; 1391 } 1392 1393 buffer_type = diag_register->buffer_type; 1394 if (!_ctl_diag_capability(ioc, buffer_type)) { 1395 pr_err(MPT3SAS_FMT 1396 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1397 ioc->name, __func__, buffer_type); 1398 return -EPERM; 1399 } 1400 1401 if (ioc->diag_buffer_status[buffer_type] & 1402 MPT3_DIAG_BUFFER_IS_REGISTERED) { 1403 pr_err(MPT3SAS_FMT 1404 "%s: already has a registered buffer for buffer_type(0x%02x)\n", 1405 ioc->name, __func__, 1406 buffer_type); 1407 return -EINVAL; 1408 } 1409 1410 if (diag_register->requested_buffer_size % 4) { 1411 pr_err(MPT3SAS_FMT 1412 "%s: the requested_buffer_size is not 4 byte aligned\n", 1413 ioc->name, __func__); 1414 return -EINVAL; 1415 } 1416 1417 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 1418 if (!smid) { 1419 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 1420 ioc->name, __func__); 1421 rc = -EAGAIN; 1422 goto out; 1423 } 1424 1425 rc = 0; 1426 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 1427 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 1428 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1429 ioc->ctl_cmds.smid = smid; 1430 1431 request_data = ioc->diag_buffer[buffer_type]; 1432 request_data_sz = diag_register->requested_buffer_size; 1433 ioc->unique_id[buffer_type] = diag_register->unique_id; 1434 ioc->diag_buffer_status[buffer_type] = 0; 1435 memcpy(ioc->product_specific[buffer_type], 1436 diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); 1437 ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; 1438 1439 if (request_data) { 1440 request_data_dma = ioc->diag_buffer_dma[buffer_type]; 1441 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) { 1442 pci_free_consistent(ioc->pdev, 1443 ioc->diag_buffer_sz[buffer_type], 1444 request_data, request_data_dma); 1445 request_data = NULL; 1446 } 1447 } 1448 1449 if (request_data == NULL) { 1450 ioc->diag_buffer_sz[buffer_type] = 0; 1451 ioc->diag_buffer_dma[buffer_type] = 0; 1452 request_data = pci_alloc_consistent( 1453 ioc->pdev, request_data_sz, 
&request_data_dma); 1454 if (request_data == NULL) { 1455 pr_err(MPT3SAS_FMT "%s: failed allocating memory" \ 1456 " for diag buffers, requested size(%d)\n", 1457 ioc->name, __func__, request_data_sz); 1458 mpt3sas_base_free_smid(ioc, smid); 1459 return -ENOMEM; 1460 } 1461 ioc->diag_buffer[buffer_type] = request_data; 1462 ioc->diag_buffer_sz[buffer_type] = request_data_sz; 1463 ioc->diag_buffer_dma[buffer_type] = request_data_dma; 1464 } 1465 1466 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 1467 mpi_request->BufferType = diag_register->buffer_type; 1468 mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); 1469 mpi_request->BufferAddress = cpu_to_le64(request_data_dma); 1470 mpi_request->BufferLength = cpu_to_le32(request_data_sz); 1471 mpi_request->VF_ID = 0; /* TODO */ 1472 mpi_request->VP_ID = 0; 1473 1474 dctlprintk(ioc, pr_info(MPT3SAS_FMT 1475 "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n", 1476 ioc->name, __func__, request_data, 1477 (unsigned long long)request_data_dma, 1478 le32_to_cpu(mpi_request->BufferLength))); 1479 1480 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 1481 mpi_request->ProductSpecific[i] = 1482 cpu_to_le32(ioc->product_specific[buffer_type][i]); 1483 1484 init_completion(&ioc->ctl_cmds.done); 1485 mpt3sas_base_put_smid_default(ioc, smid); 1486 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1487 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1488 1489 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1490 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 1491 __func__); 1492 _debug_dump_mf(mpi_request, 1493 sizeof(Mpi2DiagBufferPostRequest_t)/4); 1494 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 1495 issue_reset = 1; 1496 goto issue_host_reset; 1497 } 1498 1499 /* process the completed Reply Message Frame */ 1500 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 1501 pr_err(MPT3SAS_FMT "%s: no reply message\n", 1502 ioc->name, __func__); 1503 rc = -EFAULT; 1504 goto out; 1505 } 1506 1507 mpi_reply = ioc->ctl_cmds.reply; 1508 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 1509 1510 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1511 ioc->diag_buffer_status[buffer_type] |= 1512 MPT3_DIAG_BUFFER_IS_REGISTERED; 1513 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 1514 ioc->name, __func__)); 1515 } else { 1516 pr_info(MPT3SAS_FMT 1517 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 1518 ioc->name, __func__, 1519 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 1520 rc = -EFAULT; 1521 } 1522 1523 issue_host_reset: 1524 if (issue_reset) 1525 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1526 FORCE_BIG_HAMMER); 1527 1528 out: 1529 1530 if (rc && request_data) 1531 pci_free_consistent(ioc->pdev, request_data_sz, 1532 request_data, request_data_dma); 1533 1534 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 1535 return rc; 1536 } 1537 1538 /** 1539 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time 1540 * @ioc: per adapter object 1541 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 1542 * 1543 * This is called when command line option diag_buffer_enable is enabled 1544 * at driver load time. 
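 *
 * Bit 2 of @bits_to_register requests the extended buffer.  Each buffer
 * type is registered with a 2MB request through _ctl_diag_register_2().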
1545 */ 1546 void 1547 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) 1548 { 1549 struct mpt3_diag_register diag_register; 1550 1551 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 1552 1553 if (bits_to_register & 1) { 1554 pr_info(MPT3SAS_FMT "registering trace buffer support\n", 1555 ioc->name); 1556 ioc->diag_trigger_master.MasterData = 1557 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 1558 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 1559 /* register for 2MB buffers */ 1560 diag_register.requested_buffer_size = 2 * (1024 * 1024); 1561 diag_register.unique_id = 0x7075900; 1562 _ctl_diag_register_2(ioc, &diag_register); 1563 } 1564 1565 if (bits_to_register & 2) { 1566 pr_info(MPT3SAS_FMT "registering snapshot buffer support\n", 1567 ioc->name); 1568 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; 1569 /* register for 2MB buffers */ 1570 diag_register.requested_buffer_size = 2 * (1024 * 1024); 1571 diag_register.unique_id = 0x7075901; 1572 _ctl_diag_register_2(ioc, &diag_register); 1573 } 1574 1575 if (bits_to_register & 4) { 1576 pr_info(MPT3SAS_FMT "registering extended buffer support\n", 1577 ioc->name); 1578 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; 1579 /* register for 2MB buffers */ 1580 diag_register.requested_buffer_size = 2 * (1024 * 1024); 1581 diag_register.unique_id = 0x7075901; 1582 _ctl_diag_register_2(ioc, &diag_register); 1583 } 1584 } 1585 1586 /** 1587 * _ctl_diag_register - application register with driver 1588 * @ioc: per adapter object 1589 * @arg - user space buffer containing ioctl content 1590 * 1591 * This will allow the driver to setup any required buffers that will be 1592 * needed by firmware to communicate with the driver. 1593 */ 1594 static long 1595 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1596 { 1597 struct mpt3_diag_register karg; 1598 long rc; 1599 1600 if (copy_from_user(&karg, arg, sizeof(karg))) { 1601 pr_err("failure at %s:%d/%s()!\n", 1602 __FILE__, __LINE__, __func__); 1603 return -EFAULT; 1604 } 1605 1606 rc = _ctl_diag_register_2(ioc, &karg); 1607 return rc; 1608 } 1609 1610 /** 1611 * _ctl_diag_unregister - application unregister with driver 1612 * @ioc: per adapter object 1613 * @arg - user space buffer containing ioctl content 1614 * 1615 * This will allow the driver to cleanup any memory allocated for diag 1616 * messages and to free up any resources. 
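 *
 * The buffer must already be registered and have been released back to
 * the driver (see _ctl_diag_release), and the unique_id supplied by the
 * application must match the one used at registration; the DMA memory
 * backing the buffer is then freed.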
1617 */ 1618 static long 1619 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1620 { 1621 struct mpt3_diag_unregister karg; 1622 void *request_data; 1623 dma_addr_t request_data_dma; 1624 u32 request_data_sz; 1625 u8 buffer_type; 1626 1627 if (copy_from_user(&karg, arg, sizeof(karg))) { 1628 pr_err("failure at %s:%d/%s()!\n", 1629 __FILE__, __LINE__, __func__); 1630 return -EFAULT; 1631 } 1632 1633 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1634 __func__)); 1635 1636 buffer_type = karg.unique_id & 0x000000ff; 1637 if (!_ctl_diag_capability(ioc, buffer_type)) { 1638 pr_err(MPT3SAS_FMT 1639 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1640 ioc->name, __func__, buffer_type); 1641 return -EPERM; 1642 } 1643 1644 if ((ioc->diag_buffer_status[buffer_type] & 1645 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1646 pr_err(MPT3SAS_FMT 1647 "%s: buffer_type(0x%02x) is not registered\n", 1648 ioc->name, __func__, buffer_type); 1649 return -EINVAL; 1650 } 1651 if ((ioc->diag_buffer_status[buffer_type] & 1652 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { 1653 pr_err(MPT3SAS_FMT 1654 "%s: buffer_type(0x%02x) has not been released\n", 1655 ioc->name, __func__, buffer_type); 1656 return -EINVAL; 1657 } 1658 1659 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1660 pr_err(MPT3SAS_FMT 1661 "%s: unique_id(0x%08x) is not registered\n", 1662 ioc->name, __func__, karg.unique_id); 1663 return -EINVAL; 1664 } 1665 1666 request_data = ioc->diag_buffer[buffer_type]; 1667 if (!request_data) { 1668 pr_err(MPT3SAS_FMT 1669 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", 1670 ioc->name, __func__, buffer_type); 1671 return -ENOMEM; 1672 } 1673 1674 request_data_sz = ioc->diag_buffer_sz[buffer_type]; 1675 request_data_dma = ioc->diag_buffer_dma[buffer_type]; 1676 pci_free_consistent(ioc->pdev, request_data_sz, 1677 request_data, request_data_dma); 1678 ioc->diag_buffer[buffer_type] = NULL; 1679 ioc->diag_buffer_status[buffer_type] = 0; 1680 return 0; 1681 } 1682 1683 /** 1684 * _ctl_diag_query - query relevant info associated with diag buffers 1685 * @ioc: per adapter object 1686 * @arg - user space buffer containing ioctl content 1687 * 1688 * The application will send only buffer_type and unique_id. Driver will 1689 * inspect unique_id first, if valid, fill in all the info. If unique_id is 1690 * 0x00, the driver will return info specified by Buffer Type. 
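 *
 * The returned application_flags always include MPT3_APP_FLAGS_APP_OWNED
 * and MPT3_APP_FLAGS_BUFFER_VALID; MPT3_APP_FLAGS_FW_BUFFER_ACCESS is
 * reported in addition while the buffer has not yet been released.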
1691 */ 1692 static long 1693 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1694 { 1695 struct mpt3_diag_query karg; 1696 void *request_data; 1697 int i; 1698 u8 buffer_type; 1699 1700 if (copy_from_user(&karg, arg, sizeof(karg))) { 1701 pr_err("failure at %s:%d/%s()!\n", 1702 __FILE__, __LINE__, __func__); 1703 return -EFAULT; 1704 } 1705 1706 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1707 __func__)); 1708 1709 karg.application_flags = 0; 1710 buffer_type = karg.buffer_type; 1711 1712 if (!_ctl_diag_capability(ioc, buffer_type)) { 1713 pr_err(MPT3SAS_FMT 1714 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1715 ioc->name, __func__, buffer_type); 1716 return -EPERM; 1717 } 1718 1719 if ((ioc->diag_buffer_status[buffer_type] & 1720 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1721 pr_err(MPT3SAS_FMT 1722 "%s: buffer_type(0x%02x) is not registered\n", 1723 ioc->name, __func__, buffer_type); 1724 return -EINVAL; 1725 } 1726 1727 if (karg.unique_id & 0xffffff00) { 1728 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1729 pr_err(MPT3SAS_FMT 1730 "%s: unique_id(0x%08x) is not registered\n", 1731 ioc->name, __func__, karg.unique_id); 1732 return -EINVAL; 1733 } 1734 } 1735 1736 request_data = ioc->diag_buffer[buffer_type]; 1737 if (!request_data) { 1738 pr_err(MPT3SAS_FMT 1739 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 1740 ioc->name, __func__, buffer_type); 1741 return -ENOMEM; 1742 } 1743 1744 if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) 1745 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1746 MPT3_APP_FLAGS_BUFFER_VALID); 1747 else 1748 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1749 MPT3_APP_FLAGS_BUFFER_VALID | 1750 MPT3_APP_FLAGS_FW_BUFFER_ACCESS); 1751 1752 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 1753 karg.product_specific[i] = 1754 ioc->product_specific[buffer_type][i]; 1755 1756 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; 1757 karg.driver_added_buffer_size = 0; 1758 karg.unique_id = ioc->unique_id[buffer_type]; 1759 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; 1760 1761 if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { 1762 pr_err(MPT3SAS_FMT 1763 "%s: unable to write mpt3_diag_query data @ %p\n", 1764 ioc->name, __func__, arg); 1765 return -EFAULT; 1766 } 1767 return 0; 1768 } 1769 1770 /** 1771 * mpt3sas_send_diag_release - Diag Release Message 1772 * @ioc: per adapter object 1773 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED 1774 * @issue_reset - specifies whether host reset is required. 
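 *
 * Return: 0 on success; -EAGAIN when the ioc is not operational, the ctl
 * command slot is in use, or no smid is available; -EFAULT on timeout or
 * when firmware returns a non SUCCESS ioc_status.  On timeout *issue_reset
 * may be set so the caller can escalate to a host reset.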
1775 * 1776 */ 1777 int 1778 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, 1779 u8 *issue_reset) 1780 { 1781 Mpi2DiagReleaseRequest_t *mpi_request; 1782 Mpi2DiagReleaseReply_t *mpi_reply; 1783 u16 smid; 1784 u16 ioc_status; 1785 u32 ioc_state; 1786 int rc; 1787 unsigned long timeleft; 1788 1789 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1790 __func__)); 1791 1792 rc = 0; 1793 *issue_reset = 0; 1794 1795 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1796 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1797 if (ioc->diag_buffer_status[buffer_type] & 1798 MPT3_DIAG_BUFFER_IS_REGISTERED) 1799 ioc->diag_buffer_status[buffer_type] |= 1800 MPT3_DIAG_BUFFER_IS_RELEASED; 1801 dctlprintk(ioc, pr_info(MPT3SAS_FMT 1802 "%s: skipping due to FAULT state\n", ioc->name, 1803 __func__)); 1804 rc = -EAGAIN; 1805 goto out; 1806 } 1807 1808 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 1809 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 1810 ioc->name, __func__); 1811 rc = -EAGAIN; 1812 goto out; 1813 } 1814 1815 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 1816 if (!smid) { 1817 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 1818 ioc->name, __func__); 1819 rc = -EAGAIN; 1820 goto out; 1821 } 1822 1823 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 1824 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 1825 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1826 ioc->ctl_cmds.smid = smid; 1827 1828 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; 1829 mpi_request->BufferType = buffer_type; 1830 mpi_request->VF_ID = 0; /* TODO */ 1831 mpi_request->VP_ID = 0; 1832 1833 init_completion(&ioc->ctl_cmds.done); 1834 mpt3sas_base_put_smid_default(ioc, smid); 1835 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1836 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1837 1838 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1839 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 1840 __func__); 1841 _debug_dump_mf(mpi_request, 1842 sizeof(Mpi2DiagReleaseRequest_t)/4); 1843 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 1844 *issue_reset = 1; 1845 rc = -EFAULT; 1846 goto out; 1847 } 1848 1849 /* process the completed Reply Message Frame */ 1850 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 1851 pr_err(MPT3SAS_FMT "%s: no reply message\n", 1852 ioc->name, __func__); 1853 rc = -EFAULT; 1854 goto out; 1855 } 1856 1857 mpi_reply = ioc->ctl_cmds.reply; 1858 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 1859 1860 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1861 ioc->diag_buffer_status[buffer_type] |= 1862 MPT3_DIAG_BUFFER_IS_RELEASED; 1863 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 1864 ioc->name, __func__)); 1865 } else { 1866 pr_info(MPT3SAS_FMT 1867 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 1868 ioc->name, __func__, 1869 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 1870 rc = -EFAULT; 1871 } 1872 1873 out: 1874 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 1875 return rc; 1876 } 1877 1878 /** 1879 * _ctl_diag_release - request to send Diag Release Message to firmware 1880 * @arg - user space buffer containing ioctl content 1881 * 1882 * This allows ownership of the specified buffer to returned to the driver, 1883 * allowing an application to read the buffer without fear that firmware is 1884 * overwritting information in the buffer. 
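 *
 * A buffer that is already released, or that was implicitly released by a
 * diag (host) reset, is reported as such and 0 is returned without sending
 * a new Diag Release request to firmware.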
1885 */ 1886 static long 1887 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1888 { 1889 struct mpt3_diag_release karg; 1890 void *request_data; 1891 int rc; 1892 u8 buffer_type; 1893 u8 issue_reset = 0; 1894 1895 if (copy_from_user(&karg, arg, sizeof(karg))) { 1896 pr_err("failure at %s:%d/%s()!\n", 1897 __FILE__, __LINE__, __func__); 1898 return -EFAULT; 1899 } 1900 1901 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1902 __func__)); 1903 1904 buffer_type = karg.unique_id & 0x000000ff; 1905 if (!_ctl_diag_capability(ioc, buffer_type)) { 1906 pr_err(MPT3SAS_FMT 1907 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1908 ioc->name, __func__, buffer_type); 1909 return -EPERM; 1910 } 1911 1912 if ((ioc->diag_buffer_status[buffer_type] & 1913 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1914 pr_err(MPT3SAS_FMT 1915 "%s: buffer_type(0x%02x) is not registered\n", 1916 ioc->name, __func__, buffer_type); 1917 return -EINVAL; 1918 } 1919 1920 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1921 pr_err(MPT3SAS_FMT 1922 "%s: unique_id(0x%08x) is not registered\n", 1923 ioc->name, __func__, karg.unique_id); 1924 return -EINVAL; 1925 } 1926 1927 if (ioc->diag_buffer_status[buffer_type] & 1928 MPT3_DIAG_BUFFER_IS_RELEASED) { 1929 pr_err(MPT3SAS_FMT 1930 "%s: buffer_type(0x%02x) is already released\n", 1931 ioc->name, __func__, 1932 buffer_type); 1933 return 0; 1934 } 1935 1936 request_data = ioc->diag_buffer[buffer_type]; 1937 1938 if (!request_data) { 1939 pr_err(MPT3SAS_FMT 1940 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", 1941 ioc->name, __func__, buffer_type); 1942 return -ENOMEM; 1943 } 1944 1945 /* buffers were released by due to host reset */ 1946 if ((ioc->diag_buffer_status[buffer_type] & 1947 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) { 1948 ioc->diag_buffer_status[buffer_type] |= 1949 MPT3_DIAG_BUFFER_IS_RELEASED; 1950 ioc->diag_buffer_status[buffer_type] &= 1951 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; 1952 pr_err(MPT3SAS_FMT 1953 "%s: buffer_type(0x%02x) was released due to host reset\n", 1954 ioc->name, __func__, buffer_type); 1955 return 0; 1956 } 1957 1958 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); 1959 1960 if (issue_reset) 1961 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1962 FORCE_BIG_HAMMER); 1963 1964 return rc; 1965 } 1966 1967 /** 1968 * _ctl_diag_read_buffer - request for copy of the diag buffer 1969 * @ioc: per adapter object 1970 * @arg - user space buffer containing ioctl content 1971 */ 1972 static long 1973 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1974 { 1975 struct mpt3_diag_read_buffer karg; 1976 struct mpt3_diag_read_buffer __user *uarg = arg; 1977 void *request_data, *diag_data; 1978 Mpi2DiagBufferPostRequest_t *mpi_request; 1979 Mpi2DiagBufferPostReply_t *mpi_reply; 1980 int rc, i; 1981 u8 buffer_type; 1982 unsigned long timeleft, request_size, copy_size; 1983 u16 smid; 1984 u16 ioc_status; 1985 u8 issue_reset = 0; 1986 1987 if (copy_from_user(&karg, arg, sizeof(karg))) { 1988 pr_err("failure at %s:%d/%s()!\n", 1989 __FILE__, __LINE__, __func__); 1990 return -EFAULT; 1991 } 1992 1993 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1994 __func__)); 1995 1996 buffer_type = karg.unique_id & 0x000000ff; 1997 if (!_ctl_diag_capability(ioc, buffer_type)) { 1998 pr_err(MPT3SAS_FMT 1999 "%s: doesn't have capability for buffer_type(0x%02x)\n", 2000 ioc->name, __func__, buffer_type); 2001 return -EPERM; 2002 } 2003 2004 if (karg.unique_id != ioc->unique_id[buffer_type]) { 2005 
pr_err(MPT3SAS_FMT 2006 "%s: unique_id(0x%08x) is not registered\n", 2007 ioc->name, __func__, karg.unique_id); 2008 return -EINVAL; 2009 } 2010 2011 request_data = ioc->diag_buffer[buffer_type]; 2012 if (!request_data) { 2013 pr_err(MPT3SAS_FMT 2014 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 2015 ioc->name, __func__, buffer_type); 2016 return -ENOMEM; 2017 } 2018 2019 request_size = ioc->diag_buffer_sz[buffer_type]; 2020 2021 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { 2022 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \ 2023 "or bytes_to_read are not 4 byte aligned\n", ioc->name, 2024 __func__); 2025 return -EINVAL; 2026 } 2027 2028 if (karg.starting_offset > request_size) 2029 return -EINVAL; 2030 2031 diag_data = (void *)(request_data + karg.starting_offset); 2032 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2033 "%s: diag_buffer(%p), offset(%d), sz(%d)\n", 2034 ioc->name, __func__, 2035 diag_data, karg.starting_offset, karg.bytes_to_read)); 2036 2037 /* Truncate data on requests that are too large */ 2038 if ((diag_data + karg.bytes_to_read < diag_data) || 2039 (diag_data + karg.bytes_to_read > request_data + request_size)) 2040 copy_size = request_size - karg.starting_offset; 2041 else 2042 copy_size = karg.bytes_to_read; 2043 2044 if (copy_to_user((void __user *)uarg->diagnostic_data, 2045 diag_data, copy_size)) { 2046 pr_err(MPT3SAS_FMT 2047 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", 2048 ioc->name, __func__, diag_data); 2049 return -EFAULT; 2050 } 2051 2052 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) 2053 return 0; 2054 2055 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2056 "%s: Reregister buffer_type(0x%02x)\n", 2057 ioc->name, __func__, buffer_type)); 2058 if ((ioc->diag_buffer_status[buffer_type] & 2059 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { 2060 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2061 "%s: buffer_type(0x%02x) is still registered\n", 2062 ioc->name, __func__, buffer_type)); 2063 return 0; 2064 } 2065 /* Get a free request frame and save the message context. 
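 * When re-registering, the buffer address, length and product-specific dwords saved at the original registration are replayed in the new DIAG_BUFFER_POST request so firmware posts an identical buffer.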
2066 */ 2067 2068 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 2069 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 2070 ioc->name, __func__); 2071 rc = -EAGAIN; 2072 goto out; 2073 } 2074 2075 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 2076 if (!smid) { 2077 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 2078 ioc->name, __func__); 2079 rc = -EAGAIN; 2080 goto out; 2081 } 2082 2083 rc = 0; 2084 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 2085 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 2086 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2087 ioc->ctl_cmds.smid = smid; 2088 2089 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 2090 mpi_request->BufferType = buffer_type; 2091 mpi_request->BufferLength = 2092 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); 2093 mpi_request->BufferAddress = 2094 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); 2095 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 2096 mpi_request->ProductSpecific[i] = 2097 cpu_to_le32(ioc->product_specific[buffer_type][i]); 2098 mpi_request->VF_ID = 0; /* TODO */ 2099 mpi_request->VP_ID = 0; 2100 2101 init_completion(&ioc->ctl_cmds.done); 2102 mpt3sas_base_put_smid_default(ioc, smid); 2103 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 2104 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 2105 2106 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 2107 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 2108 __func__); 2109 _debug_dump_mf(mpi_request, 2110 sizeof(Mpi2DiagBufferPostRequest_t)/4); 2111 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 2112 issue_reset = 1; 2113 goto issue_host_reset; 2114 } 2115 2116 /* process the completed Reply Message Frame */ 2117 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 2118 pr_err(MPT3SAS_FMT "%s: no reply message\n", 2119 ioc->name, __func__); 2120 rc = -EFAULT; 2121 goto out; 2122 } 2123 2124 mpi_reply = ioc->ctl_cmds.reply; 2125 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 2126 2127 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 2128 ioc->diag_buffer_status[buffer_type] |= 2129 MPT3_DIAG_BUFFER_IS_REGISTERED; 2130 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 2131 ioc->name, __func__)); 2132 } else { 2133 pr_info(MPT3SAS_FMT 2134 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 2135 ioc->name, __func__, 2136 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 2137 rc = -EFAULT; 2138 } 2139 2140 issue_host_reset: 2141 if (issue_reset) 2142 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2143 FORCE_BIG_HAMMER); 2144 2145 out: 2146 2147 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 2148 return rc; 2149 } 2150 2151 2152 2153 #ifdef CONFIG_COMPAT 2154 /** 2155 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. 2156 * @ioc: per adapter object 2157 * @cmd - ioctl opcode 2158 * @arg - (struct mpt3_ioctl_command32) 2159 * 2160 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. 
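 * The 32-bit structure carries user buffer addresses as 32-bit values; each one is widened with compat_ptr() and copied into a native struct mpt3_ioctl_command before handing off to _ctl_do_mpt_command().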
2161 */ 2162 static long 2163 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, 2164 void __user *arg) 2165 { 2166 struct mpt3_ioctl_command32 karg32; 2167 struct mpt3_ioctl_command32 __user *uarg; 2168 struct mpt3_ioctl_command karg; 2169 2170 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) 2171 return -EINVAL; 2172 2173 uarg = (struct mpt3_ioctl_command32 __user *) arg; 2174 2175 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { 2176 pr_err("failure at %s:%d/%s()!\n", 2177 __FILE__, __LINE__, __func__); 2178 return -EFAULT; 2179 } 2180 2181 memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); 2182 karg.hdr.ioc_number = karg32.hdr.ioc_number; 2183 karg.hdr.port_number = karg32.hdr.port_number; 2184 karg.hdr.max_data_size = karg32.hdr.max_data_size; 2185 karg.timeout = karg32.timeout; 2186 karg.max_reply_bytes = karg32.max_reply_bytes; 2187 karg.data_in_size = karg32.data_in_size; 2188 karg.data_out_size = karg32.data_out_size; 2189 karg.max_sense_bytes = karg32.max_sense_bytes; 2190 karg.data_sge_offset = karg32.data_sge_offset; 2191 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); 2192 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); 2193 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); 2194 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); 2195 return _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2196 } 2197 #endif 2198 2199 /** 2200 * _ctl_ioctl_main - main ioctl entry point 2201 * @file - (struct file) 2202 * @cmd - ioctl opcode 2203 * @arg - user space data buffer 2204 * @compat - handles 32 bit applications in 64bit os 2205 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & 2206 * MPI25_VERSION for mpt3ctl ioctl device. 2207 */ 2208 static long 2209 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, 2210 u8 compat, u16 mpi_version) 2211 { 2212 struct MPT3SAS_ADAPTER *ioc; 2213 struct mpt3_ioctl_header ioctl_header; 2214 enum block_state state; 2215 long ret = -EINVAL; 2216 2217 /* get IOCTL header */ 2218 if (copy_from_user(&ioctl_header, (char __user *)arg, 2219 sizeof(struct mpt3_ioctl_header))) { 2220 pr_err("failure at %s:%d/%s()!\n", 2221 __FILE__, __LINE__, __func__); 2222 return -EFAULT; 2223 } 2224 2225 if (_ctl_verify_adapter(ioctl_header.ioc_number, 2226 &ioc, mpi_version) == -1 || !ioc) 2227 return -ENODEV; 2228 2229 /* pci_access_mutex lock acquired by ioctl path */ 2230 mutex_lock(&ioc->pci_access_mutex); 2231 2232 if (ioc->shost_recovery || ioc->pci_error_recovery || 2233 ioc->is_driver_loading || ioc->remove_host) { 2234 ret = -EAGAIN; 2235 goto out_unlock_pciaccess; 2236 } 2237 2238 state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; 2239 if (state == NON_BLOCKING) { 2240 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { 2241 ret = -EAGAIN; 2242 goto out_unlock_pciaccess; 2243 } 2244 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { 2245 ret = -ERESTARTSYS; 2246 goto out_unlock_pciaccess; 2247 } 2248 2249 2250 switch (cmd) { 2251 case MPT3IOCINFO: 2252 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) 2253 ret = _ctl_getiocinfo(ioc, arg); 2254 break; 2255 #ifdef CONFIG_COMPAT 2256 case MPT3COMMAND32: 2257 #endif 2258 case MPT3COMMAND: 2259 { 2260 struct mpt3_ioctl_command __user *uarg; 2261 struct mpt3_ioctl_command karg; 2262 2263 #ifdef CONFIG_COMPAT 2264 if (compat) { 2265 ret = _ctl_compat_mpt_command(ioc, cmd, arg); 2266 break; 2267 } 2268 #endif 2269 if (copy_from_user(&karg, arg, sizeof(karg))) { 2270 pr_err("failure at %s:%d/%s()!\n", 2271 __FILE__, __LINE__, __func__); 2272 ret = -EFAULT; 2273 break; 2274 } 2275 2276 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { 2277 uarg = arg; 2278 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2279 } 2280 break; 2281 } 2282 case MPT3EVENTQUERY: 2283 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) 2284 ret = _ctl_eventquery(ioc, arg); 2285 break; 2286 case MPT3EVENTENABLE: 2287 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) 2288 ret = _ctl_eventenable(ioc, arg); 2289 break; 2290 case MPT3EVENTREPORT: 2291 ret = _ctl_eventreport(ioc, arg); 2292 break; 2293 case MPT3HARDRESET: 2294 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) 2295 ret = _ctl_do_reset(ioc, arg); 2296 break; 2297 case MPT3BTDHMAPPING: 2298 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) 2299 ret = _ctl_btdh_mapping(ioc, arg); 2300 break; 2301 case MPT3DIAGREGISTER: 2302 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) 2303 ret = _ctl_diag_register(ioc, arg); 2304 break; 2305 case MPT3DIAGUNREGISTER: 2306 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) 2307 ret = _ctl_diag_unregister(ioc, arg); 2308 break; 2309 case MPT3DIAGQUERY: 2310 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) 2311 ret = _ctl_diag_query(ioc, arg); 2312 break; 2313 case MPT3DIAGRELEASE: 2314 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) 2315 ret = _ctl_diag_release(ioc, arg); 2316 break; 2317 case MPT3DIAGREADBUFFER: 2318 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) 2319 ret = _ctl_diag_read_buffer(ioc, arg); 2320 break; 2321 default: 2322 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2323 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); 2324 break; 2325 } 2326 2327 mutex_unlock(&ioc->ctl_cmds.mutex); 2328 out_unlock_pciaccess: 2329 mutex_unlock(&ioc->pci_access_mutex); 2330 return ret; 2331 } 2332 2333 /** 2334 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) 2335 * @file - (struct file) 2336 * @cmd - ioctl opcode 2337 * @arg - 2338 */ 2339 long 2340 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2341 { 2342 long ret; 2343 2344 /* pass MPI25_VERSION value, to indicate that this ioctl cmd 2345 * came from mpt3ctl ioctl device. 
2346 */ 2347 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI25_VERSION); 2348 return ret; 2349 } 2350 2351 /** 2352 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) 2353 * @file - (struct file) 2354 * @cmd - ioctl opcode 2355 * @arg - 2356 */ 2357 long 2358 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2359 { 2360 long ret; 2361 2362 /* pass MPI2_VERSION value, to indicate that this ioctl cmd 2363 * came from mpt2ctl ioctl device. 2364 */ 2365 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); 2366 return ret; 2367 } 2368 #ifdef CONFIG_COMPAT 2369 /** 2370 *_ ctl_ioctl_compat - main ioctl entry point (compat) 2371 * @file - 2372 * @cmd - 2373 * @arg - 2374 * 2375 * This routine handles 32 bit applications in 64bit os. 2376 */ 2377 long 2378 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2379 { 2380 long ret; 2381 2382 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI25_VERSION); 2383 return ret; 2384 } 2385 2386 /** 2387 *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) 2388 * @file - 2389 * @cmd - 2390 * @arg - 2391 * 2392 * This routine handles 32 bit applications in 64bit os. 2393 */ 2394 long 2395 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2396 { 2397 long ret; 2398 2399 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); 2400 return ret; 2401 } 2402 #endif 2403 2404 /* scsi host attributes */ 2405 /** 2406 * _ctl_version_fw_show - firmware version 2407 * @cdev - pointer to embedded class device 2408 * @buf - the buffer returned 2409 * 2410 * A sysfs 'read-only' shost attribute. 2411 */ 2412 static ssize_t 2413 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, 2414 char *buf) 2415 { 2416 struct Scsi_Host *shost = class_to_shost(cdev); 2417 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2418 2419 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2420 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 2421 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 2422 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 2423 ioc->facts.FWVersion.Word & 0x000000FF); 2424 } 2425 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); 2426 2427 /** 2428 * _ctl_version_bios_show - bios version 2429 * @cdev - pointer to embedded class device 2430 * @buf - the buffer returned 2431 * 2432 * A sysfs 'read-only' shost attribute. 2433 */ 2434 static ssize_t 2435 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, 2436 char *buf) 2437 { 2438 struct Scsi_Host *shost = class_to_shost(cdev); 2439 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2440 2441 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 2442 2443 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2444 (version & 0xFF000000) >> 24, 2445 (version & 0x00FF0000) >> 16, 2446 (version & 0x0000FF00) >> 8, 2447 version & 0x000000FF); 2448 } 2449 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); 2450 2451 /** 2452 * _ctl_version_mpi_show - MPI (message passing interface) version 2453 * @cdev - pointer to embedded class device 2454 * @buf - the buffer returned 2455 * 2456 * A sysfs 'read-only' shost attribute. 
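 * Like the other shost attributes in this file it is readable from userspace, e.g. (path assumed): cat /sys/class/scsi_host/host<N>/version_mpi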
2457 */ 2458 static ssize_t 2459 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, 2460 char *buf) 2461 { 2462 struct Scsi_Host *shost = class_to_shost(cdev); 2463 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2464 2465 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", 2466 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); 2467 } 2468 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); 2469 2470 /** 2471 * _ctl_version_product_show - product name 2472 * @cdev - pointer to embedded class device 2473 * @buf - the buffer returned 2474 * 2475 * A sysfs 'read-only' shost attribute. 2476 */ 2477 static ssize_t 2478 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr, 2479 char *buf) 2480 { 2481 struct Scsi_Host *shost = class_to_shost(cdev); 2482 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2483 2484 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); 2485 } 2486 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); 2487 2488 /** 2489 * _ctl_version_nvdata_persistent_show - ndvata persistent version 2490 * @cdev - pointer to embedded class device 2491 * @buf - the buffer returned 2492 * 2493 * A sysfs 'read-only' shost attribute. 2494 */ 2495 static ssize_t 2496 _ctl_version_nvdata_persistent_show(struct device *cdev, 2497 struct device_attribute *attr, char *buf) 2498 { 2499 struct Scsi_Host *shost = class_to_shost(cdev); 2500 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2501 2502 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2503 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); 2504 } 2505 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, 2506 _ctl_version_nvdata_persistent_show, NULL); 2507 2508 /** 2509 * _ctl_version_nvdata_default_show - nvdata default version 2510 * @cdev - pointer to embedded class device 2511 * @buf - the buffer returned 2512 * 2513 * A sysfs 'read-only' shost attribute. 2514 */ 2515 static ssize_t 2516 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute 2517 *attr, char *buf) 2518 { 2519 struct Scsi_Host *shost = class_to_shost(cdev); 2520 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2521 2522 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2523 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); 2524 } 2525 static DEVICE_ATTR(version_nvdata_default, S_IRUGO, 2526 _ctl_version_nvdata_default_show, NULL); 2527 2528 /** 2529 * _ctl_board_name_show - board name 2530 * @cdev - pointer to embedded class device 2531 * @buf - the buffer returned 2532 * 2533 * A sysfs 'read-only' shost attribute. 2534 */ 2535 static ssize_t 2536 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr, 2537 char *buf) 2538 { 2539 struct Scsi_Host *shost = class_to_shost(cdev); 2540 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2541 2542 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); 2543 } 2544 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); 2545 2546 /** 2547 * _ctl_board_assembly_show - board assembly name 2548 * @cdev - pointer to embedded class device 2549 * @buf - the buffer returned 2550 * 2551 * A sysfs 'read-only' shost attribute. 
2552 */ 2553 static ssize_t 2554 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr, 2555 char *buf) 2556 { 2557 struct Scsi_Host *shost = class_to_shost(cdev); 2558 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2559 2560 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); 2561 } 2562 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); 2563 2564 /** 2565 * _ctl_board_tracer_show - board tracer number 2566 * @cdev - pointer to embedded class device 2567 * @buf - the buffer returned 2568 * 2569 * A sysfs 'read-only' shost attribute. 2570 */ 2571 static ssize_t 2572 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr, 2573 char *buf) 2574 { 2575 struct Scsi_Host *shost = class_to_shost(cdev); 2576 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2577 2578 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); 2579 } 2580 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); 2581 2582 /** 2583 * _ctl_io_delay_show - io missing delay 2584 * @cdev - pointer to embedded class device 2585 * @buf - the buffer returned 2586 * 2587 * This is the firmware implementation for debouncing device 2588 * removal events. 2589 * 2590 * A sysfs 'read-only' shost attribute. 2591 */ 2592 static ssize_t 2593 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr, 2594 char *buf) 2595 { 2596 struct Scsi_Host *shost = class_to_shost(cdev); 2597 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2598 2599 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); 2600 } 2601 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); 2602 2603 /** 2604 * _ctl_device_delay_show - device missing delay 2605 * @cdev - pointer to embedded class device 2606 * @buf - the buffer returned 2607 * 2608 * This is the firmware implementation for debouncing device 2609 * removal events. 2610 * 2611 * A sysfs 'read-only' shost attribute. 2612 */ 2613 static ssize_t 2614 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr, 2615 char *buf) 2616 { 2617 struct Scsi_Host *shost = class_to_shost(cdev); 2618 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2619 2620 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); 2621 } 2622 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); 2623 2624 /** 2625 * _ctl_fw_queue_depth_show - global credits 2626 * @cdev - pointer to embedded class device 2627 * @buf - the buffer returned 2628 * 2629 * This is the firmware queue depth limit 2630 * 2631 * A sysfs 'read-only' shost attribute. 2632 */ 2633 static ssize_t 2634 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, 2635 char *buf) 2636 { 2637 struct Scsi_Host *shost = class_to_shost(cdev); 2638 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2639 2640 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); 2641 } 2642 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL); 2643 2644 /** 2645 * _ctl_host_sas_address_show - sas address 2646 * @cdev - pointer to embedded class device 2647 * @buf - the buffer returned 2648 * 2649 * This is the controller sas address 2650 * 2651 * A sysfs 'read-only' shost attribute.
2652 */ 2653 static ssize_t 2654 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr, 2655 char *buf) 2656 2657 { 2658 struct Scsi_Host *shost = class_to_shost(cdev); 2659 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2660 2661 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 2662 (unsigned long long)ioc->sas_hba.sas_address); 2663 } 2664 static DEVICE_ATTR(host_sas_address, S_IRUGO, 2665 _ctl_host_sas_address_show, NULL); 2666 2667 /** 2668 * _ctl_logging_level_show - logging level 2669 * @cdev - pointer to embedded class device 2670 * @buf - the buffer returned 2671 * 2672 * A sysfs 'read/write' shost attribute. 2673 */ 2674 static ssize_t 2675 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr, 2676 char *buf) 2677 { 2678 struct Scsi_Host *shost = class_to_shost(cdev); 2679 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2680 2681 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); 2682 } 2683 static ssize_t 2684 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr, 2685 const char *buf, size_t count) 2686 { 2687 struct Scsi_Host *shost = class_to_shost(cdev); 2688 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2689 int val = 0; 2690 2691 if (sscanf(buf, "%x", &val) != 1) 2692 return -EINVAL; 2693 2694 ioc->logging_level = val; 2695 pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name, 2696 ioc->logging_level); 2697 return strlen(buf); 2698 } 2699 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show, 2700 _ctl_logging_level_store); 2701 2702 /** 2703 * _ctl_fwfault_debug_show - show/store fwfault_debug 2704 * @cdev - pointer to embedded class device 2705 * @buf - the buffer returned 2706 * 2707 * mpt3sas_fwfault_debug is a command line option. 2708 * A sysfs 'read/write' shost attribute. 2709 */ 2710 static ssize_t 2711 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr, 2712 char *buf) 2713 { 2714 struct Scsi_Host *shost = class_to_shost(cdev); 2715 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2716 2717 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); 2718 } 2719 static ssize_t 2720 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr, 2721 const char *buf, size_t count) 2722 { 2723 struct Scsi_Host *shost = class_to_shost(cdev); 2724 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2725 int val = 0; 2726 2727 if (sscanf(buf, "%d", &val) != 1) 2728 return -EINVAL; 2729 2730 ioc->fwfault_debug = val; 2731 pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name, 2732 ioc->fwfault_debug); 2733 return strlen(buf); 2734 } 2735 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, 2736 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); 2737 2738 /** 2739 * _ctl_ioc_reset_count_show - ioc reset count 2740 * @cdev - pointer to embedded class device 2741 * @buf - the buffer returned 2742 * 2743 * This is the number of host resets issued on the controller 2744 * 2745 * A sysfs 'read-only' shost attribute.
2746 */ 2747 static ssize_t 2748 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, 2749 char *buf) 2750 { 2751 struct Scsi_Host *shost = class_to_shost(cdev); 2752 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2753 2754 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); 2755 } 2756 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); 2757 2758 /** 2759 * _ctl_ioc_reply_queue_count_show - number of reply queues 2760 * @cdev - pointer to embedded class device 2761 * @buf - the buffer returned 2762 * 2763 * This is the number of reply queues 2764 * 2765 * A sysfs 'read-only' shost attribute. 2766 */ 2767 static ssize_t 2768 _ctl_ioc_reply_queue_count_show(struct device *cdev, 2769 struct device_attribute *attr, char *buf) 2770 { 2771 u8 reply_queue_count; 2772 struct Scsi_Host *shost = class_to_shost(cdev); 2773 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2774 2775 if ((ioc->facts.IOCCapabilities & 2776 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) 2777 reply_queue_count = ioc->reply_queue_count; 2778 else 2779 reply_queue_count = 1; 2780 2781 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); 2782 } 2783 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, 2784 NULL); 2785 2786 /** 2787 * _ctl_BRM_status_show - Backup Rail Monitor Status 2788 * @cdev - pointer to embedded class device 2789 * @buf - the buffer returned 2790 * 2791 * This is the status of the backup rail monitor 2792 * 2793 * A sysfs 'read-only' shost attribute. 2794 */ 2795 static ssize_t 2796 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, 2797 char *buf) 2798 { 2799 struct Scsi_Host *shost = class_to_shost(cdev); 2800 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2801 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; 2802 Mpi2ConfigReply_t mpi_reply; 2803 u16 backup_rail_monitor_status = 0; 2804 u16 ioc_status; 2805 int sz; 2806 ssize_t rc = 0; 2807 2808 if (!ioc->is_warpdrive) { 2809 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for" 2810 " warpdrive\n", ioc->name, __func__); 2811 return 0; 2812 } 2813 /* pci_access_mutex lock acquired by sysfs show path */ 2814 mutex_lock(&ioc->pci_access_mutex); 2815 if (ioc->pci_error_recovery || ioc->remove_host) { 2816 mutex_unlock(&ioc->pci_access_mutex); 2817 return 0; 2818 } 2819 2820 /* allocate up to GPIOVal 36 entries */ 2821 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); 2822 io_unit_pg3 = kzalloc(sz, GFP_KERNEL); 2823 if (!io_unit_pg3) { 2824 pr_err(MPT3SAS_FMT "%s: failed allocating memory " 2825 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz); 2826 goto out; 2827 } 2828 2829 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != 2830 0) { 2831 pr_err(MPT3SAS_FMT 2832 "%s: failed reading iounit_pg3\n", ioc->name, 2833 __func__); 2834 goto out; 2835 } 2836 2837 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 2838 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2839 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with " 2840 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status); 2841 goto out; 2842 } 2843 2844 if (io_unit_pg3->GPIOCount < 25) { 2845 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than " 2846 "25 entries, detected (%d) entries\n", ioc->name, __func__, 2847 io_unit_pg3->GPIOCount); 2848 goto out; 2849 } 2850 2851 /* BRM status is in bit zero of GPIOVal[24] */ 2852 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); 2853 rc = snprintf(buf, PAGE_SIZE, "%d\n",
(backup_rail_monitor_status & 1)); 2854 2855 out: 2856 kfree(io_unit_pg3); 2857 mutex_unlock(&ioc->pci_access_mutex); 2858 return rc; 2859 } 2860 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); 2861 2862 struct DIAG_BUFFER_START { 2863 __le32 Size; 2864 __le32 DiagVersion; 2865 u8 BufferType; 2866 u8 Reserved[3]; 2867 __le32 Reserved1; 2868 __le32 Reserved2; 2869 __le32 Reserved3; 2870 }; 2871 2872 /** 2873 * _ctl_host_trace_buffer_size_show - host buffer size (trace only) 2874 * @cdev - pointer to embedded class device 2875 * @buf - the buffer returned 2876 * 2877 * A sysfs 'read-only' shost attribute. 2878 */ 2879 static ssize_t 2880 _ctl_host_trace_buffer_size_show(struct device *cdev, 2881 struct device_attribute *attr, char *buf) 2882 { 2883 struct Scsi_Host *shost = class_to_shost(cdev); 2884 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2885 u32 size = 0; 2886 struct DIAG_BUFFER_START *request_data; 2887 2888 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 2889 pr_err(MPT3SAS_FMT 2890 "%s: host_trace_buffer is not registered\n", 2891 ioc->name, __func__); 2892 return 0; 2893 } 2894 2895 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 2896 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2897 pr_err(MPT3SAS_FMT 2898 "%s: host_trace_buffer is not registered\n", 2899 ioc->name, __func__); 2900 return 0; 2901 } 2902 2903 request_data = (struct DIAG_BUFFER_START *) 2904 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; 2905 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || 2906 le32_to_cpu(request_data->DiagVersion) == 0x01000000 || 2907 le32_to_cpu(request_data->DiagVersion) == 0x01010000) && 2908 le32_to_cpu(request_data->Reserved3) == 0x4742444c) 2909 size = le32_to_cpu(request_data->Size); 2910 2911 ioc->ring_buffer_sz = size; 2912 return snprintf(buf, PAGE_SIZE, "%d\n", size); 2913 } 2914 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, 2915 _ctl_host_trace_buffer_size_show, NULL); 2916 2917 /** 2918 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) 2919 * @cdev - pointer to embedded class device 2920 * @buf - the buffer returned 2921 * 2922 * A sysfs 'read/write' shost attribute. 2923 * 2924 * You will only be able to read 4k bytes of ring buffer at a time. 2925 * In order to read beyond 4k bytes, you will have to write out the 2926 * offset to the same attribute, it will move the pointer. 2927 */ 2928 static ssize_t 2929 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, 2930 char *buf) 2931 { 2932 struct Scsi_Host *shost = class_to_shost(cdev); 2933 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2934 void *request_data; 2935 u32 size; 2936 2937 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 2938 pr_err(MPT3SAS_FMT 2939 "%s: host_trace_buffer is not registered\n", 2940 ioc->name, __func__); 2941 return 0; 2942 } 2943 2944 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 2945 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2946 pr_err(MPT3SAS_FMT 2947 "%s: host_trace_buffer is not registered\n", 2948 ioc->name, __func__); 2949 return 0; 2950 } 2951 2952 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) 2953 return 0; 2954 2955 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; 2956 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 2957 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; 2958 memcpy(buf, request_data, size); 2959 return size; 2960 } 2961 2962 static ssize_t 2963 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, 2964 const char *buf, size_t count) 2965 { 2966 struct Scsi_Host *shost = class_to_shost(cdev); 2967 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2968 int val = 0; 2969 2970 if (sscanf(buf, "%d", &val) != 1) 2971 return -EINVAL; 2972 2973 ioc->ring_buffer_offset = val; 2974 return strlen(buf); 2975 } 2976 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, 2977 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); 2978 2979 2980 /*****************************************/ 2981 2982 /** 2983 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) 2984 * @cdev - pointer to embedded class device 2985 * @buf - the buffer returned 2986 * 2987 * A sysfs 'read/write' shost attribute. 2988 * 2989 * This is a mechnism to post/release host_trace_buffers 2990 */ 2991 static ssize_t 2992 _ctl_host_trace_buffer_enable_show(struct device *cdev, 2993 struct device_attribute *attr, char *buf) 2994 { 2995 struct Scsi_Host *shost = class_to_shost(cdev); 2996 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2997 2998 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || 2999 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3000 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)) 3001 return snprintf(buf, PAGE_SIZE, "off\n"); 3002 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3003 MPT3_DIAG_BUFFER_IS_RELEASED)) 3004 return snprintf(buf, PAGE_SIZE, "release\n"); 3005 else 3006 return snprintf(buf, PAGE_SIZE, "post\n"); 3007 } 3008 3009 static ssize_t 3010 _ctl_host_trace_buffer_enable_store(struct device *cdev, 3011 struct device_attribute *attr, const char *buf, size_t count) 3012 { 3013 struct Scsi_Host *shost = class_to_shost(cdev); 3014 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3015 char str[10] = ""; 3016 struct mpt3_diag_register diag_register; 3017 u8 issue_reset = 0; 3018 3019 /* don't allow post/release occurr while recovery is active */ 3020 if (ioc->shost_recovery || ioc->remove_host || 3021 ioc->pci_error_recovery || ioc->is_driver_loading) 3022 return -EBUSY; 3023 3024 if (sscanf(buf, "%9s", str) != 1) 3025 return -EINVAL; 3026 3027 if (!strcmp(str, "post")) { 3028 /* exit out if host buffers are already posted */ 3029 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && 3030 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3031 MPT3_DIAG_BUFFER_IS_REGISTERED) && 3032 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3033 MPT3_DIAG_BUFFER_IS_RELEASED) == 0)) 3034 goto out; 3035 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 3036 pr_info(MPT3SAS_FMT "posting host trace buffers\n", 3037 ioc->name); 3038 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 3039 diag_register.requested_buffer_size = (1024 * 1024); 3040 diag_register.unique_id = 0x7075900; 3041 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; 3042 _ctl_diag_register_2(ioc, &diag_register); 3043 } else if (!strcmp(str, "release")) { 3044 /* exit out if host buffers are already released */ 3045 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) 3046 goto out; 3047 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3048 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) 3049 goto out; 3050 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3051 MPT3_DIAG_BUFFER_IS_RELEASED)) 3052 goto out; 3053 
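		/* buffer is posted and still owned by firmware; ask firmware to
		 * release it so the host can read a stable snapshot
		 */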
pr_info(MPT3SAS_FMT "releasing host trace buffer\n", 3054 ioc->name); 3055 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, 3056 &issue_reset); 3057 } 3058 3059 out: 3060 return strlen(buf); 3061 } 3062 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, 3063 _ctl_host_trace_buffer_enable_show, 3064 _ctl_host_trace_buffer_enable_store); 3065 3066 /*********** diagnostic trigger suppport *********************************/ 3067 3068 /** 3069 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute 3070 * @cdev - pointer to embedded class device 3071 * @buf - the buffer returned 3072 * 3073 * A sysfs 'read/write' shost attribute. 3074 */ 3075 static ssize_t 3076 _ctl_diag_trigger_master_show(struct device *cdev, 3077 struct device_attribute *attr, char *buf) 3078 3079 { 3080 struct Scsi_Host *shost = class_to_shost(cdev); 3081 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3082 unsigned long flags; 3083 ssize_t rc; 3084 3085 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3086 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); 3087 memcpy(buf, &ioc->diag_trigger_master, rc); 3088 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3089 return rc; 3090 } 3091 3092 /** 3093 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute 3094 * @cdev - pointer to embedded class device 3095 * @buf - the buffer returned 3096 * 3097 * A sysfs 'read/write' shost attribute. 3098 */ 3099 static ssize_t 3100 _ctl_diag_trigger_master_store(struct device *cdev, 3101 struct device_attribute *attr, const char *buf, size_t count) 3102 3103 { 3104 struct Scsi_Host *shost = class_to_shost(cdev); 3105 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3106 unsigned long flags; 3107 ssize_t rc; 3108 3109 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3110 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); 3111 memset(&ioc->diag_trigger_master, 0, 3112 sizeof(struct SL_WH_MASTER_TRIGGER_T)); 3113 memcpy(&ioc->diag_trigger_master, buf, rc); 3114 ioc->diag_trigger_master.MasterData |= 3115 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 3116 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3117 return rc; 3118 } 3119 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, 3120 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); 3121 3122 3123 /** 3124 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute 3125 * @cdev - pointer to embedded class device 3126 * @buf - the buffer returned 3127 * 3128 * A sysfs 'read/write' shost attribute. 3129 */ 3130 static ssize_t 3131 _ctl_diag_trigger_event_show(struct device *cdev, 3132 struct device_attribute *attr, char *buf) 3133 { 3134 struct Scsi_Host *shost = class_to_shost(cdev); 3135 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3136 unsigned long flags; 3137 ssize_t rc; 3138 3139 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3140 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); 3141 memcpy(buf, &ioc->diag_trigger_event, rc); 3142 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3143 return rc; 3144 } 3145 3146 /** 3147 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute 3148 * @cdev - pointer to embedded class device 3149 * @buf - the buffer returned 3150 * 3151 * A sysfs 'read/write' shost attribute. 
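 * The incoming buffer is interpreted as a struct SL_WH_EVENT_TRIGGERS_T image; at most NUM_VALID_ENTRIES event entries are honoured.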
3152 */ 3153 static ssize_t 3154 _ctl_diag_trigger_event_store(struct device *cdev, 3155 struct device_attribute *attr, const char *buf, size_t count) 3156 3157 { 3158 struct Scsi_Host *shost = class_to_shost(cdev); 3159 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3160 unsigned long flags; 3161 ssize_t sz; 3162 3163 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3164 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); 3165 memset(&ioc->diag_trigger_event, 0, 3166 sizeof(struct SL_WH_EVENT_TRIGGERS_T)); 3167 memcpy(&ioc->diag_trigger_event, buf, sz); 3168 if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES) 3169 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES; 3170 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3171 return sz; 3172 } 3173 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR, 3174 _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store); 3175 3176 3177 /** 3178 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute 3179 * @cdev - pointer to embedded class device 3180 * @buf - the buffer returned 3181 * 3182 * A sysfs 'read/write' shost attribute. 3183 */ 3184 static ssize_t 3185 _ctl_diag_trigger_scsi_show(struct device *cdev, 3186 struct device_attribute *attr, char *buf) 3187 { 3188 struct Scsi_Host *shost = class_to_shost(cdev); 3189 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3190 unsigned long flags; 3191 ssize_t rc; 3192 3193 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3194 rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T); 3195 memcpy(buf, &ioc->diag_trigger_scsi, rc); 3196 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3197 return rc; 3198 } 3199 3200 /** 3201 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute 3202 * @cdev - pointer to embedded class device 3203 * @buf - the buffer returned 3204 * 3205 * A sysfs 'read/write' shost attribute. 3206 */ 3207 static ssize_t 3208 _ctl_diag_trigger_scsi_store(struct device *cdev, 3209 struct device_attribute *attr, const char *buf, size_t count) 3210 { 3211 struct Scsi_Host *shost = class_to_shost(cdev); 3212 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3213 unsigned long flags; 3214 ssize_t sz; 3215 3216 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3217 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count); 3218 memset(&ioc->diag_trigger_scsi, 0, 3219 sizeof(ioc->diag_trigger_scsi)); 3220 memcpy(&ioc->diag_trigger_scsi, buf, sz); 3221 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) 3222 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES; 3223 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3224 return sz; 3225 } 3226 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR, 3227 _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store); 3228 3229 3230 /** 3231 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute 3232 * @cdev - pointer to embedded class device 3233 * @buf - the buffer returned 3234 * 3235 * A sysfs 'read/write' shost attribute.
3236 */ 3237 static ssize_t 3238 _ctl_diag_trigger_mpi_show(struct device *cdev, 3239 struct device_attribute *attr, char *buf) 3240 { 3241 struct Scsi_Host *shost = class_to_shost(cdev); 3242 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3243 unsigned long flags; 3244 ssize_t rc; 3245 3246 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3247 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T); 3248 memcpy(buf, &ioc->diag_trigger_mpi, rc); 3249 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3250 return rc; 3251 } 3252 3253 /** 3254 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute 3255 * @cdev - pointer to embedded class device 3256 * @buf - the buffer returned 3257 * 3258 * A sysfs 'read/write' shost attribute. 3259 */ 3260 static ssize_t 3261 _ctl_diag_trigger_mpi_store(struct device *cdev, 3262 struct device_attribute *attr, const char *buf, size_t count) 3263 { 3264 struct Scsi_Host *shost = class_to_shost(cdev); 3265 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3266 unsigned long flags; 3267 ssize_t sz; 3268 3269 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3270 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); 3271 memset(&ioc->diag_trigger_mpi, 0, 3272 sizeof(ioc->diag_trigger_mpi)); 3273 memcpy(&ioc->diag_trigger_mpi, buf, sz); 3274 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) 3275 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; 3276 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3277 return sz; 3278 } 3279 3280 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, 3281 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); 3282 3283 /*********** diagnostic trigger suppport *** END ****************************/ 3284 3285 3286 3287 /*****************************************/ 3288 3289 struct device_attribute *mpt3sas_host_attrs[] = { 3290 &dev_attr_version_fw, 3291 &dev_attr_version_bios, 3292 &dev_attr_version_mpi, 3293 &dev_attr_version_product, 3294 &dev_attr_version_nvdata_persistent, 3295 &dev_attr_version_nvdata_default, 3296 &dev_attr_board_name, 3297 &dev_attr_board_assembly, 3298 &dev_attr_board_tracer, 3299 &dev_attr_io_delay, 3300 &dev_attr_device_delay, 3301 &dev_attr_logging_level, 3302 &dev_attr_fwfault_debug, 3303 &dev_attr_fw_queue_depth, 3304 &dev_attr_host_sas_address, 3305 &dev_attr_ioc_reset_count, 3306 &dev_attr_host_trace_buffer_size, 3307 &dev_attr_host_trace_buffer, 3308 &dev_attr_host_trace_buffer_enable, 3309 &dev_attr_reply_queue_count, 3310 &dev_attr_diag_trigger_master, 3311 &dev_attr_diag_trigger_event, 3312 &dev_attr_diag_trigger_scsi, 3313 &dev_attr_diag_trigger_mpi, 3314 &dev_attr_BRM_status, 3315 NULL, 3316 }; 3317 3318 /* device attributes */ 3319 3320 /** 3321 * _ctl_device_sas_address_show - sas address 3322 * @cdev - pointer to embedded class device 3323 * @buf - the buffer returned 3324 * 3325 * This is the sas address for the target 3326 * 3327 * A sysfs 'read-only' shost attribute. 
3328 */ 3329 static ssize_t 3330 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, 3331 char *buf) 3332 { 3333 struct scsi_device *sdev = to_scsi_device(dev); 3334 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3335 3336 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 3337 (unsigned long long)sas_device_priv_data->sas_target->sas_address); 3338 } 3339 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); 3340 3341 /** 3342 * _ctl_device_handle_show - device handle 3343 * @cdev - pointer to embedded class device 3344 * @buf - the buffer returned 3345 * 3346 * This is the firmware assigned device handle 3347 * 3348 * A sysfs 'read-only' shost attribute. 3349 */ 3350 static ssize_t 3351 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr, 3352 char *buf) 3353 { 3354 struct scsi_device *sdev = to_scsi_device(dev); 3355 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3356 3357 return snprintf(buf, PAGE_SIZE, "0x%04x\n", 3358 sas_device_priv_data->sas_target->handle); 3359 } 3360 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); 3361 3362 struct device_attribute *mpt3sas_dev_attrs[] = { 3363 &dev_attr_sas_address, 3364 &dev_attr_sas_device_handle, 3365 NULL, 3366 }; 3367 3368 /* file operations table for mpt3ctl device */ 3369 static const struct file_operations ctl_fops = { 3370 .owner = THIS_MODULE, 3371 .unlocked_ioctl = _ctl_ioctl, 3372 .poll = _ctl_poll, 3373 .fasync = _ctl_fasync, 3374 #ifdef CONFIG_COMPAT 3375 .compat_ioctl = _ctl_ioctl_compat, 3376 #endif 3377 }; 3378 3379 /* file operations table for mpt2ctl device */ 3380 static const struct file_operations ctl_gen2_fops = { 3381 .owner = THIS_MODULE, 3382 .unlocked_ioctl = _ctl_mpt2_ioctl, 3383 .poll = _ctl_poll, 3384 .fasync = _ctl_fasync, 3385 #ifdef CONFIG_COMPAT 3386 .compat_ioctl = _ctl_mpt2_ioctl_compat, 3387 #endif 3388 }; 3389 3390 static struct miscdevice ctl_dev = { 3391 .minor = MPT3SAS_MINOR, 3392 .name = MPT3SAS_DEV_NAME, 3393 .fops = &ctl_fops, 3394 }; 3395 3396 static struct miscdevice gen2_ctl_dev = { 3397 .minor = MPT2SAS_MINOR, 3398 .name = MPT2SAS_DEV_NAME, 3399 .fops = &ctl_gen2_fops, 3400 }; 3401 3402 /** 3403 * mpt3sas_ctl_init - main entry point for ctl. 3404 * 3405 */ 3406 void 3407 mpt3sas_ctl_init(ushort hbas_to_enumerate) 3408 { 3409 async_queue = NULL; 3410 3411 /* Don't register mpt3ctl ioctl device if 3412 * hbas_to_enumerate is one. 3413 */ 3414 if (hbas_to_enumerate != 1) 3415 if (misc_register(&ctl_dev) < 0) 3416 pr_err("%s can't register misc device [minor=%d]\n", 3417 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); 3418 3419 /* Don't register mpt2ctl ioctl device if 3420 * hbas_to_enumerate is two.
3421 */ 3422 if (hbas_to_enumerate != 2) 3423 if (misc_register(&gen2_ctl_dev) < 0) 3424 pr_err("%s can't register misc device [minor=%d]\n", 3425 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); 3426 3427 init_waitqueue_head(&ctl_poll_wait); 3428 } 3429 3430 /** 3431 * mpt3sas_ctl_exit - exit point for ctl 3432 * 3433 */ 3434 void 3435 mpt3sas_ctl_exit(ushort hbas_to_enumerate) 3436 { 3437 struct MPT3SAS_ADAPTER *ioc; 3438 int i; 3439 3440 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 3441 3442 /* free memory associated to diag buffers */ 3443 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 3444 if (!ioc->diag_buffer[i]) 3445 continue; 3446 if (!(ioc->diag_buffer_status[i] & 3447 MPT3_DIAG_BUFFER_IS_REGISTERED)) 3448 continue; 3449 if ((ioc->diag_buffer_status[i] & 3450 MPT3_DIAG_BUFFER_IS_RELEASED)) 3451 continue; 3452 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], 3453 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); 3454 ioc->diag_buffer[i] = NULL; 3455 ioc->diag_buffer_status[i] = 0; 3456 } 3457 3458 kfree(ioc->event_log); 3459 } 3460 if (hbas_to_enumerate != 1) 3461 misc_deregister(&ctl_dev); 3462 if (hbas_to_enumerate != 2) 3463 misc_deregister(&gen2_ctl_dev); 3464 } 3465
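
/*
 * Illustrative only -- not part of the driver. A minimal sketch of how a
 * userspace tool might exercise the control node registered above. The
 * device path is assumed from MPT3SAS_DEV_NAME, and only the common header
 * field used elsewhere in this file (ioc_number) is relied on; the full
 * layout of struct mpt3_ioctl_diag_reset is defined in mpt3sas_ctl.h.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include "mpt3sas_ctl.h"
 *
 *	int fd = open("/dev/mpt3ctl", O_RDWR);	// node name assumed
 *	struct mpt3_ioctl_diag_reset karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = 0;		// first adapter
 *	if (ioctl(fd, MPT3HARDRESET, &karg) != 0)
 *		perror("MPT3HARDRESET");
 */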