/*
 * Management Module Support for MPT (Message Passing Technology) based
 * controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/poll.h>

#include <linux/io.h>
#include <linux/uaccess.h>

#include "mpt3sas_base.h"
#include "mpt3sas_ctl.h"


static struct fasync_struct *async_queue;
static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);


/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking
 * @BLOCKING: blocking
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, so they probably require sleep.
 */
enum block_state {
	NON_BLOCKING,
	BLOCKING,
};

/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string pass from calling function
 * @mpi_reply: reply message frame
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.
 */
static void
_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	}

	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
	    ioc->name, calling_function_name, desc, smid);

	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		pr_info(MPT3SAS_FMT
		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		struct _pcie_device *pcie_device = NULL;

		sas_device = mpt3sas_get_sdev_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			pr_warn(MPT3SAS_FMT
			    "\tsas_address(0x%016llx), phy(%d)\n",
			    ioc->name, (unsigned long long)
			    sas_device->sas_address, sas_device->phy);
			pr_warn(MPT3SAS_FMT
			    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
			    ioc->name, (unsigned long long)
			    sas_device->enclosure_logical_id, sas_device->slot);
			sas_device_put(sas_device);
		}
		if (!sas_device) {
			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
			    le16_to_cpu(scsi_reply->DevHandle));
			if (pcie_device) {
				pr_warn(MPT3SAS_FMT
				    "\tWWID(0x%016llx), port(%d)\n", ioc->name,
				    (unsigned long long)pcie_device->wwid,
				    pcie_device->port_num);
				if (pcie_device->enclosure_handle != 0)
					pr_warn(MPT3SAS_FMT
					    "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
					    ioc->name, (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
				pcie_device_put(pcie_device);
			}
		}
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			pr_info(MPT3SAS_FMT
			    "\tscsi_state(0x%02x), scsi_status"
			    "(0x%02x)\n", ioc->name,
			    scsi_reply->SCSIState,
			    scsi_reply->SCSIStatus);
	}
}

/**
 * mpt3sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
	const void *sense_data;
	u32 sz;

	if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt3sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
		/*
		 * Get Error Response data for NVMe device. The ctl_cmds.sense
		 * buffer is used to store the Error Response data.
		 */
		if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
			nvme_error_reply =
			    (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
			sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
			    le32_to_cpu(nvme_error_reply->ErrorResponseCount));
			sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
			memcpy(ioc->ctl_cmds.sense, sense_data, sz);
		}
	}

	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
	ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->ctl_cmds.done);
	return 1;
}

/**
 * _ctl_check_event_type - determines when an event needs logging
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The bitmask in ioc->event_type[] indicates which events should be
 * saved in the driver event_log.  This bitmask is set by the application.
 *
 * Return 1 when the event should be captured, or 0 when there is no match.
 */
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u16 i;
	u32 desired_event;

	if (event >= 128 || !event || !ioc->event_log)
		return 0;

	desired_event = (1 << (event % 32));
	if (!desired_event)
		desired_event = 1;
	i = event / 32;
	return desired_event & ioc->event_type[i];
}

/**
 * mpt3sas_ctl_add_to_event_log - add event
 * @ioc: per adapter object
 * @mpi_reply: reply message frame
 *
 * Return nothing.
 */
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT3_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}

/**
 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 *        0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply)
		mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
	return 1;
}

/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc_number: ioc number being verified
 * @iocpp: The ioc pointer is returned in this.
 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
 *		 MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
 *
 * Return (-1) means error, else ioc_number.
 */
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
	int mpi_version)
{
	struct MPT3SAS_ADAPTER *ioc;
	int version = 0;
	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->id != ioc_number)
			continue;
		/* Check whether this ioctl command is from right
		 * ioctl device or not, if not continue the search.
		 */
		version = ioc->hba_mpi_version_belonged;
		/* MPI25_VERSION and MPI26_VERSION uses same ioctl
		 * device.
		 */
		if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
			if ((version == MPI25_VERSION) ||
			    (version == MPI26_VERSION))
				goto out;
			else
				continue;
		} else {
			if (version != mpi_version)
				continue;
		}
 out:
		spin_unlock(&gioc_lock);
		*iocpp = ioc;
		return ioc_number;
	}
	spin_unlock(&gioc_lock);
	*iocpp = NULL;
	return -1;
}

/**
 * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 */
void
mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	int i;
	u8 issue_reset;

	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			      MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			     MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			mpt3sas_send_diag_release(ioc, i, &issue_reset);
		}
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
			ioc->ctl_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
			complete(&ioc->ctl_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));

		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			      MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			     MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			ioc->diag_buffer_status[i] |=
			    MPT3_DIAG_BUFFER_IS_DIAG_RESET;
		}
		break;
	}
}

/**
 * _ctl_fasync - fasync notification handler
 * @fd: file descriptor
 * @filep: file object
 * @mode: on/off flag
 *
 * Called when an application requests fasync callback handling.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}

/**
 * _ctl_poll - poll handler for the ioctl device
 * @filep: file object
 * @wait: poll table
 */
static unsigned int
_ctl_poll(struct file *filep, poll_table *wait)
{
	struct MPT3SAS_ADAPTER *ioc;

	poll_wait(filep, &ctl_poll_wait, wait);

	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->aen_event_read_flag) {
			spin_unlock(&gioc_lock);
			return POLLIN | POLLRDNORM;
		}
	}
	spin_unlock(&gioc_lock);
	return 0;
}

/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg: (struct mpt3_ioctl_command)
 * @tm_request: pointer to mf from user space
 *
 * Return 0 when an smid is found, else fail.
 * During failure, the reply frame is filled.
 */
static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	u8 found = 0;
	u16 i;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT3SAS_DEVICE *priv_data;
	unsigned long flags;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = ioc->scsiio_depth; i && !found; i--) {
		scmd = ioc->scsi_lookup[i - 1].scmd;
		if (scmd == NULL || scmd->device == NULL ||
		    scmd->device->hostdata == NULL)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
		found = 1;
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!found) {
		dctlprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: handle(0x%04x), lun(%d), no active mid!!\n",
			ioc->name,
			desc, le16_to_cpu(tm_request->DevHandle), lun));
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
		desc, le16_to_cpu(tm_request->DevHandle), lun,
		le16_to_cpu(tm_request->TaskMID)));
	return 0;
}

/**
 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
 * @ioc: per adapter object
 * @karg: (struct mpt3_ioctl_command)
 * @mf: pointer to mf in user space
 */
static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
	void __user *mf)
{
	MPI2RequestHeader_t *mpi_request = NULL, *request;
	MPI2DefaultReply_t *mpi_reply;
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
	u32 ioc_state;
	u16 smid;
	unsigned long timeout;
	u8 issue_reset;
	u32 sz, sz_arg;
	void *psge;
	void *data_out = NULL;
	dma_addr_t data_out_dma = 0;
	size_t data_out_sz = 0;
	void *data_in = NULL;
	dma_addr_t data_in_dma = 0;
	size_t data_in_sz = 0;
	long ret;
	u16 wait_state_count;
	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	issue_reset = 0;

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		ret = -EAGAIN;
		goto out;
	}

	wait_state_count = 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			pr_err(MPT3SAS_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			ret = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		pr_info(MPT3SAS_FMT
			"%s: waiting for operational state(count=%d)\n",
			ioc->name,
			__func__, wait_state_count);
	}
	if (wait_state_count)
		pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
		    ioc->name, __func__);

	mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
	if (!mpi_request) {
		pr_err(MPT3SAS_FMT
			"%s: failed obtaining a memory for mpi_request\n",
			ioc->name, __func__);
		ret = -ENOMEM;
		goto out;
	}

	/* Check for overflow and wraparound */
	if (karg.data_sge_offset * 4 > ioc->request_sz ||
	    karg.data_sge_offset > (UINT_MAX / 4)) {
		ret = -EINVAL;
		goto out;
	}

	/* copy in request message frame from user */
	if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
		pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
		    __func__);
		ret = -EFAULT;
		goto out;
	}

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
		if (!smid) {
			pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			    ioc->name, __func__);
			ret = -EAGAIN;
			goto out;
		}
	} else {

		smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
		if (!smid) {
			pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			    ioc->name, __func__);
			ret = -EAGAIN;
			goto out;
		}
	}

	ret = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	memcpy(request, mpi_request, karg.data_sge_offset*4);
	ioc->ctl_cmds.smid = smid;
	data_out_sz = karg.data_out_size;
	data_in_sz = karg.data_in_size;

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
	    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {

		device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
		if (!device_handle || (device_handle >
		    ioc->facts.MaxDevHandle)) {
			ret = -EINVAL;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	/* obtain dma-able memory for data transfer */
	if (data_out_sz) /* WRITE */ {
		data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
		    &data_out_dma);
		if (!data_out) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
		if (copy_from_user(data_out, karg.data_out_buf_ptr,
		    data_out_sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -EFAULT;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	if (data_in_sz) /* READ */ {
		data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
		    &data_in_dma);
		if (!data_in) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	psge = (void *)request + (karg.data_sge_offset*4);

	/* send command to firmware */
	_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);

	init_completion(&ioc->ctl_cmds.done);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
	{
		nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
		/*
		 * Get the Physical Address of the sense buffer.
		 * Use Error Response buffer address field to hold the sense
		 * buffer address.
		 * Clear the internal sense buffer, which will potentially hold
		 * the Completion Queue Entry on return, or 0 if no Entry.
		 * Build the PRPs and set direction bits.
		 * Send the request.
		 */
		nvme_encap_request->ErrorResponseBaseAddress = ioc->sense_dma &
		    0xFFFFFFFF00000000;
		nvme_encap_request->ErrorResponseBaseAddress |=
		    (U64)mpt3sas_base_get_sense_buffer_dma(ioc, smid);
		nvme_encap_request->ErrorResponseAllocationLength =
		    NVME_ERROR_RESPONSE_SIZE;
		memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
		ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
		    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
			    "ioctl failed due to device removal in progress\n",
			    ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		ioc->put_smid_nvme_encap(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsiio_request =
		    (Mpi2SCSIIORequest_t *)request;
		scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
		scsiio_request->SenseBufferLowAddress =
		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT
				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
				ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
			ioc->put_smid_scsi_io(ioc, smid, device_handle);
		else
			ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
	{
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)request;

		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
			ioc->name,
			le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
		ioc->got_task_abort_from_ioctl = 1;
		if (tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
		    tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
				mpt3sas_base_free_smid(ioc, smid);
				ioc->got_task_abort_from_ioctl = 0;
				goto out;
			}
		}
		ioc->got_task_abort_from_ioctl = 0;

		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT
				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
				ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		ioc->put_smid_hi_priority(ioc, smid, 0);
		break;
	}
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
	{
		Mpi2SmpPassthroughRequest_t *smp_request =
		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
		u8 *data;

		/* ioc determines which port to use */
		smp_request->PhysicalPort = 0xFF;
		if (smp_request->PassthroughFlags &
		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
			data = (u8 *)&smp_request->SGL;
		else {
			if (unlikely(data_out == NULL)) {
				pr_err("failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				mpt3sas_base_free_smid(ioc, smid);
				ret = -EINVAL;
				goto out;
			}
			data = data_out;
		}

		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
	{
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT
				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
				ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_FW_DOWNLOAD:
	case MPI2_FUNCTION_FW_UPLOAD:
	{
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_TOOLBOX:
	{
		Mpi2ToolboxCleanRequest_t *toolbox_request =
		    (Mpi2ToolboxCleanRequest_t *)mpi_request;

		if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
			ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		} else {
			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		}
		ioc->put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
	{
		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;

		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
		    || sasiounit_request->Operation ==
		    MPI2_SAS_OP_PHY_LINK_RESET) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		/* drop to default case for posting the request */
	}
	default:
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		ioc->put_smid_default(ioc, smid);
		break;
	}

	if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
	else
		timeout = karg.timeout;
	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
		mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
	    ioc->ioc_link_reset_in_progress) {
		ioc->ioc_link_reset_in_progress = 0;
		ioc->ignore_loginfos = 0;
	}
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request, karg.data_sge_offset);
		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	mpi_reply = ioc->ctl_cmds.reply;

	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
	    (ioc->logging_level & MPT_DEBUG_TM)) {
		Mpi2SCSITaskManagementReply_t *tm_reply =
		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;

		pr_info(MPT3SAS_FMT "TASK_MGMT: " \
		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
		    "TerminationCount(0x%08x)\n", ioc->name,
		    le16_to_cpu(tm_reply->IOCStatus),
		    le32_to_cpu(tm_reply->IOCLogInfo),
		    le32_to_cpu(tm_reply->TerminationCount));
	}

	/* copy out xdata to user */
	if (data_in_sz) {
		if (copy_to_user(karg.data_in_buf_ptr, data_in,
		    data_in_sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out reply message frame to user */
	if (karg.max_reply_bytes) {
		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out sense/NVMe Error Response to user */
	if (karg.max_sense_bytes && (mpi_request->Function ==
	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
	    MPI2_FUNCTION_NVME_ENCAPSULATED)) {
		if (karg.sense_data_ptr == NULL) {
			pr_info(MPT3SAS_FMT "Response buffer provided"
			    " by application is NULL; Response data will"
			    " not be returned.\n", ioc->name);
			goto out;
		}
		sz_arg = (mpi_request->Function ==
		    MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
		    SCSI_SENSE_BUFFERSIZE;
		sz = min_t(u32, karg.max_sense_bytes, sz_arg);
		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

 issue_host_reset:
	if (issue_reset) {
		ret = -ENODATA;
		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_request->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
			pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
			    ioc->name,
			    le16_to_cpu(mpi_request->FunctionDependent1));
			mpt3sas_halt_firmware(ioc);
			mpt3sas_scsih_issue_locked_tm(ioc,
			    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
			    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
		} else
			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	}

 out:

	/* free memory associated with sg buffers */
	if (data_in)
		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
		    data_in_dma);

	if (data_out)
		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
		    data_out_dma);

	kfree(mpi_request);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return ret;
}

/**
 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 */
static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_iocinfo karg;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	memset(&karg, 0, sizeof(karg));
	if (ioc->pfacts)
		karg.port_number = ioc->pfacts[0].PortNumber;
	karg.hw_rev = ioc->pdev->revision;
	karg.pci_id = ioc->pdev->device;
	karg.subsystem_device = ioc->pdev->subsystem_device;
	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
	karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
	karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
	karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
	karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
	karg.firmware_version = ioc->facts.FWVersion.Word;
	strcpy(karg.driver_version, ioc->driver_name);
	strcat(karg.driver_version, "-");
	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		if (ioc->is_warpdrive)
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
		else
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
		strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		if (ioc->is_gen35_ioc)
			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
		else
			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
		strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
		break;
	}
	karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 */
static long
_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventquery karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
	memcpy(karg.event_types, ioc->event_type,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 */
static long
_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventenable karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	memcpy(ioc->event_type, karg.event_types,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
	mpt3sas_base_validate_event_type(ioc, ioc->event_type);

	if (ioc->event_log)
		return 0;
	/* initialize event_log */
	ioc->event_context = 0;
	ioc->aen_event_read_flag = 0;
	ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
	    sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
	if (!ioc->event_log) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}
	return 0;
}

/**
 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 */
static long
_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventreport karg;
	u32 number_bytes, max_events, max;
	struct mpt3_ioctl_eventreport __user *uarg = arg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	number_bytes = karg.hdr.max_data_size -
	    sizeof(struct mpt3_ioctl_header);
	max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
	max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);

	/* If fewer than 1 event is requested, there must have
	 * been some type of error.
	 */
	if (!max || !ioc->event_log)
		return -ENODATA;

	number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* reset flag so SIGIO can restart */
	ioc->aen_event_read_flag = 0;
	return 0;
}

/**
 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 */
static long
_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_diag_reset karg;
	int retval;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	if (ioc->shost_recovery || ioc->pci_error_recovery ||
	    ioc->is_driver_loading)
		return -EAGAIN;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	pr_info(MPT3SAS_FMT "host reset: %s\n",
	    ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
	return 0;
}

/**
 * _ctl_btdh_search_sas_device - searching for sas device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 */
static int
_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_ioctl_btdh_mapping *btdh)
{
	struct _sas_device *sas_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->sas_device_list))
		return rc;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == sas_device->handle) {
			btdh->bus = sas_device->channel;
			btdh->id = sas_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == sas_device->channel && btdh->id ==
		    sas_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = sas_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_search_pcie_device - searching for pcie device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 */
static int
_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_ioctl_btdh_mapping *btdh)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->pcie_device_list))
		return rc;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == pcie_device->handle) {
			btdh->bus = pcie_device->channel;
			btdh->id = pcie_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == pcie_device->channel && btdh->id ==
		    pcie_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = pcie_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_search_raid_device - searching for raid device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 */
static int
_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_ioctl_btdh_mapping *btdh)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->raid_device_list))
		return rc;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == raid_device->handle) {
			btdh->bus = raid_device->channel;
			btdh->id = raid_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == raid_device->channel && btdh->id ==
		    raid_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = raid_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 */
static long
_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_btdh_mapping karg;
	int rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	rc = _ctl_btdh_search_sas_device(ioc, &karg);
	if (!rc)
		rc = _ctl_btdh_search_pcie_device(ioc, &karg);
	if (!rc)
		_ctl_btdh_search_raid_device(ioc, &karg);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_diag_capability - return diag buffer capability
 * @ioc: per adapter object
 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
 *
 * returns 1 when diag buffer support is enabled in firmware
 */
static u8
_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
{
	u8 rc = 0;

	switch (buffer_type) {
	case MPI2_DIAG_BUF_TYPE_TRACE:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_EXTENDED:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			rc = 1;
	}

	return rc;
}


/**
 * _ctl_diag_register_2 - wrapper for registering diag buffer support
 * @ioc: per adapter object
 * @diag_register: the diag_register struct passed in from user space
 *
 */
static long
_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_diag_register *diag_register)
{
	int rc, i;
	void *request_data = NULL;
	dma_addr_t request_data_dma;
	u32 request_data_sz = 0;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	u8 buffer_type;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	u8 issue_reset = 0;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		pr_err(MPT3SAS_FMT
		    "%s: failed due to ioc not operational\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	buffer_type = diag_register->buffer_type;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
		pr_err(MPT3SAS_FMT
		    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
		    ioc->name, __func__,
		    buffer_type);
		return -EINVAL;
	}

	if (diag_register->requested_buffer_size % 4) {
		pr_err(MPT3SAS_FMT
		    "%s: the requested_buffer_size is not 4 byte aligned\n",
		    ioc->name, __func__);
		return -EINVAL;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
	ioc->diag_buffer_status[buffer_type] = 0;
	memcpy(ioc->product_specific[buffer_type],
	    diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;

	if (request_data) {
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
			pci_free_consistent(ioc->pdev,
			    ioc->diag_buffer_sz[buffer_type],
			    request_data, request_data_dma);
			request_data = NULL;
		}
	}

	if (request_data == NULL) {
		ioc->diag_buffer_sz[buffer_type] = 0;
		ioc->diag_buffer_dma[buffer_type] = 0;
		request_data = pci_alloc_consistent(
			ioc->pdev, request_data_sz, &request_data_dma);
		if (request_data == NULL) {
			pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
			    " for diag buffers, requested size(%d)\n",
			    ioc->name, __func__, request_data_sz);
			mpt3sas_base_free_smid(ioc, smid);
			return -ENOMEM;
		}
		ioc->diag_buffer[buffer_type] = request_data;
		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
	}

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = diag_register->buffer_type;
	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
	    ioc->name, __func__, request_data,
	    (unsigned long long)request_data_dma,
	    le32_to_cpu(mpi_request->BufferLength)));

	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);

	init_completion(&ioc->ctl_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		pr_err(MPT3SAS_FMT "%s: no reply message\n",
		    ioc->name, __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
		    ioc->name, __func__));
	} else {
		pr_info(MPT3SAS_FMT
		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
		    ioc->name, __func__,
		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

 out:

	if (rc && request_data)
		pci_free_consistent(ioc->pdev, request_data_sz,
		    request_data, request_data_dma);

	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
 * @ioc: per adapter object
 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
 *
 * This is called when the command line option diag_buffer_enable is enabled
 * at driver load time.
 */
void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
	struct mpt3_diag_register diag_register;

	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));

	if (bits_to_register & 1) {
		pr_info(MPT3SAS_FMT "registering trace buffer support\n",
		    ioc->name);
		ioc->diag_trigger_master.MasterData =
		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
		/* register for 2MB buffers */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075900;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 2) {
		pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
		/* register for 2MB buffers */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 4) {
		pr_info(MPT3SAS_FMT "registering extended buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
		/* register for 2MB buffers */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}
}

/**
 * _ctl_diag_register - application register with driver
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * This will allow the driver to setup any required buffers that will be
 * needed by firmware to communicate with the driver.
 */
static long
_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_register karg;
	long rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	rc = _ctl_diag_register_2(ioc, &karg);
	return rc;
}

/**
 * _ctl_diag_unregister - application unregister with driver
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * This will allow the driver to cleanup any memory allocated for diag
 * messages and to free up any resources.
 */
static long
_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_unregister karg;
	void *request_data;
	dma_addr_t request_data_dma;
	u32 request_data_sz;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	buffer_type = karg.unique_id & 0x000000ff;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		pr_err(MPT3SAS_FMT
		    "%s: buffer_type(0x%02x) is not registered\n",
		    ioc->name, __func__, buffer_type);
		return -EINVAL;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		pr_err(MPT3SAS_FMT
		    "%s: buffer_type(0x%02x) has not been released\n",
		    ioc->name, __func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		pr_err(MPT3SAS_FMT
		    "%s: unique_id(0x%08x) is not registered\n",
		    ioc->name, __func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	request_data_sz = ioc->diag_buffer_sz[buffer_type];
	request_data_dma = ioc->diag_buffer_dma[buffer_type];
	pci_free_consistent(ioc->pdev, request_data_sz,
	    request_data, request_data_dma);
	ioc->diag_buffer[buffer_type] = NULL;
	ioc->diag_buffer_status[buffer_type] = 0;
	return 0;
}

/**
 * _ctl_diag_query - query relevant info associated with diag buffers
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * The application will send only buffer_type and unique_id.  Driver will
 * inspect unique_id first, if valid, fill in all the info.  If unique_id is
 * 0x00, the driver will return info specified by Buffer Type.
1816 */ 1817 static long 1818 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1819 { 1820 struct mpt3_diag_query karg; 1821 void *request_data; 1822 int i; 1823 u8 buffer_type; 1824 1825 if (copy_from_user(&karg, arg, sizeof(karg))) { 1826 pr_err("failure at %s:%d/%s()!\n", 1827 __FILE__, __LINE__, __func__); 1828 return -EFAULT; 1829 } 1830 1831 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1832 __func__)); 1833 1834 karg.application_flags = 0; 1835 buffer_type = karg.buffer_type; 1836 1837 if (!_ctl_diag_capability(ioc, buffer_type)) { 1838 pr_err(MPT3SAS_FMT 1839 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1840 ioc->name, __func__, buffer_type); 1841 return -EPERM; 1842 } 1843 1844 if ((ioc->diag_buffer_status[buffer_type] & 1845 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1846 pr_err(MPT3SAS_FMT 1847 "%s: buffer_type(0x%02x) is not registered\n", 1848 ioc->name, __func__, buffer_type); 1849 return -EINVAL; 1850 } 1851 1852 if (karg.unique_id & 0xffffff00) { 1853 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1854 pr_err(MPT3SAS_FMT 1855 "%s: unique_id(0x%08x) is not registered\n", 1856 ioc->name, __func__, karg.unique_id); 1857 return -EINVAL; 1858 } 1859 } 1860 1861 request_data = ioc->diag_buffer[buffer_type]; 1862 if (!request_data) { 1863 pr_err(MPT3SAS_FMT 1864 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 1865 ioc->name, __func__, buffer_type); 1866 return -ENOMEM; 1867 } 1868 1869 if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) 1870 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1871 MPT3_APP_FLAGS_BUFFER_VALID); 1872 else 1873 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1874 MPT3_APP_FLAGS_BUFFER_VALID | 1875 MPT3_APP_FLAGS_FW_BUFFER_ACCESS); 1876 1877 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 1878 karg.product_specific[i] = 1879 ioc->product_specific[buffer_type][i]; 1880 1881 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; 1882 karg.driver_added_buffer_size = 0; 1883 karg.unique_id = ioc->unique_id[buffer_type]; 1884 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; 1885 1886 if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { 1887 pr_err(MPT3SAS_FMT 1888 "%s: unable to write mpt3_diag_query data @ %p\n", 1889 ioc->name, __func__, arg); 1890 return -EFAULT; 1891 } 1892 return 0; 1893 } 1894 1895 /** 1896 * mpt3sas_send_diag_release - Diag Release Message 1897 * @ioc: per adapter object 1898 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED 1899 * @issue_reset - specifies whether host reset is required. 
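 *
 * Return: 0 on success; -EAGAIN if the IOC is not operational, the ctl
 * command slot is in use, or no smid could be obtained; -EFAULT on a
 * timeout, a missing reply, or a non-success IOCStatus. On a timeout that
 * is not caused by a reset already in progress, *issue_reset is set so the
 * caller can perform the host reset.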
1900 * 1901 */ 1902 int 1903 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, 1904 u8 *issue_reset) 1905 { 1906 Mpi2DiagReleaseRequest_t *mpi_request; 1907 Mpi2DiagReleaseReply_t *mpi_reply; 1908 u16 smid; 1909 u16 ioc_status; 1910 u32 ioc_state; 1911 int rc; 1912 1913 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1914 __func__)); 1915 1916 rc = 0; 1917 *issue_reset = 0; 1918 1919 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1920 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1921 if (ioc->diag_buffer_status[buffer_type] & 1922 MPT3_DIAG_BUFFER_IS_REGISTERED) 1923 ioc->diag_buffer_status[buffer_type] |= 1924 MPT3_DIAG_BUFFER_IS_RELEASED; 1925 dctlprintk(ioc, pr_info(MPT3SAS_FMT 1926 "%s: skipping due to FAULT state\n", ioc->name, 1927 __func__)); 1928 rc = -EAGAIN; 1929 goto out; 1930 } 1931 1932 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 1933 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 1934 ioc->name, __func__); 1935 rc = -EAGAIN; 1936 goto out; 1937 } 1938 1939 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 1940 if (!smid) { 1941 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 1942 ioc->name, __func__); 1943 rc = -EAGAIN; 1944 goto out; 1945 } 1946 1947 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 1948 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 1949 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1950 ioc->ctl_cmds.smid = smid; 1951 1952 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; 1953 mpi_request->BufferType = buffer_type; 1954 mpi_request->VF_ID = 0; /* TODO */ 1955 mpi_request->VP_ID = 0; 1956 1957 init_completion(&ioc->ctl_cmds.done); 1958 ioc->put_smid_default(ioc, smid); 1959 wait_for_completion_timeout(&ioc->ctl_cmds.done, 1960 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1961 1962 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1963 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 1964 __func__); 1965 _debug_dump_mf(mpi_request, 1966 sizeof(Mpi2DiagReleaseRequest_t)/4); 1967 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 1968 *issue_reset = 1; 1969 rc = -EFAULT; 1970 goto out; 1971 } 1972 1973 /* process the completed Reply Message Frame */ 1974 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 1975 pr_err(MPT3SAS_FMT "%s: no reply message\n", 1976 ioc->name, __func__); 1977 rc = -EFAULT; 1978 goto out; 1979 } 1980 1981 mpi_reply = ioc->ctl_cmds.reply; 1982 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 1983 1984 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1985 ioc->diag_buffer_status[buffer_type] |= 1986 MPT3_DIAG_BUFFER_IS_RELEASED; 1987 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 1988 ioc->name, __func__)); 1989 } else { 1990 pr_info(MPT3SAS_FMT 1991 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 1992 ioc->name, __func__, 1993 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 1994 rc = -EFAULT; 1995 } 1996 1997 out: 1998 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 1999 return rc; 2000 } 2001 2002 /** 2003 * _ctl_diag_release - request to send Diag Release Message to firmware 2004 * @arg - user space buffer containing ioctl content 2005 * 2006 * This allows ownership of the specified buffer to returned to the driver, 2007 * allowing an application to read the buffer without fear that firmware is 2008 * overwriting information in the buffer. 
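 *
 * Return: 0 if the buffer was released, including the cases where it was
 * already released or was released by a host reset; -EFAULT, -EPERM,
 * -EINVAL or -ENOMEM on the corresponding validation failure; otherwise
 * the status of the Diag Release request sent to firmware.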
2009 */ 2010 static long 2011 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 2012 { 2013 struct mpt3_diag_release karg; 2014 void *request_data; 2015 int rc; 2016 u8 buffer_type; 2017 u8 issue_reset = 0; 2018 2019 if (copy_from_user(&karg, arg, sizeof(karg))) { 2020 pr_err("failure at %s:%d/%s()!\n", 2021 __FILE__, __LINE__, __func__); 2022 return -EFAULT; 2023 } 2024 2025 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2026 __func__)); 2027 2028 buffer_type = karg.unique_id & 0x000000ff; 2029 if (!_ctl_diag_capability(ioc, buffer_type)) { 2030 pr_err(MPT3SAS_FMT 2031 "%s: doesn't have capability for buffer_type(0x%02x)\n", 2032 ioc->name, __func__, buffer_type); 2033 return -EPERM; 2034 } 2035 2036 if ((ioc->diag_buffer_status[buffer_type] & 2037 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2038 pr_err(MPT3SAS_FMT 2039 "%s: buffer_type(0x%02x) is not registered\n", 2040 ioc->name, __func__, buffer_type); 2041 return -EINVAL; 2042 } 2043 2044 if (karg.unique_id != ioc->unique_id[buffer_type]) { 2045 pr_err(MPT3SAS_FMT 2046 "%s: unique_id(0x%08x) is not registered\n", 2047 ioc->name, __func__, karg.unique_id); 2048 return -EINVAL; 2049 } 2050 2051 if (ioc->diag_buffer_status[buffer_type] & 2052 MPT3_DIAG_BUFFER_IS_RELEASED) { 2053 pr_err(MPT3SAS_FMT 2054 "%s: buffer_type(0x%02x) is already released\n", 2055 ioc->name, __func__, 2056 buffer_type); 2057 return 0; 2058 } 2059 2060 request_data = ioc->diag_buffer[buffer_type]; 2061 2062 if (!request_data) { 2063 pr_err(MPT3SAS_FMT 2064 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", 2065 ioc->name, __func__, buffer_type); 2066 return -ENOMEM; 2067 } 2068 2069 /* buffers were released by due to host reset */ 2070 if ((ioc->diag_buffer_status[buffer_type] & 2071 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) { 2072 ioc->diag_buffer_status[buffer_type] |= 2073 MPT3_DIAG_BUFFER_IS_RELEASED; 2074 ioc->diag_buffer_status[buffer_type] &= 2075 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; 2076 pr_err(MPT3SAS_FMT 2077 "%s: buffer_type(0x%02x) was released due to host reset\n", 2078 ioc->name, __func__, buffer_type); 2079 return 0; 2080 } 2081 2082 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); 2083 2084 if (issue_reset) 2085 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 2086 2087 return rc; 2088 } 2089 2090 /** 2091 * _ctl_diag_read_buffer - request for copy of the diag buffer 2092 * @ioc: per adapter object 2093 * @arg - user space buffer containing ioctl content 2094 */ 2095 static long 2096 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 2097 { 2098 struct mpt3_diag_read_buffer karg; 2099 struct mpt3_diag_read_buffer __user *uarg = arg; 2100 void *request_data, *diag_data; 2101 Mpi2DiagBufferPostRequest_t *mpi_request; 2102 Mpi2DiagBufferPostReply_t *mpi_reply; 2103 int rc, i; 2104 u8 buffer_type; 2105 unsigned long request_size, copy_size; 2106 u16 smid; 2107 u16 ioc_status; 2108 u8 issue_reset = 0; 2109 2110 if (copy_from_user(&karg, arg, sizeof(karg))) { 2111 pr_err("failure at %s:%d/%s()!\n", 2112 __FILE__, __LINE__, __func__); 2113 return -EFAULT; 2114 } 2115 2116 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2117 __func__)); 2118 2119 buffer_type = karg.unique_id & 0x000000ff; 2120 if (!_ctl_diag_capability(ioc, buffer_type)) { 2121 pr_err(MPT3SAS_FMT 2122 "%s: doesn't have capability for buffer_type(0x%02x)\n", 2123 ioc->name, __func__, buffer_type); 2124 return -EPERM; 2125 } 2126 2127 if (karg.unique_id != ioc->unique_id[buffer_type]) { 2128 pr_err(MPT3SAS_FMT 2129 "%s: 
unique_id(0x%08x) is not registered\n", 2130 ioc->name, __func__, karg.unique_id); 2131 return -EINVAL; 2132 } 2133 2134 request_data = ioc->diag_buffer[buffer_type]; 2135 if (!request_data) { 2136 pr_err(MPT3SAS_FMT 2137 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 2138 ioc->name, __func__, buffer_type); 2139 return -ENOMEM; 2140 } 2141 2142 request_size = ioc->diag_buffer_sz[buffer_type]; 2143 2144 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { 2145 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \ 2146 "or bytes_to_read are not 4 byte aligned\n", ioc->name, 2147 __func__); 2148 return -EINVAL; 2149 } 2150 2151 if (karg.starting_offset > request_size) 2152 return -EINVAL; 2153 2154 diag_data = (void *)(request_data + karg.starting_offset); 2155 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2156 "%s: diag_buffer(%p), offset(%d), sz(%d)\n", 2157 ioc->name, __func__, 2158 diag_data, karg.starting_offset, karg.bytes_to_read)); 2159 2160 /* Truncate data on requests that are too large */ 2161 if ((diag_data + karg.bytes_to_read < diag_data) || 2162 (diag_data + karg.bytes_to_read > request_data + request_size)) 2163 copy_size = request_size - karg.starting_offset; 2164 else 2165 copy_size = karg.bytes_to_read; 2166 2167 if (copy_to_user((void __user *)uarg->diagnostic_data, 2168 diag_data, copy_size)) { 2169 pr_err(MPT3SAS_FMT 2170 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", 2171 ioc->name, __func__, diag_data); 2172 return -EFAULT; 2173 } 2174 2175 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) 2176 return 0; 2177 2178 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2179 "%s: Reregister buffer_type(0x%02x)\n", 2180 ioc->name, __func__, buffer_type)); 2181 if ((ioc->diag_buffer_status[buffer_type] & 2182 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { 2183 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2184 "%s: buffer_type(0x%02x) is still registered\n", 2185 ioc->name, __func__, buffer_type)); 2186 return 0; 2187 } 2188 /* Get a free request frame and save the message context. 
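 * The buffer is then re-posted to firmware with a Diag Buffer Post request,
 * reusing the buffer address, length and ProductSpecific words saved at
 * registration time.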
2189 */ 2190 2191 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 2192 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 2193 ioc->name, __func__); 2194 rc = -EAGAIN; 2195 goto out; 2196 } 2197 2198 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 2199 if (!smid) { 2200 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 2201 ioc->name, __func__); 2202 rc = -EAGAIN; 2203 goto out; 2204 } 2205 2206 rc = 0; 2207 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 2208 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 2209 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2210 ioc->ctl_cmds.smid = smid; 2211 2212 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 2213 mpi_request->BufferType = buffer_type; 2214 mpi_request->BufferLength = 2215 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); 2216 mpi_request->BufferAddress = 2217 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); 2218 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 2219 mpi_request->ProductSpecific[i] = 2220 cpu_to_le32(ioc->product_specific[buffer_type][i]); 2221 mpi_request->VF_ID = 0; /* TODO */ 2222 mpi_request->VP_ID = 0; 2223 2224 init_completion(&ioc->ctl_cmds.done); 2225 ioc->put_smid_default(ioc, smid); 2226 wait_for_completion_timeout(&ioc->ctl_cmds.done, 2227 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 2228 2229 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 2230 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 2231 __func__); 2232 _debug_dump_mf(mpi_request, 2233 sizeof(Mpi2DiagBufferPostRequest_t)/4); 2234 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 2235 issue_reset = 1; 2236 goto issue_host_reset; 2237 } 2238 2239 /* process the completed Reply Message Frame */ 2240 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 2241 pr_err(MPT3SAS_FMT "%s: no reply message\n", 2242 ioc->name, __func__); 2243 rc = -EFAULT; 2244 goto out; 2245 } 2246 2247 mpi_reply = ioc->ctl_cmds.reply; 2248 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 2249 2250 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 2251 ioc->diag_buffer_status[buffer_type] |= 2252 MPT3_DIAG_BUFFER_IS_REGISTERED; 2253 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 2254 ioc->name, __func__)); 2255 } else { 2256 pr_info(MPT3SAS_FMT 2257 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 2258 ioc->name, __func__, 2259 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 2260 rc = -EFAULT; 2261 } 2262 2263 issue_host_reset: 2264 if (issue_reset) 2265 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 2266 2267 out: 2268 2269 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 2270 return rc; 2271 } 2272 2273 2274 2275 #ifdef CONFIG_COMPAT 2276 /** 2277 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. 2278 * @ioc: per adapter object 2279 * @cmd - ioctl opcode 2280 * @arg - (struct mpt3_ioctl_command32) 2281 * 2282 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. 
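 *
 * The 32-bit pointer members of struct mpt3_ioctl_command32 are widened
 * with compat_ptr() before the request is handed to _ctl_do_mpt_command().
 *
 * Return: -EINVAL if the ioctl size does not match the 32-bit structure;
 * -EFAULT if the structure cannot be copied from user space; otherwise the
 * result of _ctl_do_mpt_command().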
2283 */ 2284 static long 2285 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, 2286 void __user *arg) 2287 { 2288 struct mpt3_ioctl_command32 karg32; 2289 struct mpt3_ioctl_command32 __user *uarg; 2290 struct mpt3_ioctl_command karg; 2291 2292 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) 2293 return -EINVAL; 2294 2295 uarg = (struct mpt3_ioctl_command32 __user *) arg; 2296 2297 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { 2298 pr_err("failure at %s:%d/%s()!\n", 2299 __FILE__, __LINE__, __func__); 2300 return -EFAULT; 2301 } 2302 2303 memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); 2304 karg.hdr.ioc_number = karg32.hdr.ioc_number; 2305 karg.hdr.port_number = karg32.hdr.port_number; 2306 karg.hdr.max_data_size = karg32.hdr.max_data_size; 2307 karg.timeout = karg32.timeout; 2308 karg.max_reply_bytes = karg32.max_reply_bytes; 2309 karg.data_in_size = karg32.data_in_size; 2310 karg.data_out_size = karg32.data_out_size; 2311 karg.max_sense_bytes = karg32.max_sense_bytes; 2312 karg.data_sge_offset = karg32.data_sge_offset; 2313 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); 2314 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); 2315 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); 2316 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); 2317 return _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2318 } 2319 #endif 2320 2321 /** 2322 * _ctl_ioctl_main - main ioctl entry point 2323 * @file - (struct file) 2324 * @cmd - ioctl opcode 2325 * @arg - user space data buffer 2326 * @compat - handles 32 bit applications in 64bit os 2327 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & 2328 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. 2329 */ 2330 static long 2331 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, 2332 u8 compat, u16 mpi_version) 2333 { 2334 struct MPT3SAS_ADAPTER *ioc; 2335 struct mpt3_ioctl_header ioctl_header; 2336 enum block_state state; 2337 long ret = -EINVAL; 2338 2339 /* get IOCTL header */ 2340 if (copy_from_user(&ioctl_header, (char __user *)arg, 2341 sizeof(struct mpt3_ioctl_header))) { 2342 pr_err("failure at %s:%d/%s()!\n", 2343 __FILE__, __LINE__, __func__); 2344 return -EFAULT; 2345 } 2346 2347 if (_ctl_verify_adapter(ioctl_header.ioc_number, 2348 &ioc, mpi_version) == -1 || !ioc) 2349 return -ENODEV; 2350 2351 /* pci_access_mutex lock acquired by ioctl path */ 2352 mutex_lock(&ioc->pci_access_mutex); 2353 2354 if (ioc->shost_recovery || ioc->pci_error_recovery || 2355 ioc->is_driver_loading || ioc->remove_host) { 2356 ret = -EAGAIN; 2357 goto out_unlock_pciaccess; 2358 } 2359 2360 state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; 2361 if (state == NON_BLOCKING) { 2362 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { 2363 ret = -EAGAIN; 2364 goto out_unlock_pciaccess; 2365 } 2366 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { 2367 ret = -ERESTARTSYS; 2368 goto out_unlock_pciaccess; 2369 } 2370 2371 2372 switch (cmd) { 2373 case MPT3IOCINFO: 2374 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) 2375 ret = _ctl_getiocinfo(ioc, arg); 2376 break; 2377 #ifdef CONFIG_COMPAT 2378 case MPT3COMMAND32: 2379 #endif 2380 case MPT3COMMAND: 2381 { 2382 struct mpt3_ioctl_command __user *uarg; 2383 struct mpt3_ioctl_command karg; 2384 2385 #ifdef CONFIG_COMPAT 2386 if (compat) { 2387 ret = _ctl_compat_mpt_command(ioc, cmd, arg); 2388 break; 2389 } 2390 #endif 2391 if (copy_from_user(&karg, arg, sizeof(karg))) { 2392 pr_err("failure at %s:%d/%s()!\n", 2393 __FILE__, __LINE__, __func__); 2394 ret = -EFAULT; 2395 break; 2396 } 2397 2398 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { 2399 uarg = arg; 2400 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2401 } 2402 break; 2403 } 2404 case MPT3EVENTQUERY: 2405 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) 2406 ret = _ctl_eventquery(ioc, arg); 2407 break; 2408 case MPT3EVENTENABLE: 2409 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) 2410 ret = _ctl_eventenable(ioc, arg); 2411 break; 2412 case MPT3EVENTREPORT: 2413 ret = _ctl_eventreport(ioc, arg); 2414 break; 2415 case MPT3HARDRESET: 2416 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) 2417 ret = _ctl_do_reset(ioc, arg); 2418 break; 2419 case MPT3BTDHMAPPING: 2420 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) 2421 ret = _ctl_btdh_mapping(ioc, arg); 2422 break; 2423 case MPT3DIAGREGISTER: 2424 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) 2425 ret = _ctl_diag_register(ioc, arg); 2426 break; 2427 case MPT3DIAGUNREGISTER: 2428 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) 2429 ret = _ctl_diag_unregister(ioc, arg); 2430 break; 2431 case MPT3DIAGQUERY: 2432 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) 2433 ret = _ctl_diag_query(ioc, arg); 2434 break; 2435 case MPT3DIAGRELEASE: 2436 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) 2437 ret = _ctl_diag_release(ioc, arg); 2438 break; 2439 case MPT3DIAGREADBUFFER: 2440 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) 2441 ret = _ctl_diag_read_buffer(ioc, arg); 2442 break; 2443 default: 2444 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2445 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); 2446 break; 2447 } 2448 2449 mutex_unlock(&ioc->ctl_cmds.mutex); 2450 out_unlock_pciaccess: 2451 mutex_unlock(&ioc->pci_access_mutex); 2452 return ret; 2453 } 2454 2455 /** 2456 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) 2457 * @file - (struct file) 2458 * @cmd - ioctl opcode 2459 * @arg - 2460 */ 2461 static long 2462 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2463 { 2464 long ret; 2465 2466 /* pass MPI25_VERSION | MPI26_VERSION value, 2467 * to indicate that this ioctl cmd 2468 * came from mpt3ctl ioctl device. 
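	 * _ctl_ioctl_main() looks up the adapter by ioc_number, rejects the
	 * request while reset, error recovery or driver load is in progress,
	 * and serializes the command through ctl_cmds.mutex (using a trylock
	 * for O_NONBLOCK callers).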
2469 */ 2470 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, 2471 MPI25_VERSION | MPI26_VERSION); 2472 return ret; 2473 } 2474 2475 /** 2476 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) 2477 * @file - (struct file) 2478 * @cmd - ioctl opcode 2479 * @arg - 2480 */ 2481 static long 2482 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2483 { 2484 long ret; 2485 2486 /* pass MPI2_VERSION value, to indicate that this ioctl cmd 2487 * came from mpt2ctl ioctl device. 2488 */ 2489 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); 2490 return ret; 2491 } 2492 #ifdef CONFIG_COMPAT 2493 /** 2494 *_ ctl_ioctl_compat - main ioctl entry point (compat) 2495 * @file - 2496 * @cmd - 2497 * @arg - 2498 * 2499 * This routine handles 32 bit applications in 64bit os. 2500 */ 2501 static long 2502 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2503 { 2504 long ret; 2505 2506 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, 2507 MPI25_VERSION | MPI26_VERSION); 2508 return ret; 2509 } 2510 2511 /** 2512 *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) 2513 * @file - 2514 * @cmd - 2515 * @arg - 2516 * 2517 * This routine handles 32 bit applications in 64bit os. 2518 */ 2519 static long 2520 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2521 { 2522 long ret; 2523 2524 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); 2525 return ret; 2526 } 2527 #endif 2528 2529 /* scsi host attributes */ 2530 /** 2531 * _ctl_version_fw_show - firmware version 2532 * @cdev - pointer to embedded class device 2533 * @buf - the buffer returned 2534 * 2535 * A sysfs 'read-only' shost attribute. 2536 */ 2537 static ssize_t 2538 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, 2539 char *buf) 2540 { 2541 struct Scsi_Host *shost = class_to_shost(cdev); 2542 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2543 2544 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2545 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 2546 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 2547 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 2548 ioc->facts.FWVersion.Word & 0x000000FF); 2549 } 2550 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); 2551 2552 /** 2553 * _ctl_version_bios_show - bios version 2554 * @cdev - pointer to embedded class device 2555 * @buf - the buffer returned 2556 * 2557 * A sysfs 'read-only' shost attribute. 2558 */ 2559 static ssize_t 2560 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, 2561 char *buf) 2562 { 2563 struct Scsi_Host *shost = class_to_shost(cdev); 2564 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2565 2566 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 2567 2568 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2569 (version & 0xFF000000) >> 24, 2570 (version & 0x00FF0000) >> 16, 2571 (version & 0x0000FF00) >> 8, 2572 version & 0x000000FF); 2573 } 2574 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); 2575 2576 /** 2577 * _ctl_version_mpi_show - MPI (message passing interface) version 2578 * @cdev - pointer to embedded class device 2579 * @buf - the buffer returned 2580 * 2581 * A sysfs 'read-only' shost attribute. 
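 *
 * Like the other shost attributes in this file, it is exposed through the
 * SCSI host's sysfs directory and is typically read with something like
 * "cat /sys/class/scsi_host/host0/version_mpi" (the host number shown here
 * is only an example and is system-dependent).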
2582 */ 2583 static ssize_t 2584 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, 2585 char *buf) 2586 { 2587 struct Scsi_Host *shost = class_to_shost(cdev); 2588 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2589 2590 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", 2591 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); 2592 } 2593 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); 2594 2595 /** 2596 * _ctl_version_product_show - product name 2597 * @cdev - pointer to embedded class device 2598 * @buf - the buffer returned 2599 * 2600 * A sysfs 'read-only' shost attribute. 2601 */ 2602 static ssize_t 2603 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr, 2604 char *buf) 2605 { 2606 struct Scsi_Host *shost = class_to_shost(cdev); 2607 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2608 2609 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); 2610 } 2611 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); 2612 2613 /** 2614 * _ctl_version_nvdata_persistent_show - ndvata persistent version 2615 * @cdev - pointer to embedded class device 2616 * @buf - the buffer returned 2617 * 2618 * A sysfs 'read-only' shost attribute. 2619 */ 2620 static ssize_t 2621 _ctl_version_nvdata_persistent_show(struct device *cdev, 2622 struct device_attribute *attr, char *buf) 2623 { 2624 struct Scsi_Host *shost = class_to_shost(cdev); 2625 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2626 2627 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2628 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); 2629 } 2630 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, 2631 _ctl_version_nvdata_persistent_show, NULL); 2632 2633 /** 2634 * _ctl_version_nvdata_default_show - nvdata default version 2635 * @cdev - pointer to embedded class device 2636 * @buf - the buffer returned 2637 * 2638 * A sysfs 'read-only' shost attribute. 2639 */ 2640 static ssize_t 2641 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute 2642 *attr, char *buf) 2643 { 2644 struct Scsi_Host *shost = class_to_shost(cdev); 2645 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2646 2647 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2648 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); 2649 } 2650 static DEVICE_ATTR(version_nvdata_default, S_IRUGO, 2651 _ctl_version_nvdata_default_show, NULL); 2652 2653 /** 2654 * _ctl_board_name_show - board name 2655 * @cdev - pointer to embedded class device 2656 * @buf - the buffer returned 2657 * 2658 * A sysfs 'read-only' shost attribute. 2659 */ 2660 static ssize_t 2661 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr, 2662 char *buf) 2663 { 2664 struct Scsi_Host *shost = class_to_shost(cdev); 2665 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2666 2667 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); 2668 } 2669 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); 2670 2671 /** 2672 * _ctl_board_assembly_show - board assembly name 2673 * @cdev - pointer to embedded class device 2674 * @buf - the buffer returned 2675 * 2676 * A sysfs 'read-only' shost attribute. 
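 *
 * The string is read from the controller's Manufacturing Page 0
 * (ioc->manu_pg0.BoardAssembly).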
2677 */ 2678 static ssize_t 2679 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr, 2680 char *buf) 2681 { 2682 struct Scsi_Host *shost = class_to_shost(cdev); 2683 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2684 2685 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); 2686 } 2687 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); 2688 2689 /** 2690 * _ctl_board_tracer_show - board tracer number 2691 * @cdev - pointer to embedded class device 2692 * @buf - the buffer returned 2693 * 2694 * A sysfs 'read-only' shost attribute. 2695 */ 2696 static ssize_t 2697 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr, 2698 char *buf) 2699 { 2700 struct Scsi_Host *shost = class_to_shost(cdev); 2701 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2702 2703 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); 2704 } 2705 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); 2706 2707 /** 2708 * _ctl_io_delay_show - io missing delay 2709 * @cdev - pointer to embedded class device 2710 * @buf - the buffer returned 2711 * 2712 * This is for firmware implemention for deboucing device 2713 * removal events. 2714 * 2715 * A sysfs 'read-only' shost attribute. 2716 */ 2717 static ssize_t 2718 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr, 2719 char *buf) 2720 { 2721 struct Scsi_Host *shost = class_to_shost(cdev); 2722 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2723 2724 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); 2725 } 2726 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); 2727 2728 /** 2729 * _ctl_device_delay_show - device missing delay 2730 * @cdev - pointer to embedded class device 2731 * @buf - the buffer returned 2732 * 2733 * This is for firmware implemention for deboucing device 2734 * removal events. 2735 * 2736 * A sysfs 'read-only' shost attribute. 2737 */ 2738 static ssize_t 2739 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr, 2740 char *buf) 2741 { 2742 struct Scsi_Host *shost = class_to_shost(cdev); 2743 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2744 2745 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); 2746 } 2747 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); 2748 2749 /** 2750 * _ctl_fw_queue_depth_show - global credits 2751 * @cdev - pointer to embedded class device 2752 * @buf - the buffer returned 2753 * 2754 * This is firmware queue depth limit 2755 * 2756 * A sysfs 'read-only' shost attribute. 2757 */ 2758 static ssize_t 2759 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, 2760 char *buf) 2761 { 2762 struct Scsi_Host *shost = class_to_shost(cdev); 2763 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2764 2765 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); 2766 } 2767 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL); 2768 2769 /** 2770 * _ctl_sas_address_show - sas address 2771 * @cdev - pointer to embedded class device 2772 * @buf - the buffer returned 2773 * 2774 * This is the controller sas address 2775 * 2776 * A sysfs 'read-only' shost attribute. 
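 *
 * The address is reported as a 16-digit hexadecimal value taken from
 * ioc->sas_hba.sas_address.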
2777 */ 2778 static ssize_t 2779 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr, 2780 char *buf) 2781 2782 { 2783 struct Scsi_Host *shost = class_to_shost(cdev); 2784 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2785 2786 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 2787 (unsigned long long)ioc->sas_hba.sas_address); 2788 } 2789 static DEVICE_ATTR(host_sas_address, S_IRUGO, 2790 _ctl_host_sas_address_show, NULL); 2791 2792 /** 2793 * _ctl_logging_level_show - logging level 2794 * @cdev - pointer to embedded class device 2795 * @buf - the buffer returned 2796 * 2797 * A sysfs 'read/write' shost attribute. 2798 */ 2799 static ssize_t 2800 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr, 2801 char *buf) 2802 { 2803 struct Scsi_Host *shost = class_to_shost(cdev); 2804 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2805 2806 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); 2807 } 2808 static ssize_t 2809 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr, 2810 const char *buf, size_t count) 2811 { 2812 struct Scsi_Host *shost = class_to_shost(cdev); 2813 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2814 int val = 0; 2815 2816 if (sscanf(buf, "%x", &val) != 1) 2817 return -EINVAL; 2818 2819 ioc->logging_level = val; 2820 pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name, 2821 ioc->logging_level); 2822 return strlen(buf); 2823 } 2824 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show, 2825 _ctl_logging_level_store); 2826 2827 /** 2828 * _ctl_fwfault_debug_show - show/store fwfault_debug 2829 * @cdev - pointer to embedded class device 2830 * @buf - the buffer returned 2831 * 2832 * mpt3sas_fwfault_debug is command line option 2833 * A sysfs 'read/write' shost attribute. 2834 */ 2835 static ssize_t 2836 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr, 2837 char *buf) 2838 { 2839 struct Scsi_Host *shost = class_to_shost(cdev); 2840 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2841 2842 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); 2843 } 2844 static ssize_t 2845 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr, 2846 const char *buf, size_t count) 2847 { 2848 struct Scsi_Host *shost = class_to_shost(cdev); 2849 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2850 int val = 0; 2851 2852 if (sscanf(buf, "%d", &val) != 1) 2853 return -EINVAL; 2854 2855 ioc->fwfault_debug = val; 2856 pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name, 2857 ioc->fwfault_debug); 2858 return strlen(buf); 2859 } 2860 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, 2861 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); 2862 2863 /** 2864 * _ctl_ioc_reset_count_show - ioc reset count 2865 * @cdev - pointer to embedded class device 2866 * @buf - the buffer returned 2867 * 2868 * This is firmware queue depth limit 2869 * 2870 * A sysfs 'read-only' shost attribute. 
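 *
 * The value shown is ioc->ioc_reset_count, the number of controller resets
 * the driver has recorded for this adapter.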
2871 */ 2872 static ssize_t 2873 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, 2874 char *buf) 2875 { 2876 struct Scsi_Host *shost = class_to_shost(cdev); 2877 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2878 2879 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); 2880 } 2881 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); 2882 2883 /** 2884 * _ctl_ioc_reply_queue_count_show - number of reply queues 2885 * @cdev - pointer to embedded class device 2886 * @buf - the buffer returned 2887 * 2888 * This is number of reply queues 2889 * 2890 * A sysfs 'read-only' shost attribute. 2891 */ 2892 static ssize_t 2893 _ctl_ioc_reply_queue_count_show(struct device *cdev, 2894 struct device_attribute *attr, char *buf) 2895 { 2896 u8 reply_queue_count; 2897 struct Scsi_Host *shost = class_to_shost(cdev); 2898 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2899 2900 if ((ioc->facts.IOCCapabilities & 2901 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) 2902 reply_queue_count = ioc->reply_queue_count; 2903 else 2904 reply_queue_count = 1; 2905 2906 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); 2907 } 2908 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, 2909 NULL); 2910 2911 /** 2912 * _ctl_BRM_status_show - Backup Rail Monitor Status 2913 * @cdev - pointer to embedded class device 2914 * @buf - the buffer returned 2915 * 2916 * This is number of reply queues 2917 * 2918 * A sysfs 'read-only' shost attribute. 2919 */ 2920 static ssize_t 2921 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, 2922 char *buf) 2923 { 2924 struct Scsi_Host *shost = class_to_shost(cdev); 2925 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2926 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; 2927 Mpi2ConfigReply_t mpi_reply; 2928 u16 backup_rail_monitor_status = 0; 2929 u16 ioc_status; 2930 int sz; 2931 ssize_t rc = 0; 2932 2933 if (!ioc->is_warpdrive) { 2934 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for" 2935 " warpdrive\n", ioc->name, __func__); 2936 goto out; 2937 } 2938 /* pci_access_mutex lock acquired by sysfs show path */ 2939 mutex_lock(&ioc->pci_access_mutex); 2940 if (ioc->pci_error_recovery || ioc->remove_host) { 2941 mutex_unlock(&ioc->pci_access_mutex); 2942 return 0; 2943 } 2944 2945 /* allocate upto GPIOVal 36 entries */ 2946 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); 2947 io_unit_pg3 = kzalloc(sz, GFP_KERNEL); 2948 if (!io_unit_pg3) { 2949 pr_err(MPT3SAS_FMT "%s: failed allocating memory " 2950 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz); 2951 goto out; 2952 } 2953 2954 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != 2955 0) { 2956 pr_err(MPT3SAS_FMT 2957 "%s: failed reading iounit_pg3\n", ioc->name, 2958 __func__); 2959 goto out; 2960 } 2961 2962 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 2963 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2964 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with " 2965 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status); 2966 goto out; 2967 } 2968 2969 if (io_unit_pg3->GPIOCount < 25) { 2970 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than " 2971 "25 entries, detected (%d) entries\n", ioc->name, __func__, 2972 io_unit_pg3->GPIOCount); 2973 goto out; 2974 } 2975 2976 /* BRM status is in bit zero of GPIOVal[24] */ 2977 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); 2978 rc = snprintf(buf, PAGE_SIZE, "%d\n", 
(backup_rail_monitor_status & 1)); 2979 2980 out: 2981 kfree(io_unit_pg3); 2982 mutex_unlock(&ioc->pci_access_mutex); 2983 return rc; 2984 } 2985 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); 2986 2987 struct DIAG_BUFFER_START { 2988 __le32 Size; 2989 __le32 DiagVersion; 2990 u8 BufferType; 2991 u8 Reserved[3]; 2992 __le32 Reserved1; 2993 __le32 Reserved2; 2994 __le32 Reserved3; 2995 }; 2996 2997 /** 2998 * _ctl_host_trace_buffer_size_show - host buffer size (trace only) 2999 * @cdev - pointer to embedded class device 3000 * @buf - the buffer returned 3001 * 3002 * A sysfs 'read-only' shost attribute. 3003 */ 3004 static ssize_t 3005 _ctl_host_trace_buffer_size_show(struct device *cdev, 3006 struct device_attribute *attr, char *buf) 3007 { 3008 struct Scsi_Host *shost = class_to_shost(cdev); 3009 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3010 u32 size = 0; 3011 struct DIAG_BUFFER_START *request_data; 3012 3013 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 3014 pr_err(MPT3SAS_FMT 3015 "%s: host_trace_buffer is not registered\n", 3016 ioc->name, __func__); 3017 return 0; 3018 } 3019 3020 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3021 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 3022 pr_err(MPT3SAS_FMT 3023 "%s: host_trace_buffer is not registered\n", 3024 ioc->name, __func__); 3025 return 0; 3026 } 3027 3028 request_data = (struct DIAG_BUFFER_START *) 3029 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; 3030 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || 3031 le32_to_cpu(request_data->DiagVersion) == 0x01000000 || 3032 le32_to_cpu(request_data->DiagVersion) == 0x01010000) && 3033 le32_to_cpu(request_data->Reserved3) == 0x4742444c) 3034 size = le32_to_cpu(request_data->Size); 3035 3036 ioc->ring_buffer_sz = size; 3037 return snprintf(buf, PAGE_SIZE, "%d\n", size); 3038 } 3039 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, 3040 _ctl_host_trace_buffer_size_show, NULL); 3041 3042 /** 3043 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) 3044 * @cdev - pointer to embedded class device 3045 * @buf - the buffer returned 3046 * 3047 * A sysfs 'read/write' shost attribute. 3048 * 3049 * You will only be able to read 4k bytes of ring buffer at a time. 3050 * In order to read beyond 4k bytes, you will have to write out the 3051 * offset to the same attribute, it will move the pointer. 3052 */ 3053 static ssize_t 3054 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, 3055 char *buf) 3056 { 3057 struct Scsi_Host *shost = class_to_shost(cdev); 3058 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3059 void *request_data; 3060 u32 size; 3061 3062 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 3063 pr_err(MPT3SAS_FMT 3064 "%s: host_trace_buffer is not registered\n", 3065 ioc->name, __func__); 3066 return 0; 3067 } 3068 3069 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3070 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 3071 pr_err(MPT3SAS_FMT 3072 "%s: host_trace_buffer is not registered\n", 3073 ioc->name, __func__); 3074 return 0; 3075 } 3076 3077 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) 3078 return 0; 3079 3080 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; 3081 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 3082 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; 3083 memcpy(buf, request_data, size); 3084 return size; 3085 } 3086 3087 static ssize_t 3088 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, 3089 const char *buf, size_t count) 3090 { 3091 struct Scsi_Host *shost = class_to_shost(cdev); 3092 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3093 int val = 0; 3094 3095 if (sscanf(buf, "%d", &val) != 1) 3096 return -EINVAL; 3097 3098 ioc->ring_buffer_offset = val; 3099 return strlen(buf); 3100 } 3101 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, 3102 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); 3103 3104 3105 /*****************************************/ 3106 3107 /** 3108 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) 3109 * @cdev - pointer to embedded class device 3110 * @buf - the buffer returned 3111 * 3112 * A sysfs 'read/write' shost attribute. 3113 * 3114 * This is a mechnism to post/release host_trace_buffers 3115 */ 3116 static ssize_t 3117 _ctl_host_trace_buffer_enable_show(struct device *cdev, 3118 struct device_attribute *attr, char *buf) 3119 { 3120 struct Scsi_Host *shost = class_to_shost(cdev); 3121 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3122 3123 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || 3124 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3125 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)) 3126 return snprintf(buf, PAGE_SIZE, "off\n"); 3127 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3128 MPT3_DIAG_BUFFER_IS_RELEASED)) 3129 return snprintf(buf, PAGE_SIZE, "release\n"); 3130 else 3131 return snprintf(buf, PAGE_SIZE, "post\n"); 3132 } 3133 3134 static ssize_t 3135 _ctl_host_trace_buffer_enable_store(struct device *cdev, 3136 struct device_attribute *attr, const char *buf, size_t count) 3137 { 3138 struct Scsi_Host *shost = class_to_shost(cdev); 3139 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3140 char str[10] = ""; 3141 struct mpt3_diag_register diag_register; 3142 u8 issue_reset = 0; 3143 3144 /* don't allow post/release occurr while recovery is active */ 3145 if (ioc->shost_recovery || ioc->remove_host || 3146 ioc->pci_error_recovery || ioc->is_driver_loading) 3147 return -EBUSY; 3148 3149 if (sscanf(buf, "%9s", str) != 1) 3150 return -EINVAL; 3151 3152 if (!strcmp(str, "post")) { 3153 /* exit out if host buffers are already posted */ 3154 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && 3155 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3156 MPT3_DIAG_BUFFER_IS_REGISTERED) && 3157 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3158 MPT3_DIAG_BUFFER_IS_RELEASED) == 0)) 3159 goto out; 3160 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 3161 pr_info(MPT3SAS_FMT "posting host trace buffers\n", 3162 ioc->name); 3163 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 3164 diag_register.requested_buffer_size = (1024 * 1024); 3165 diag_register.unique_id = 0x7075900; 3166 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; 3167 _ctl_diag_register_2(ioc, &diag_register); 3168 } else if (!strcmp(str, "release")) { 3169 /* exit out if host buffers are already released */ 3170 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) 3171 goto out; 3172 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3173 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) 3174 goto out; 3175 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3176 MPT3_DIAG_BUFFER_IS_RELEASED)) 3177 goto out; 3178 
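		/* hand ownership of the trace buffer back to the driver by
		 * sending a Diag Release request to firmware
		 */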
pr_info(MPT3SAS_FMT "releasing host trace buffer\n", 3179 ioc->name); 3180 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, 3181 &issue_reset); 3182 } 3183 3184 out: 3185 return strlen(buf); 3186 } 3187 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, 3188 _ctl_host_trace_buffer_enable_show, 3189 _ctl_host_trace_buffer_enable_store); 3190 3191 /*********** diagnostic trigger suppport *********************************/ 3192 3193 /** 3194 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute 3195 * @cdev - pointer to embedded class device 3196 * @buf - the buffer returned 3197 * 3198 * A sysfs 'read/write' shost attribute. 3199 */ 3200 static ssize_t 3201 _ctl_diag_trigger_master_show(struct device *cdev, 3202 struct device_attribute *attr, char *buf) 3203 3204 { 3205 struct Scsi_Host *shost = class_to_shost(cdev); 3206 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3207 unsigned long flags; 3208 ssize_t rc; 3209 3210 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3211 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); 3212 memcpy(buf, &ioc->diag_trigger_master, rc); 3213 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3214 return rc; 3215 } 3216 3217 /** 3218 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute 3219 * @cdev - pointer to embedded class device 3220 * @buf - the buffer returned 3221 * 3222 * A sysfs 'read/write' shost attribute. 3223 */ 3224 static ssize_t 3225 _ctl_diag_trigger_master_store(struct device *cdev, 3226 struct device_attribute *attr, const char *buf, size_t count) 3227 3228 { 3229 struct Scsi_Host *shost = class_to_shost(cdev); 3230 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3231 unsigned long flags; 3232 ssize_t rc; 3233 3234 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3235 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); 3236 memset(&ioc->diag_trigger_master, 0, 3237 sizeof(struct SL_WH_MASTER_TRIGGER_T)); 3238 memcpy(&ioc->diag_trigger_master, buf, rc); 3239 ioc->diag_trigger_master.MasterData |= 3240 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 3241 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3242 return rc; 3243 } 3244 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, 3245 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); 3246 3247 3248 /** 3249 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute 3250 * @cdev - pointer to embedded class device 3251 * @buf - the buffer returned 3252 * 3253 * A sysfs 'read/write' shost attribute. 3254 */ 3255 static ssize_t 3256 _ctl_diag_trigger_event_show(struct device *cdev, 3257 struct device_attribute *attr, char *buf) 3258 { 3259 struct Scsi_Host *shost = class_to_shost(cdev); 3260 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3261 unsigned long flags; 3262 ssize_t rc; 3263 3264 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3265 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); 3266 memcpy(buf, &ioc->diag_trigger_event, rc); 3267 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3268 return rc; 3269 } 3270 3271 /** 3272 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute 3273 * @cdev - pointer to embedded class device 3274 * @buf - the buffer returned 3275 * 3276 * A sysfs 'read/write' shost attribute. 
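 *
 * At most sizeof(struct SL_WH_EVENT_TRIGGERS_T) bytes are copied from the
 * incoming buffer, and ValidEntries is clamped to NUM_VALID_ENTRIES.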
3277 */ 3278 static ssize_t 3279 _ctl_diag_trigger_event_store(struct device *cdev, 3280 struct device_attribute *attr, const char *buf, size_t count) 3281 3282 { 3283 struct Scsi_Host *shost = class_to_shost(cdev); 3284 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3285 unsigned long flags; 3286 ssize_t sz; 3287 3288 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3289 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); 3290 memset(&ioc->diag_trigger_event, 0, 3291 sizeof(struct SL_WH_EVENT_TRIGGERS_T)); 3292 memcpy(&ioc->diag_trigger_event, buf, sz); 3293 if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES) 3294 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES; 3295 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3296 return sz; 3297 } 3298 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR, 3299 _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store); 3300 3301 3302 /** 3303 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute 3304 * @cdev - pointer to embedded class device 3305 * @buf - the buffer returned 3306 * 3307 * A sysfs 'read/write' shost attribute. 3308 */ 3309 static ssize_t 3310 _ctl_diag_trigger_scsi_show(struct device *cdev, 3311 struct device_attribute *attr, char *buf) 3312 { 3313 struct Scsi_Host *shost = class_to_shost(cdev); 3314 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3315 unsigned long flags; 3316 ssize_t rc; 3317 3318 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3319 rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T); 3320 memcpy(buf, &ioc->diag_trigger_scsi, rc); 3321 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3322 return rc; 3323 } 3324 3325 /** 3326 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute 3327 * @cdev - pointer to embedded class device 3328 * @buf - the buffer returned 3329 * 3330 * A sysfs 'read/write' shost attribute. 3331 */ 3332 static ssize_t 3333 _ctl_diag_trigger_scsi_store(struct device *cdev, 3334 struct device_attribute *attr, const char *buf, size_t count) 3335 { 3336 struct Scsi_Host *shost = class_to_shost(cdev); 3337 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3338 unsigned long flags; 3339 ssize_t sz; 3340 3341 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3342 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count); 3343 memset(&ioc->diag_trigger_scsi, 0, 3344 sizeof(struct SL_WH_EVENT_TRIGGERS_T)); 3345 memcpy(&ioc->diag_trigger_scsi, buf, sz); 3346 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) 3347 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES; 3348 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3349 return sz; 3350 } 3351 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR, 3352 _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store); 3353 3354 3355 /** 3356 * _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute 3357 * @cdev - pointer to embedded class device 3358 * @buf - the buffer returned 3359 * 3360 * A sysfs 'read/write' shost attribute. 
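 *
 * The show handler copies sizeof(struct SL_WH_MPI_TRIGGERS_T) bytes of
 * ioc->diag_trigger_mpi into the returned buffer while holding
 * diag_trigger_lock.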
3361 */ 3362 static ssize_t 3363 _ctl_diag_trigger_mpi_show(struct device *cdev, 3364 struct device_attribute *attr, char *buf) 3365 { 3366 struct Scsi_Host *shost = class_to_shost(cdev); 3367 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3368 unsigned long flags; 3369 ssize_t rc; 3370 3371 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3372 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T); 3373 memcpy(buf, &ioc->diag_trigger_mpi, rc); 3374 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3375 return rc; 3376 } 3377 3378 /** 3379 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute 3380 * @cdev - pointer to embedded class device 3381 * @buf - the buffer returned 3382 * 3383 * A sysfs 'read/write' shost attribute. 3384 */ 3385 static ssize_t 3386 _ctl_diag_trigger_mpi_store(struct device *cdev, 3387 struct device_attribute *attr, const char *buf, size_t count) 3388 { 3389 struct Scsi_Host *shost = class_to_shost(cdev); 3390 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3391 unsigned long flags; 3392 ssize_t sz; 3393 3394 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3395 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); 3396 memset(&ioc->diag_trigger_mpi, 0, 3397 sizeof(ioc->diag_trigger_mpi)); 3398 memcpy(&ioc->diag_trigger_mpi, buf, sz); 3399 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) 3400 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; 3401 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3402 return sz; 3403 } 3404 3405 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, 3406 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); 3407 3408 /*********** diagnostic trigger suppport *** END ****************************/ 3409 3410 /*****************************************/ 3411 3412 struct device_attribute *mpt3sas_host_attrs[] = { 3413 &dev_attr_version_fw, 3414 &dev_attr_version_bios, 3415 &dev_attr_version_mpi, 3416 &dev_attr_version_product, 3417 &dev_attr_version_nvdata_persistent, 3418 &dev_attr_version_nvdata_default, 3419 &dev_attr_board_name, 3420 &dev_attr_board_assembly, 3421 &dev_attr_board_tracer, 3422 &dev_attr_io_delay, 3423 &dev_attr_device_delay, 3424 &dev_attr_logging_level, 3425 &dev_attr_fwfault_debug, 3426 &dev_attr_fw_queue_depth, 3427 &dev_attr_host_sas_address, 3428 &dev_attr_ioc_reset_count, 3429 &dev_attr_host_trace_buffer_size, 3430 &dev_attr_host_trace_buffer, 3431 &dev_attr_host_trace_buffer_enable, 3432 &dev_attr_reply_queue_count, 3433 &dev_attr_diag_trigger_master, 3434 &dev_attr_diag_trigger_event, 3435 &dev_attr_diag_trigger_scsi, 3436 &dev_attr_diag_trigger_mpi, 3437 &dev_attr_BRM_status, 3438 NULL, 3439 }; 3440 3441 /* device attributes */ 3442 3443 /** 3444 * _ctl_device_sas_address_show - sas address 3445 * @cdev - pointer to embedded class device 3446 * @buf - the buffer returned 3447 * 3448 * This is the sas address for the target 3449 * 3450 * A sysfs 'read-only' shost attribute. 
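 *
 * The address comes from the target data attached to the scsi_device
 * (sas_device_priv_data->sas_target->sas_address).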
3451 */ 3452 static ssize_t 3453 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, 3454 char *buf) 3455 { 3456 struct scsi_device *sdev = to_scsi_device(dev); 3457 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3458 3459 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 3460 (unsigned long long)sas_device_priv_data->sas_target->sas_address); 3461 } 3462 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); 3463 3464 /** 3465 * _ctl_device_handle_show - device handle 3466 * @cdev - pointer to embedded class device 3467 * @buf - the buffer returned 3468 * 3469 * This is the firmware assigned device handle 3470 * 3471 * A sysfs 'read-only' shost attribute. 3472 */ 3473 static ssize_t 3474 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr, 3475 char *buf) 3476 { 3477 struct scsi_device *sdev = to_scsi_device(dev); 3478 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3479 3480 return snprintf(buf, PAGE_SIZE, "0x%04x\n", 3481 sas_device_priv_data->sas_target->handle); 3482 } 3483 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); 3484 3485 /** 3486 * _ctl_device_ncq_io_prio_show - send prioritized io commands to device 3487 * @dev - pointer to embedded device 3488 * @buf - the buffer returned 3489 * 3490 * A sysfs 'read/write' sdev attribute, only works with SATA 3491 */ 3492 static ssize_t 3493 _ctl_device_ncq_prio_enable_show(struct device *dev, 3494 struct device_attribute *attr, char *buf) 3495 { 3496 struct scsi_device *sdev = to_scsi_device(dev); 3497 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3498 3499 return snprintf(buf, PAGE_SIZE, "%d\n", 3500 sas_device_priv_data->ncq_prio_enable); 3501 } 3502 3503 static ssize_t 3504 _ctl_device_ncq_prio_enable_store(struct device *dev, 3505 struct device_attribute *attr, 3506 const char *buf, size_t count) 3507 { 3508 struct scsi_device *sdev = to_scsi_device(dev); 3509 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3510 bool ncq_prio_enable = 0; 3511 3512 if (kstrtobool(buf, &ncq_prio_enable)) 3513 return -EINVAL; 3514 3515 if (!scsih_ncq_prio_supp(sdev)) 3516 return -EINVAL; 3517 3518 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; 3519 return strlen(buf); 3520 } 3521 static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR, 3522 _ctl_device_ncq_prio_enable_show, 3523 _ctl_device_ncq_prio_enable_store); 3524 3525 struct device_attribute *mpt3sas_dev_attrs[] = { 3526 &dev_attr_sas_address, 3527 &dev_attr_sas_device_handle, 3528 &dev_attr_sas_ncq_prio_enable, 3529 NULL, 3530 }; 3531 3532 /* file operations table for mpt3ctl device */ 3533 static const struct file_operations ctl_fops = { 3534 .owner = THIS_MODULE, 3535 .unlocked_ioctl = _ctl_ioctl, 3536 .poll = _ctl_poll, 3537 .fasync = _ctl_fasync, 3538 #ifdef CONFIG_COMPAT 3539 .compat_ioctl = _ctl_ioctl_compat, 3540 #endif 3541 }; 3542 3543 /* file operations table for mpt2ctl device */ 3544 static const struct file_operations ctl_gen2_fops = { 3545 .owner = THIS_MODULE, 3546 .unlocked_ioctl = _ctl_mpt2_ioctl, 3547 .poll = _ctl_poll, 3548 .fasync = _ctl_fasync, 3549 #ifdef CONFIG_COMPAT 3550 .compat_ioctl = _ctl_mpt2_ioctl_compat, 3551 #endif 3552 }; 3553 3554 static struct miscdevice ctl_dev = { 3555 .minor = MPT3SAS_MINOR, 3556 .name = MPT3SAS_DEV_NAME, 3557 .fops = &ctl_fops, 3558 }; 3559 3560 static struct miscdevice gen2_ctl_dev = { 3561 .minor = MPT2SAS_MINOR, 3562 .name = MPT2SAS_DEV_NAME, 3563 .fops = &ctl_gen2_fops, 
3564 }; 3565 3566 /** 3567 * mpt3sas_ctl_init - main entry point for ctl. 3568 * 3569 */ 3570 void 3571 mpt3sas_ctl_init(ushort hbas_to_enumerate) 3572 { 3573 async_queue = NULL; 3574 3575 /* Don't register mpt3ctl ioctl device if 3576 * hbas_to_enumerate is one. 3577 */ 3578 if (hbas_to_enumerate != 1) 3579 if (misc_register(&ctl_dev) < 0) 3580 pr_err("%s can't register misc device [minor=%d]\n", 3581 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); 3582 3583 /* Don't register mpt2ctl ioctl device if 3584 * hbas_to_enumerate is two. 3585 */ 3586 if (hbas_to_enumerate != 2) 3587 if (misc_register(&gen2_ctl_dev) < 0) 3588 pr_err("%s can't register misc device [minor=%d]\n", 3589 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); 3590 3591 init_waitqueue_head(&ctl_poll_wait); 3592 } 3593 3594 /** 3595 * mpt3sas_ctl_exit - exit point for ctl 3596 * 3597 */ 3598 void 3599 mpt3sas_ctl_exit(ushort hbas_to_enumerate) 3600 { 3601 struct MPT3SAS_ADAPTER *ioc; 3602 int i; 3603 3604 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 3605 3606 /* free memory associated with diag buffers */ 3607 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 3608 if (!ioc->diag_buffer[i]) 3609 continue; 3610 if (!(ioc->diag_buffer_status[i] & 3611 MPT3_DIAG_BUFFER_IS_REGISTERED)) 3612 continue; 3613 if ((ioc->diag_buffer_status[i] & 3614 MPT3_DIAG_BUFFER_IS_RELEASED)) 3615 continue; 3616 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], 3617 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); 3618 ioc->diag_buffer[i] = NULL; 3619 ioc->diag_buffer_status[i] = 0; 3620 } 3621 3622 kfree(ioc->event_log); 3623 } 3624 if (hbas_to_enumerate != 1) 3625 misc_deregister(&ctl_dev); 3626 if (hbas_to_enumerate != 2) 3627 misc_deregister(&gen2_ctl_dev); 3628 } 3629