/*
 * Management Module Support for MPT (Message Passing Technology) based
 * controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
 * Copyright (C) 2012-2014 LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/poll.h>

#include <linux/io.h>
#include <linux/uaccess.h>

#include "mpt3sas_base.h"
#include "mpt3sas_ctl.h"


static struct fasync_struct *async_queue;
static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);


/**
 * enum block_state - blocking state
 * @NON_BLOCKING: non blocking
 * @BLOCKING: blocking
 *
 * These states are for ioctls that need to wait for a response
 * from firmware, so they probably require sleep.
 */
enum block_state {
	NON_BLOCKING,
	BLOCKING,
};

/**
 * _ctl_display_some_debug - debug routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @calling_function_name: string passed from the calling function
 * @mpi_reply: reply message frame
 * Context: none.
 *
 * Function for displaying debug info helpful when debugging issues
 * in this module.
 */
static void
_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	}

	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
	    ioc->name, calling_function_name, desc, smid);

	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		pr_info(MPT3SAS_FMT
		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));

	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		struct _pcie_device *pcie_device = NULL;

		sas_device = mpt3sas_get_sdev_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
				ioc->name, (unsigned long long)
				sas_device->sas_address, sas_device->phy);
			pr_warn(MPT3SAS_FMT
"\tenclosure_logical_id(0x%016llx), slot(%d)\n", 216 ioc->name, (unsigned long long) 217 sas_device->enclosure_logical_id, sas_device->slot); 218 sas_device_put(sas_device); 219 } 220 if (!sas_device) { 221 pcie_device = mpt3sas_get_pdev_by_handle(ioc, 222 le16_to_cpu(scsi_reply->DevHandle)); 223 if (pcie_device) { 224 pr_warn(MPT3SAS_FMT 225 "\tWWID(0x%016llx), port(%d)\n", ioc->name, 226 (unsigned long long)pcie_device->wwid, 227 pcie_device->port_num); 228 if (pcie_device->enclosure_handle != 0) 229 pr_warn(MPT3SAS_FMT 230 "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 231 ioc->name, (unsigned long long) 232 pcie_device->enclosure_logical_id, 233 pcie_device->slot); 234 pcie_device_put(pcie_device); 235 } 236 } 237 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) 238 pr_info(MPT3SAS_FMT 239 "\tscsi_state(0x%02x), scsi_status" 240 "(0x%02x)\n", ioc->name, 241 scsi_reply->SCSIState, 242 scsi_reply->SCSIStatus); 243 } 244 } 245 246 /** 247 * mpt3sas_ctl_done - ctl module completion routine 248 * @ioc: per adapter object 249 * @smid: system request message index 250 * @msix_index: MSIX table index supplied by the OS 251 * @reply: reply message frame(lower 32bit addr) 252 * Context: none. 253 * 254 * The callback handler when using ioc->ctl_cb_idx. 255 * 256 * Return 1 meaning mf should be freed from _base_interrupt 257 * 0 means the mf is freed from this function. 258 */ 259 u8 260 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 261 u32 reply) 262 { 263 MPI2DefaultReply_t *mpi_reply; 264 Mpi2SCSIIOReply_t *scsiio_reply; 265 Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply; 266 const void *sense_data; 267 u32 sz; 268 269 if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED) 270 return 1; 271 if (ioc->ctl_cmds.smid != smid) 272 return 1; 273 ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE; 274 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 275 if (mpi_reply) { 276 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 277 ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID; 278 /* get sense data */ 279 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 280 mpi_reply->Function == 281 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 282 scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply; 283 if (scsiio_reply->SCSIState & 284 MPI2_SCSI_STATE_AUTOSENSE_VALID) { 285 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, 286 le32_to_cpu(scsiio_reply->SenseCount)); 287 sense_data = mpt3sas_base_get_sense_buffer(ioc, 288 smid); 289 memcpy(ioc->ctl_cmds.sense, sense_data, sz); 290 } 291 } 292 /* 293 * Get Error Response data for NVMe device. The ctl_cmds.sense 294 * buffer is used to store the Error Response data. 295 */ 296 if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) { 297 nvme_error_reply = 298 (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply; 299 sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE, 300 le16_to_cpu(nvme_error_reply->ErrorResponseCount)); 301 sense_data = mpt3sas_base_get_sense_buffer(ioc, smid); 302 memcpy(ioc->ctl_cmds.sense, sense_data, sz); 303 } 304 } 305 306 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); 307 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING; 308 complete(&ioc->ctl_cmds.done); 309 return 1; 310 } 311 312 /** 313 * _ctl_check_event_type - determines when an event needs logging 314 * @ioc: per adapter object 315 * @event: firmware event 316 * 317 * The bitmask in ioc->event_type[] indicates which events should be 318 * be saved in the driver event_log. This bitmask is set by application. 
 *
 * Returns 1 when the event should be captured, or zero when there is no match.
 */
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u16 i;
	u32 desired_event;

	if (event >= 128 || !event || !ioc->event_log)
		return 0;

	desired_event = (1 << (event % 32));
	if (!desired_event)
		desired_event = 1;
	i = event / 32;
	return desired_event & ioc->event_type[i];
}

/**
 * mpt3sas_ctl_add_to_event_log - add event
 * @ioc: per adapter object
 * @mpi_reply: reply message frame
 *
 * Return nothing.
 */
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT3_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}

/**
 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply)
		mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
	return 1;
}

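/*
 * Illustrative sketch (not part of the driver): how the event_type[]
 * bitmask consumed by _ctl_check_event_type() above is laid out.  An
 * application enables logging of a firmware event by setting bit
 * (event % 32) in word (event / 32) of the mask it hands to the
 * MPT3EVENTENABLE ioctl.  The helper name below is hypothetical.
 *
 *	static void example_enable_event(u32 *event_type_mask, u16 event)
 *	{
 *		if (event >= MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * 32)
 *			return;
 *		event_type_mask[event / 32] |= 1U << (event % 32);
 *	}
 *
 * With the bit set, mpt3sas_ctl_add_to_event_log() copies the matching
 * event into the circular log and wakes any poll()/fasync waiters.
 */
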
/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc_number: ioc number passed in from the application
 * @iocpp: The ioc pointer is returned in this.
 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
 *		MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
 *
 * Return (-1) means error, else ioc_number.
 */
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
	int mpi_version)
{
	struct MPT3SAS_ADAPTER *ioc;
	int version = 0;
	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->id != ioc_number)
			continue;
		/* Check whether this ioctl command is from right
		 * ioctl device or not, if not continue the search.
		 */
		version = ioc->hba_mpi_version_belonged;
		/* MPI25_VERSION and MPI26_VERSION uses same ioctl
		 * device.
		 */
		if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
			if ((version == MPI25_VERSION) ||
			    (version == MPI26_VERSION))
				goto out;
			else
				continue;
		} else {
			if (version != mpi_version)
				continue;
		}
out:
		spin_unlock(&gioc_lock);
		*iocpp = ioc;
		return ioc_number;
	}
	spin_unlock(&gioc_lock);
	*iocpp = NULL;
	return -1;
}

/**
 * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 */
void
mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	int i;
	u8 issue_reset;

	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			      MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			     MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			mpt3sas_send_diag_release(ioc, i, &issue_reset);
		}
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
			ioc->ctl_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
			complete(&ioc->ctl_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));

		for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
			if (!(ioc->diag_buffer_status[i] &
			      MPT3_DIAG_BUFFER_IS_REGISTERED))
				continue;
			if ((ioc->diag_buffer_status[i] &
			     MPT3_DIAG_BUFFER_IS_RELEASED))
				continue;
			ioc->diag_buffer_status[i] |=
			    MPT3_DIAG_BUFFER_IS_DIAG_RESET;
		}
		break;
	}
}

/**
 * _ctl_fasync -
 * @fd -
 * @filep -
 * @mode -
 *
 * Called when an application requests the fasync callback handler.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}

/**
 * _ctl_poll -
 * @filep -
 * @wait -
 *
 */
static __poll_t
_ctl_poll(struct file *filep, poll_table *wait)
{
	struct MPT3SAS_ADAPTER *ioc;

	poll_wait(filep, &ctl_poll_wait, wait);

	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->aen_event_read_flag) {
			spin_unlock(&gioc_lock);
			return EPOLLIN | EPOLLRDNORM;
		}
	}
	spin_unlock(&gioc_lock);
	return 0;
}

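/*
 * Illustrative user-space sketch (not part of the driver): waiting for
 * the asynchronous event notification that _ctl_poll() and kill_fasync()
 * above signal.  The "/dev/mpt3ctl" node name is an assumption based on
 * the character device this module registers; error handling is omitted.
 *
 *	#include <poll.h>
 *	#include <fcntl.h>
 *
 *	int example_wait_for_aen(void)
 *	{
 *		struct pollfd pfd;
 *
 *		pfd.fd = open("/dev/mpt3ctl", O_RDWR);
 *		pfd.events = POLLIN;
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *			return 0;	// issue MPT3EVENTREPORT next
 *		return -1;
 *	}
 *
 * Reading the log via MPT3EVENTREPORT clears aen_event_read_flag, which
 * re-arms both poll() and SIGIO delivery.
 */
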
/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg - (struct mpt3_ioctl_command)
 * @tm_request - pointer to mf from user space
 *
 * Returns 0 when a smid is found, else fails;
 * on failure, the reply frame is filled.
 */
static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	u8 found = 0;
	u16 smid;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT3SAS_DEVICE *priv_data;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	for (smid = ioc->scsiio_depth; smid && !found; smid--) {
		struct scsiio_tracker *st;

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		st = scsi_cmd_priv(scmd);
		tm_request->TaskMID = cpu_to_le16(st->smid);
		found = 1;
	}

	if (!found) {
		dctlprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: handle(0x%04x), lun(%d), no active mid!!\n",
			ioc->name,
			desc, le16_to_cpu(tm_request->DevHandle), lun));
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
		desc, le16_to_cpu(tm_request->DevHandle), lun,
		le16_to_cpu(tm_request->TaskMID)));
	return 0;
}

/**
 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
 * @ioc: per adapter object
 * @karg - (struct mpt3_ioctl_command)
 * @mf - pointer to mf in user space
 */
static long
_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
	void __user *mf)
{
	MPI2RequestHeader_t *mpi_request = NULL, *request;
	MPI2DefaultReply_t *mpi_reply;
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
	struct _pcie_device *pcie_device = NULL;
	u32 ioc_state;
	u16 smid;
	u8 timeout;
	u8 issue_reset;
	u32 sz, sz_arg;
	void *psge;
	void *data_out = NULL;
	dma_addr_t data_out_dma = 0;
	size_t data_out_sz = 0;
	void *data_in = NULL;
	dma_addr_t data_in_dma = 0;
	size_t data_in_sz = 0;
	long ret;
	u16 wait_state_count;
	u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;

	issue_reset = 0;

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		ret = -EAGAIN;
		goto out;
	}

	wait_state_count = 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			pr_err(MPT3SAS_FMT
operational\n", 680 ioc->name, __func__); 681 ret = -EFAULT; 682 goto out; 683 } 684 ssleep(1); 685 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 686 pr_info(MPT3SAS_FMT 687 "%s: waiting for operational state(count=%d)\n", 688 ioc->name, 689 __func__, wait_state_count); 690 } 691 if (wait_state_count) 692 pr_info(MPT3SAS_FMT "%s: ioc is operational\n", 693 ioc->name, __func__); 694 695 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); 696 if (!mpi_request) { 697 pr_err(MPT3SAS_FMT 698 "%s: failed obtaining a memory for mpi_request\n", 699 ioc->name, __func__); 700 ret = -ENOMEM; 701 goto out; 702 } 703 704 /* Check for overflow and wraparound */ 705 if (karg.data_sge_offset * 4 > ioc->request_sz || 706 karg.data_sge_offset > (UINT_MAX / 4)) { 707 ret = -EINVAL; 708 goto out; 709 } 710 711 /* copy in request message frame from user */ 712 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { 713 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, 714 __func__); 715 ret = -EFAULT; 716 goto out; 717 } 718 719 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 720 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); 721 if (!smid) { 722 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 723 ioc->name, __func__); 724 ret = -EAGAIN; 725 goto out; 726 } 727 } else { 728 /* Use first reserved smid for passthrough ioctls */ 729 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; 730 } 731 732 ret = 0; 733 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 734 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 735 request = mpt3sas_base_get_msg_frame(ioc, smid); 736 memcpy(request, mpi_request, karg.data_sge_offset*4); 737 ioc->ctl_cmds.smid = smid; 738 data_out_sz = karg.data_out_size; 739 data_in_sz = karg.data_in_size; 740 741 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 742 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || 743 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || 744 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH || 745 mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) { 746 747 device_handle = le16_to_cpu(mpi_request->FunctionDependent1); 748 if (!device_handle || (device_handle > 749 ioc->facts.MaxDevHandle)) { 750 ret = -EINVAL; 751 mpt3sas_base_free_smid(ioc, smid); 752 goto out; 753 } 754 } 755 756 /* obtain dma-able memory for data transfer */ 757 if (data_out_sz) /* WRITE */ { 758 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, 759 &data_out_dma); 760 if (!data_out) { 761 pr_err("failure at %s:%d/%s()!\n", __FILE__, 762 __LINE__, __func__); 763 ret = -ENOMEM; 764 mpt3sas_base_free_smid(ioc, smid); 765 goto out; 766 } 767 if (copy_from_user(data_out, karg.data_out_buf_ptr, 768 data_out_sz)) { 769 pr_err("failure at %s:%d/%s()!\n", __FILE__, 770 __LINE__, __func__); 771 ret = -EFAULT; 772 mpt3sas_base_free_smid(ioc, smid); 773 goto out; 774 } 775 } 776 777 if (data_in_sz) /* READ */ { 778 data_in = pci_alloc_consistent(ioc->pdev, data_in_sz, 779 &data_in_dma); 780 if (!data_in) { 781 pr_err("failure at %s:%d/%s()!\n", __FILE__, 782 __LINE__, __func__); 783 ret = -ENOMEM; 784 mpt3sas_base_free_smid(ioc, smid); 785 goto out; 786 } 787 } 788 789 psge = (void *)request + (karg.data_sge_offset*4); 790 791 /* send command to firmware */ 792 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); 793 794 init_completion(&ioc->ctl_cmds.done); 795 switch (mpi_request->Function) { 796 case MPI2_FUNCTION_NVME_ENCAPSULATED: 797 { 798 nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; 799 
		/*
		 * Get the Physical Address of the sense buffer.
		 * Use Error Response buffer address field to hold the sense
		 * buffer address.
		 * Clear the internal sense buffer, which will potentially hold
		 * the Completion Queue Entry on return, or 0 if no Entry.
		 * Build the PRPs and set direction bits.
		 * Send the request.
		 */
		nvme_encap_request->ErrorResponseBaseAddress =
		    cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
		nvme_encap_request->ErrorResponseBaseAddress |=
		    cpu_to_le64(le32_to_cpu(
		    mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
		nvme_encap_request->ErrorResponseAllocationLength =
		    cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
		memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
		ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
		    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
			    "ioctl failed due to device removal in progress\n",
			    ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		mpt3sas_base_put_smid_nvme_encap(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsiio_request =
		    (Mpi2SCSIIORequest_t *)request;
		scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
		scsiio_request->SenseBufferLowAddress =
		    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
		memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT
				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
				ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
			ioc->put_smid_scsi_io(ioc, smid, device_handle);
		else
			mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
	{
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)request;

		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
			ioc->name,
			le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
		ioc->got_task_abort_from_ioctl = 1;
		if (tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
		    tm_request->TaskType ==
		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
			if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
				mpt3sas_base_free_smid(ioc, smid);
				ioc->got_task_abort_from_ioctl = 0;
				goto out;
			}
		}
		ioc->got_task_abort_from_ioctl = 0;

		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT
				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
				ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
		break;
	}
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
	{
		Mpi2SmpPassthroughRequest_t *smp_request =
		    (Mpi2SmpPassthroughRequest_t *)mpi_request;
		u8 *data;

		/* ioc determines which port to use */
		smp_request->PhysicalPort = 0xFF;
		if (smp_request->PassthroughFlags &
		    MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
			data = (u8 *)&smp_request->SGL;
		else {
			if (unlikely(data_out == NULL)) {
				pr_err("failure at %s:%d/%s()!\n",
				    __FILE__, __LINE__, __func__);
				mpt3sas_base_free_smid(ioc, smid);
				ret = -EINVAL;
				goto out;
			}
			data = data_out;
		}

		if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
	{
		if (test_bit(device_handle, ioc->device_remove_in_progress)) {
			dtmprintk(ioc, pr_info(MPT3SAS_FMT
				"handle(0x%04x) :ioctl failed due to device removal in progress\n",
				ioc->name, device_handle));
			mpt3sas_base_free_smid(ioc, smid);
			ret = -EINVAL;
			goto out;
		}
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_FW_DOWNLOAD:
	case MPI2_FUNCTION_FW_UPLOAD:
	{
		ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
		    data_in_sz);
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_TOOLBOX:
	{
		Mpi2ToolboxCleanRequest_t *toolbox_request =
		    (Mpi2ToolboxCleanRequest_t *)mpi_request;

		if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
			ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		} else {
			ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
			    data_in_dma, data_in_sz);
		}
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
	{
		Mpi2SasIoUnitControlRequest_t *sasiounit_request =
		    (Mpi2SasIoUnitControlRequest_t *)mpi_request;

		if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
		    || sasiounit_request->Operation ==
		    MPI2_SAS_OP_PHY_LINK_RESET) {
			ioc->ioc_link_reset_in_progress = 1;
			ioc->ignore_loginfos = 1;
		}
		/* drop to default case for posting the request */
	}
	default:
		ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
		    data_in_dma, data_in_sz);
		mpt3sas_base_put_smid_default(ioc, smid);
		break;
	}

	if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
	else
		timeout = karg.timeout;
	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
		Mpi2SCSITaskManagementRequest_t *tm_request =
		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
		mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
		    tm_request->DevHandle));
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	} else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
	    mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
	    ioc->ioc_link_reset_in_progress) {
		ioc->ioc_link_reset_in_progress = 0;
		ioc->ignore_loginfos = 0;
	}
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request, karg.data_sge_offset);
		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	mpi_reply = ioc->ctl_cmds.reply;

	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
	    (ioc->logging_level & MPT_DEBUG_TM)) {
		Mpi2SCSITaskManagementReply_t *tm_reply =
		    (Mpi2SCSITaskManagementReply_t *)mpi_reply;

		pr_info(MPT3SAS_FMT "TASK_MGMT: " \
		    "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
		    "TerminationCount(0x%08x)\n", ioc->name,
		    le16_to_cpu(tm_reply->IOCStatus),
		    le32_to_cpu(tm_reply->IOCLogInfo),
		    le32_to_cpu(tm_reply->TerminationCount));
	}

	/* copy out xdata to user */
	if (data_in_sz) {
		if (copy_to_user(karg.data_in_buf_ptr, data_in,
		    data_in_sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out reply message frame to user */
	if (karg.max_reply_bytes) {
		sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

	/* copy out sense/NVMe Error Response to user */
	if (karg.max_sense_bytes && (mpi_request->Function ==
	    MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
	    MPI2_FUNCTION_NVME_ENCAPSULATED)) {
		if (karg.sense_data_ptr == NULL) {
			pr_info(MPT3SAS_FMT "Response buffer provided"
			    " by application is NULL; Response data will"
			    " not be returned.\n", ioc->name);
			goto out;
		}
		sz_arg = (mpi_request->Function ==
		    MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
							SCSI_SENSE_BUFFERSIZE;
		sz = min_t(u32, karg.max_sense_bytes, sz_arg);
		if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
		    sz)) {
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
			ret = -ENODATA;
			goto out;
		}
	}

 issue_host_reset:
	if (issue_reset) {
		ret = -ENODATA;
		if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_request->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
		    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
			pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
				ioc->name,
				le16_to_cpu(mpi_request->FunctionDependent1));
			mpt3sas_halt_firmware(ioc);
			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
				le16_to_cpu(mpi_request->FunctionDependent1));
			if (pcie_device && (!ioc->tm_custom_handling))
				mpt3sas_scsih_issue_locked_tm(ioc,
				  le16_to_cpu(mpi_request->FunctionDependent1),
				  0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
				  0, pcie_device->reset_timeout,
				  tr_method);
			else
				mpt3sas_scsih_issue_locked_tm(ioc,
				  le16_to_cpu(mpi_request->FunctionDependent1),
				  0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
				  0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
		} else
			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	}

 out:
	if (pcie_device)
		pcie_device_put(pcie_device);

	/* free memory associated with sg buffers */
	if (data_in)
		pci_free_consistent(ioc->pdev, data_in_sz, data_in,
		    data_in_dma);

	if (data_out)
		pci_free_consistent(ioc->pdev, data_out_sz, data_out,
		    data_out_dma);

	kfree(mpi_request);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return ret;
}

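/*
 * Illustrative sketch (not part of the driver): the contract between a
 * user-space caller of MPT3COMMAND and _ctl_do_mpt_command() above.
 * data_sge_offset is in 4-byte words; the driver copies that many words
 * of the caller's message frame and then builds the SGE/PRP itself at
 * request + data_sge_offset * 4, so the caller must not place its own
 * SGE there.  Field names come from struct mpt3_ioctl_command as used in
 * this file; the values shown are only an example.
 *
 *	struct mpt3_ioctl_command karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.data_sge_offset = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
 *	karg.data_in_buf_ptr = user_data_buffer;
 *	karg.data_in_size = user_data_len;
 *	karg.reply_frame_buf_ptr = user_reply_buffer;
 *	karg.max_reply_bytes = user_reply_len;
 *	karg.timeout = 30;	// seconds
 *
 * Timeouts shorter than MPT3_IOCTL_DEFAULT_TIMEOUT are raised to that
 * minimum before waiting on ctl_cmds.done.
 */
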
/**
 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_iocinfo karg;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	memset(&karg, 0 , sizeof(karg));
	if (ioc->pfacts)
		karg.port_number = ioc->pfacts[0].PortNumber;
	karg.hw_rev = ioc->pdev->revision;
	karg.pci_id = ioc->pdev->device;
	karg.subsystem_device = ioc->pdev->subsystem_device;
	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
	karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
	karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
	karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
	karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
	karg.firmware_version = ioc->facts.FWVersion.Word;
	strcpy(karg.driver_version, ioc->driver_name);
	strcat(karg.driver_version, "-");
	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		if (ioc->is_warpdrive)
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
		else
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
		strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		if (ioc->is_gen35_ioc)
			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
		else
			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
		strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
		break;
	}
	karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventquery karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
	memcpy(karg.event_types, ioc->event_type,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventenable karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	memcpy(ioc->event_type, karg.event_types,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
	mpt3sas_base_validate_event_type(ioc, ioc->event_type);

	if (ioc->event_log)
		return 0;
	/* initialize event_log */
	ioc->event_context = 0;
	ioc->aen_event_read_flag = 0;
	ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
	    sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
	if (!ioc->event_log) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}
	return 0;
}

/**
 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventreport karg;
	u32 number_bytes, max_events, max;
	struct mpt3_ioctl_eventreport __user *uarg = arg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	number_bytes = karg.hdr.max_data_size -
	    sizeof(struct mpt3_ioctl_header);
	max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
	max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);

	/* If fewer than 1 event is requested, there must have
	 * been some type of error.
	 */
	if (!max || !ioc->event_log)
		return -ENODATA;

	number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* reset flag so SIGIO can restart */
	ioc->aen_event_read_flag = 0;
	return 0;
}

/**
 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_diag_reset karg;
	int retval;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	if (ioc->shost_recovery || ioc->pci_error_recovery ||
	    ioc->is_driver_loading)
		return -EAGAIN;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	pr_info(MPT3SAS_FMT "host reset: %s\n",
"SUCCESS" : "FAILED")); 1308 return 0; 1309 } 1310 1311 /** 1312 * _ctl_btdh_search_sas_device - searching for sas device 1313 * @ioc: per adapter object 1314 * @btdh: btdh ioctl payload 1315 */ 1316 static int 1317 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, 1318 struct mpt3_ioctl_btdh_mapping *btdh) 1319 { 1320 struct _sas_device *sas_device; 1321 unsigned long flags; 1322 int rc = 0; 1323 1324 if (list_empty(&ioc->sas_device_list)) 1325 return rc; 1326 1327 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1328 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 1329 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1330 btdh->handle == sas_device->handle) { 1331 btdh->bus = sas_device->channel; 1332 btdh->id = sas_device->id; 1333 rc = 1; 1334 goto out; 1335 } else if (btdh->bus == sas_device->channel && btdh->id == 1336 sas_device->id && btdh->handle == 0xFFFF) { 1337 btdh->handle = sas_device->handle; 1338 rc = 1; 1339 goto out; 1340 } 1341 } 1342 out: 1343 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1344 return rc; 1345 } 1346 1347 /** 1348 * _ctl_btdh_search_pcie_device - searching for pcie device 1349 * @ioc: per adapter object 1350 * @btdh: btdh ioctl payload 1351 */ 1352 static int 1353 _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc, 1354 struct mpt3_ioctl_btdh_mapping *btdh) 1355 { 1356 struct _pcie_device *pcie_device; 1357 unsigned long flags; 1358 int rc = 0; 1359 1360 if (list_empty(&ioc->pcie_device_list)) 1361 return rc; 1362 1363 spin_lock_irqsave(&ioc->pcie_device_lock, flags); 1364 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { 1365 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1366 btdh->handle == pcie_device->handle) { 1367 btdh->bus = pcie_device->channel; 1368 btdh->id = pcie_device->id; 1369 rc = 1; 1370 goto out; 1371 } else if (btdh->bus == pcie_device->channel && btdh->id == 1372 pcie_device->id && btdh->handle == 0xFFFF) { 1373 btdh->handle = pcie_device->handle; 1374 rc = 1; 1375 goto out; 1376 } 1377 } 1378 out: 1379 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); 1380 return rc; 1381 } 1382 1383 /** 1384 * _ctl_btdh_search_raid_device - searching for raid device 1385 * @ioc: per adapter object 1386 * @btdh: btdh ioctl payload 1387 */ 1388 static int 1389 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, 1390 struct mpt3_ioctl_btdh_mapping *btdh) 1391 { 1392 struct _raid_device *raid_device; 1393 unsigned long flags; 1394 int rc = 0; 1395 1396 if (list_empty(&ioc->raid_device_list)) 1397 return rc; 1398 1399 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1400 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1401 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1402 btdh->handle == raid_device->handle) { 1403 btdh->bus = raid_device->channel; 1404 btdh->id = raid_device->id; 1405 rc = 1; 1406 goto out; 1407 } else if (btdh->bus == raid_device->channel && btdh->id == 1408 raid_device->id && btdh->handle == 0xFFFF) { 1409 btdh->handle = raid_device->handle; 1410 rc = 1; 1411 goto out; 1412 } 1413 } 1414 out: 1415 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1416 return rc; 1417 } 1418 1419 /** 1420 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode 1421 * @ioc: per adapter object 1422 * @arg - user space buffer containing ioctl content 1423 */ 1424 static long 1425 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1426 { 1427 struct mpt3_ioctl_btdh_mapping karg; 1428 int rc; 1429 1430 if 
/**
 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 */
static long
_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_btdh_mapping karg;
	int rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	rc = _ctl_btdh_search_sas_device(ioc, &karg);
	if (!rc)
		rc = _ctl_btdh_search_pcie_device(ioc, &karg);
	if (!rc)
		_ctl_btdh_search_raid_device(ioc, &karg);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_diag_capability - return diag buffer capability
 * @ioc: per adapter object
 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
 *
 * returns 1 when diag buffer support is enabled in firmware
 */
static u8
_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
{
	u8 rc = 0;

	switch (buffer_type) {
	case MPI2_DIAG_BUF_TYPE_TRACE:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_EXTENDED:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			rc = 1;
	}

	return rc;
}


/**
 * _ctl_diag_register_2 - wrapper for registering diag buffer support
 * @ioc: per adapter object
 * @diag_register: the diag_register struct passed in from user space
 *
 */
static long
_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_diag_register *diag_register)
{
	int rc, i;
	void *request_data = NULL;
	dma_addr_t request_data_dma;
	u32 request_data_sz = 0;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	u8 buffer_type;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	u8 issue_reset = 0;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		pr_err(MPT3SAS_FMT
		    "%s: failed due to ioc not operational\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	buffer_type = diag_register->buffer_type;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
		pr_err(MPT3SAS_FMT
		    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
		    ioc->name, __func__,
		    buffer_type);
		return -EINVAL;
	}

	if (diag_register->requested_buffer_size % 4) {
		pr_err(MPT3SAS_FMT
		    "%s: the requested_buffer_size is not 4 byte aligned\n",
		    ioc->name, __func__);
		return -EINVAL;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->ctl_cmds.smid = smid;

	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
	ioc->diag_buffer_status[buffer_type] = 0;
	memcpy(ioc->product_specific[buffer_type],
	    diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;

	if (request_data) {
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
			pci_free_consistent(ioc->pdev,
			    ioc->diag_buffer_sz[buffer_type],
			    request_data, request_data_dma);
			request_data = NULL;
		}
	}

	if (request_data == NULL) {
		ioc->diag_buffer_sz[buffer_type] = 0;
		ioc->diag_buffer_dma[buffer_type] = 0;
		request_data = pci_alloc_consistent(
			ioc->pdev, request_data_sz, &request_data_dma);
		if (request_data == NULL) {
			pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
			    " for diag buffers, requested size(%d)\n",
			    ioc->name, __func__, request_data_sz);
			mpt3sas_base_free_smid(ioc, smid);
			return -ENOMEM;
		}
		ioc->diag_buffer[buffer_type] = request_data;
		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
	}

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = diag_register->buffer_type;
	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	dctlprintk(ioc, pr_info(MPT3SAS_FMT
	    "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
	    ioc->name, __func__, request_data,
	    (unsigned long long)request_data_dma,
	    le32_to_cpu(mpi_request->BufferLength)));

	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);

	init_completion(&ioc->ctl_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
		    __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4);
		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
			issue_reset = 1;
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		pr_err(MPT3SAS_FMT "%s: no reply message\n",
		    ioc->name, __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
		    ioc->name, __func__));
	} else {
		pr_info(MPT3SAS_FMT
		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
		    ioc->name, __func__,
		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

 out:

	if (rc && request_data)
		pci_free_consistent(ioc->pdev, request_data_sz,
		    request_data, request_data_dma);

	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
 * @ioc: per adapter object
 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
 *
 * This is called when command line option diag_buffer_enable is enabled
 * at driver load time.
 */
void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
	struct mpt3_diag_register diag_register;

	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));

	if (bits_to_register & 1) {
		pr_info(MPT3SAS_FMT "registering trace buffer support\n",
		    ioc->name);
		ioc->diag_trigger_master.MasterData =
		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075900;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 2) {
		pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 4) {
		pr_info(MPT3SAS_FMT "registering extended buffer support\n",
		    ioc->name);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}
}

/**
 * _ctl_diag_register - application register with driver
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * This will allow the driver to setup any required buffers that will be
 * needed by firmware to communicate with the driver.
 */
static long
_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_register karg;
	long rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	rc = _ctl_diag_register_2(ioc, &karg);
	return rc;
}

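/*
 * Illustrative sketch (not part of the driver): the expected life cycle
 * of a host diagnostic buffer as driven by the ioctls in this file.  The
 * ioctl opcode names are assumptions based on mpt3sas_ctl.h and may
 * differ; the underlying flow is the firmware-defined DIAG_BUFFER_POST /
 * DIAG_RELEASE sequence.
 *
 *	MPT3DIAGREGISTER   -> _ctl_diag_register()   : allocate + post buffer
 *	MPT3DIAGQUERY      -> _ctl_diag_query()      : read size/flags/unique_id
 *	MPT3DIAGRELEASE    -> mpt3sas_send_diag_release() : firmware stops writing
 *	MPT3DIAGREADBUFFER                           : copy contents to the app
 *	MPT3DIAGUNREGISTER -> _ctl_diag_unregister() : free the DMA memory
 *
 * Unregister is only accepted after the buffer has been released, which
 * is why _ctl_diag_unregister() below checks MPT3_DIAG_BUFFER_IS_RELEASED.
 */
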
/**
 * _ctl_diag_unregister - application unregister with driver
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * This will allow the driver to clean up any memory allocated for diag
 * messages and to free up any resources.
 */
static long
_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_unregister karg;
	void *request_data;
	dma_addr_t request_data_dma;
	u32 request_data_sz;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	buffer_type = karg.unique_id & 0x000000ff;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		pr_err(MPT3SAS_FMT
		    "%s: buffer_type(0x%02x) is not registered\n",
		    ioc->name, __func__, buffer_type);
		return -EINVAL;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		pr_err(MPT3SAS_FMT
		    "%s: buffer_type(0x%02x) has not been released\n",
		    ioc->name, __func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		pr_err(MPT3SAS_FMT
		    "%s: unique_id(0x%08x) is not registered\n",
		    ioc->name, __func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		pr_err(MPT3SAS_FMT
		    "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
		    ioc->name, __func__, buffer_type);
		return -ENOMEM;
	}

	request_data_sz = ioc->diag_buffer_sz[buffer_type];
	request_data_dma = ioc->diag_buffer_dma[buffer_type];
	pci_free_consistent(ioc->pdev, request_data_sz,
	    request_data, request_data_dma);
	ioc->diag_buffer[buffer_type] = NULL;
	ioc->diag_buffer_status[buffer_type] = 0;
	return 0;
}

/**
 * _ctl_diag_query - query relevant info associated with diag buffers
 * @ioc: per adapter object
 * @arg - user space buffer containing ioctl content
 *
 * The application will send only buffer_type and unique_id. Driver will
 * inspect unique_id first, if valid, fill in all the info. If unique_id is
 * 0x00, the driver will return info specified by Buffer Type.
1824 */ 1825 static long 1826 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1827 { 1828 struct mpt3_diag_query karg; 1829 void *request_data; 1830 int i; 1831 u8 buffer_type; 1832 1833 if (copy_from_user(&karg, arg, sizeof(karg))) { 1834 pr_err("failure at %s:%d/%s()!\n", 1835 __FILE__, __LINE__, __func__); 1836 return -EFAULT; 1837 } 1838 1839 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1840 __func__)); 1841 1842 karg.application_flags = 0; 1843 buffer_type = karg.buffer_type; 1844 1845 if (!_ctl_diag_capability(ioc, buffer_type)) { 1846 pr_err(MPT3SAS_FMT 1847 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1848 ioc->name, __func__, buffer_type); 1849 return -EPERM; 1850 } 1851 1852 if ((ioc->diag_buffer_status[buffer_type] & 1853 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1854 pr_err(MPT3SAS_FMT 1855 "%s: buffer_type(0x%02x) is not registered\n", 1856 ioc->name, __func__, buffer_type); 1857 return -EINVAL; 1858 } 1859 1860 if (karg.unique_id & 0xffffff00) { 1861 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1862 pr_err(MPT3SAS_FMT 1863 "%s: unique_id(0x%08x) is not registered\n", 1864 ioc->name, __func__, karg.unique_id); 1865 return -EINVAL; 1866 } 1867 } 1868 1869 request_data = ioc->diag_buffer[buffer_type]; 1870 if (!request_data) { 1871 pr_err(MPT3SAS_FMT 1872 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 1873 ioc->name, __func__, buffer_type); 1874 return -ENOMEM; 1875 } 1876 1877 if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) 1878 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1879 MPT3_APP_FLAGS_BUFFER_VALID); 1880 else 1881 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1882 MPT3_APP_FLAGS_BUFFER_VALID | 1883 MPT3_APP_FLAGS_FW_BUFFER_ACCESS); 1884 1885 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 1886 karg.product_specific[i] = 1887 ioc->product_specific[buffer_type][i]; 1888 1889 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; 1890 karg.driver_added_buffer_size = 0; 1891 karg.unique_id = ioc->unique_id[buffer_type]; 1892 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; 1893 1894 if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { 1895 pr_err(MPT3SAS_FMT 1896 "%s: unable to write mpt3_diag_query data @ %p\n", 1897 ioc->name, __func__, arg); 1898 return -EFAULT; 1899 } 1900 return 0; 1901 } 1902 1903 /** 1904 * mpt3sas_send_diag_release - Diag Release Message 1905 * @ioc: per adapter object 1906 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED 1907 * @issue_reset - specifies whether host reset is required. 
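 *
 * Return: 0 on success, else a negative errno.  On a timeout with no
 * reset already in progress, *issue_reset is set and the caller is
 * expected to perform the hard reset itself, as _ctl_diag_release()
 * below does:
 *
 *	u8 issue_reset = 0;
 *
 *	rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
 *	if (issue_reset)
 *		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);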
1908 * 1909 */ 1910 int 1911 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, 1912 u8 *issue_reset) 1913 { 1914 Mpi2DiagReleaseRequest_t *mpi_request; 1915 Mpi2DiagReleaseReply_t *mpi_reply; 1916 u16 smid; 1917 u16 ioc_status; 1918 u32 ioc_state; 1919 int rc; 1920 1921 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1922 __func__)); 1923 1924 rc = 0; 1925 *issue_reset = 0; 1926 1927 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1928 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1929 if (ioc->diag_buffer_status[buffer_type] & 1930 MPT3_DIAG_BUFFER_IS_REGISTERED) 1931 ioc->diag_buffer_status[buffer_type] |= 1932 MPT3_DIAG_BUFFER_IS_RELEASED; 1933 dctlprintk(ioc, pr_info(MPT3SAS_FMT 1934 "%s: skipping due to FAULT state\n", ioc->name, 1935 __func__)); 1936 rc = -EAGAIN; 1937 goto out; 1938 } 1939 1940 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 1941 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 1942 ioc->name, __func__); 1943 rc = -EAGAIN; 1944 goto out; 1945 } 1946 1947 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 1948 if (!smid) { 1949 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 1950 ioc->name, __func__); 1951 rc = -EAGAIN; 1952 goto out; 1953 } 1954 1955 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 1956 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 1957 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1958 ioc->ctl_cmds.smid = smid; 1959 1960 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; 1961 mpi_request->BufferType = buffer_type; 1962 mpi_request->VF_ID = 0; /* TODO */ 1963 mpi_request->VP_ID = 0; 1964 1965 init_completion(&ioc->ctl_cmds.done); 1966 mpt3sas_base_put_smid_default(ioc, smid); 1967 wait_for_completion_timeout(&ioc->ctl_cmds.done, 1968 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1969 1970 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1971 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 1972 __func__); 1973 _debug_dump_mf(mpi_request, 1974 sizeof(Mpi2DiagReleaseRequest_t)/4); 1975 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 1976 *issue_reset = 1; 1977 rc = -EFAULT; 1978 goto out; 1979 } 1980 1981 /* process the completed Reply Message Frame */ 1982 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 1983 pr_err(MPT3SAS_FMT "%s: no reply message\n", 1984 ioc->name, __func__); 1985 rc = -EFAULT; 1986 goto out; 1987 } 1988 1989 mpi_reply = ioc->ctl_cmds.reply; 1990 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 1991 1992 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1993 ioc->diag_buffer_status[buffer_type] |= 1994 MPT3_DIAG_BUFFER_IS_RELEASED; 1995 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 1996 ioc->name, __func__)); 1997 } else { 1998 pr_info(MPT3SAS_FMT 1999 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 2000 ioc->name, __func__, 2001 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 2002 rc = -EFAULT; 2003 } 2004 2005 out: 2006 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 2007 return rc; 2008 } 2009 2010 /** 2011 * _ctl_diag_release - request to send Diag Release Message to firmware 2012 * @arg - user space buffer containing ioctl content 2013 * 2014 * This allows ownership of the specified buffer to returned to the driver, 2015 * allowing an application to read the buffer without fear that firmware is 2016 * overwriting information in the buffer. 
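 *
 * A minimal user-space sketch (illustrative only; assumes the structures
 * from mpt3sas_ctl.h and an fd open on the mpt3ctl character device,
 * error handling and includes omitted):
 *
 *	struct mpt3_diag_release karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.unique_id = 0x7075900;
 *	if (ioctl(fd, MPT3DIAGRELEASE, &karg) == -1)
 *		perror("MPT3DIAGRELEASE");
 *
 * Once released, the buffer contents can be fetched with
 * MPT3DIAGREADBUFFER and the registration dropped with
 * MPT3DIAGUNREGISTER.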
2017 */ 2018 static long 2019 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 2020 { 2021 struct mpt3_diag_release karg; 2022 void *request_data; 2023 int rc; 2024 u8 buffer_type; 2025 u8 issue_reset = 0; 2026 2027 if (copy_from_user(&karg, arg, sizeof(karg))) { 2028 pr_err("failure at %s:%d/%s()!\n", 2029 __FILE__, __LINE__, __func__); 2030 return -EFAULT; 2031 } 2032 2033 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2034 __func__)); 2035 2036 buffer_type = karg.unique_id & 0x000000ff; 2037 if (!_ctl_diag_capability(ioc, buffer_type)) { 2038 pr_err(MPT3SAS_FMT 2039 "%s: doesn't have capability for buffer_type(0x%02x)\n", 2040 ioc->name, __func__, buffer_type); 2041 return -EPERM; 2042 } 2043 2044 if ((ioc->diag_buffer_status[buffer_type] & 2045 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2046 pr_err(MPT3SAS_FMT 2047 "%s: buffer_type(0x%02x) is not registered\n", 2048 ioc->name, __func__, buffer_type); 2049 return -EINVAL; 2050 } 2051 2052 if (karg.unique_id != ioc->unique_id[buffer_type]) { 2053 pr_err(MPT3SAS_FMT 2054 "%s: unique_id(0x%08x) is not registered\n", 2055 ioc->name, __func__, karg.unique_id); 2056 return -EINVAL; 2057 } 2058 2059 if (ioc->diag_buffer_status[buffer_type] & 2060 MPT3_DIAG_BUFFER_IS_RELEASED) { 2061 pr_err(MPT3SAS_FMT 2062 "%s: buffer_type(0x%02x) is already released\n", 2063 ioc->name, __func__, 2064 buffer_type); 2065 return 0; 2066 } 2067 2068 request_data = ioc->diag_buffer[buffer_type]; 2069 2070 if (!request_data) { 2071 pr_err(MPT3SAS_FMT 2072 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", 2073 ioc->name, __func__, buffer_type); 2074 return -ENOMEM; 2075 } 2076 2077 /* buffers were released by due to host reset */ 2078 if ((ioc->diag_buffer_status[buffer_type] & 2079 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) { 2080 ioc->diag_buffer_status[buffer_type] |= 2081 MPT3_DIAG_BUFFER_IS_RELEASED; 2082 ioc->diag_buffer_status[buffer_type] &= 2083 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; 2084 pr_err(MPT3SAS_FMT 2085 "%s: buffer_type(0x%02x) was released due to host reset\n", 2086 ioc->name, __func__, buffer_type); 2087 return 0; 2088 } 2089 2090 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); 2091 2092 if (issue_reset) 2093 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 2094 2095 return rc; 2096 } 2097 2098 /** 2099 * _ctl_diag_read_buffer - request for copy of the diag buffer 2100 * @ioc: per adapter object 2101 * @arg - user space buffer containing ioctl content 2102 */ 2103 static long 2104 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 2105 { 2106 struct mpt3_diag_read_buffer karg; 2107 struct mpt3_diag_read_buffer __user *uarg = arg; 2108 void *request_data, *diag_data; 2109 Mpi2DiagBufferPostRequest_t *mpi_request; 2110 Mpi2DiagBufferPostReply_t *mpi_reply; 2111 int rc, i; 2112 u8 buffer_type; 2113 unsigned long request_size, copy_size; 2114 u16 smid; 2115 u16 ioc_status; 2116 u8 issue_reset = 0; 2117 2118 if (copy_from_user(&karg, arg, sizeof(karg))) { 2119 pr_err("failure at %s:%d/%s()!\n", 2120 __FILE__, __LINE__, __func__); 2121 return -EFAULT; 2122 } 2123 2124 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2125 __func__)); 2126 2127 buffer_type = karg.unique_id & 0x000000ff; 2128 if (!_ctl_diag_capability(ioc, buffer_type)) { 2129 pr_err(MPT3SAS_FMT 2130 "%s: doesn't have capability for buffer_type(0x%02x)\n", 2131 ioc->name, __func__, buffer_type); 2132 return -EPERM; 2133 } 2134 2135 if (karg.unique_id != ioc->unique_id[buffer_type]) { 2136 pr_err(MPT3SAS_FMT 2137 "%s: 
unique_id(0x%08x) is not registered\n", 2138 ioc->name, __func__, karg.unique_id); 2139 return -EINVAL; 2140 } 2141 2142 request_data = ioc->diag_buffer[buffer_type]; 2143 if (!request_data) { 2144 pr_err(MPT3SAS_FMT 2145 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 2146 ioc->name, __func__, buffer_type); 2147 return -ENOMEM; 2148 } 2149 2150 request_size = ioc->diag_buffer_sz[buffer_type]; 2151 2152 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { 2153 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \ 2154 "or bytes_to_read are not 4 byte aligned\n", ioc->name, 2155 __func__); 2156 return -EINVAL; 2157 } 2158 2159 if (karg.starting_offset > request_size) 2160 return -EINVAL; 2161 2162 diag_data = (void *)(request_data + karg.starting_offset); 2163 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2164 "%s: diag_buffer(%p), offset(%d), sz(%d)\n", 2165 ioc->name, __func__, 2166 diag_data, karg.starting_offset, karg.bytes_to_read)); 2167 2168 /* Truncate data on requests that are too large */ 2169 if ((diag_data + karg.bytes_to_read < diag_data) || 2170 (diag_data + karg.bytes_to_read > request_data + request_size)) 2171 copy_size = request_size - karg.starting_offset; 2172 else 2173 copy_size = karg.bytes_to_read; 2174 2175 if (copy_to_user((void __user *)uarg->diagnostic_data, 2176 diag_data, copy_size)) { 2177 pr_err(MPT3SAS_FMT 2178 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", 2179 ioc->name, __func__, diag_data); 2180 return -EFAULT; 2181 } 2182 2183 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) 2184 return 0; 2185 2186 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2187 "%s: Reregister buffer_type(0x%02x)\n", 2188 ioc->name, __func__, buffer_type)); 2189 if ((ioc->diag_buffer_status[buffer_type] & 2190 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { 2191 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2192 "%s: buffer_type(0x%02x) is still registered\n", 2193 ioc->name, __func__, buffer_type)); 2194 return 0; 2195 } 2196 /* Get a free request frame and save the message context. 
2197 */ 2198 2199 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 2200 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 2201 ioc->name, __func__); 2202 rc = -EAGAIN; 2203 goto out; 2204 } 2205 2206 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 2207 if (!smid) { 2208 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 2209 ioc->name, __func__); 2210 rc = -EAGAIN; 2211 goto out; 2212 } 2213 2214 rc = 0; 2215 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 2216 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 2217 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2218 ioc->ctl_cmds.smid = smid; 2219 2220 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 2221 mpi_request->BufferType = buffer_type; 2222 mpi_request->BufferLength = 2223 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); 2224 mpi_request->BufferAddress = 2225 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); 2226 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 2227 mpi_request->ProductSpecific[i] = 2228 cpu_to_le32(ioc->product_specific[buffer_type][i]); 2229 mpi_request->VF_ID = 0; /* TODO */ 2230 mpi_request->VP_ID = 0; 2231 2232 init_completion(&ioc->ctl_cmds.done); 2233 mpt3sas_base_put_smid_default(ioc, smid); 2234 wait_for_completion_timeout(&ioc->ctl_cmds.done, 2235 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 2236 2237 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 2238 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 2239 __func__); 2240 _debug_dump_mf(mpi_request, 2241 sizeof(Mpi2DiagBufferPostRequest_t)/4); 2242 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 2243 issue_reset = 1; 2244 goto issue_host_reset; 2245 } 2246 2247 /* process the completed Reply Message Frame */ 2248 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 2249 pr_err(MPT3SAS_FMT "%s: no reply message\n", 2250 ioc->name, __func__); 2251 rc = -EFAULT; 2252 goto out; 2253 } 2254 2255 mpi_reply = ioc->ctl_cmds.reply; 2256 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 2257 2258 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 2259 ioc->diag_buffer_status[buffer_type] |= 2260 MPT3_DIAG_BUFFER_IS_REGISTERED; 2261 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 2262 ioc->name, __func__)); 2263 } else { 2264 pr_info(MPT3SAS_FMT 2265 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 2266 ioc->name, __func__, 2267 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 2268 rc = -EFAULT; 2269 } 2270 2271 issue_host_reset: 2272 if (issue_reset) 2273 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 2274 2275 out: 2276 2277 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 2278 return rc; 2279 } 2280 2281 2282 2283 #ifdef CONFIG_COMPAT 2284 /** 2285 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. 2286 * @ioc: per adapter object 2287 * @cmd - ioctl opcode 2288 * @arg - (struct mpt3_ioctl_command32) 2289 * 2290 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. 
2291 */ 2292 static long 2293 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, 2294 void __user *arg) 2295 { 2296 struct mpt3_ioctl_command32 karg32; 2297 struct mpt3_ioctl_command32 __user *uarg; 2298 struct mpt3_ioctl_command karg; 2299 2300 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) 2301 return -EINVAL; 2302 2303 uarg = (struct mpt3_ioctl_command32 __user *) arg; 2304 2305 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { 2306 pr_err("failure at %s:%d/%s()!\n", 2307 __FILE__, __LINE__, __func__); 2308 return -EFAULT; 2309 } 2310 2311 memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); 2312 karg.hdr.ioc_number = karg32.hdr.ioc_number; 2313 karg.hdr.port_number = karg32.hdr.port_number; 2314 karg.hdr.max_data_size = karg32.hdr.max_data_size; 2315 karg.timeout = karg32.timeout; 2316 karg.max_reply_bytes = karg32.max_reply_bytes; 2317 karg.data_in_size = karg32.data_in_size; 2318 karg.data_out_size = karg32.data_out_size; 2319 karg.max_sense_bytes = karg32.max_sense_bytes; 2320 karg.data_sge_offset = karg32.data_sge_offset; 2321 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); 2322 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); 2323 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); 2324 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); 2325 return _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2326 } 2327 #endif 2328 2329 /** 2330 * _ctl_ioctl_main - main ioctl entry point 2331 * @file - (struct file) 2332 * @cmd - ioctl opcode 2333 * @arg - user space data buffer 2334 * @compat - handles 32 bit applications in 64bit os 2335 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & 2336 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. 2337 */ 2338 static long 2339 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, 2340 u8 compat, u16 mpi_version) 2341 { 2342 struct MPT3SAS_ADAPTER *ioc; 2343 struct mpt3_ioctl_header ioctl_header; 2344 enum block_state state; 2345 long ret = -EINVAL; 2346 2347 /* get IOCTL header */ 2348 if (copy_from_user(&ioctl_header, (char __user *)arg, 2349 sizeof(struct mpt3_ioctl_header))) { 2350 pr_err("failure at %s:%d/%s()!\n", 2351 __FILE__, __LINE__, __func__); 2352 return -EFAULT; 2353 } 2354 2355 if (_ctl_verify_adapter(ioctl_header.ioc_number, 2356 &ioc, mpi_version) == -1 || !ioc) 2357 return -ENODEV; 2358 2359 /* pci_access_mutex lock acquired by ioctl path */ 2360 mutex_lock(&ioc->pci_access_mutex); 2361 2362 if (ioc->shost_recovery || ioc->pci_error_recovery || 2363 ioc->is_driver_loading || ioc->remove_host) { 2364 ret = -EAGAIN; 2365 goto out_unlock_pciaccess; 2366 } 2367 2368 state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; 2369 if (state == NON_BLOCKING) { 2370 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { 2371 ret = -EAGAIN; 2372 goto out_unlock_pciaccess; 2373 } 2374 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { 2375 ret = -ERESTARTSYS; 2376 goto out_unlock_pciaccess; 2377 } 2378 2379 2380 switch (cmd) { 2381 case MPT3IOCINFO: 2382 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) 2383 ret = _ctl_getiocinfo(ioc, arg); 2384 break; 2385 #ifdef CONFIG_COMPAT 2386 case MPT3COMMAND32: 2387 #endif 2388 case MPT3COMMAND: 2389 { 2390 struct mpt3_ioctl_command __user *uarg; 2391 struct mpt3_ioctl_command karg; 2392 2393 #ifdef CONFIG_COMPAT 2394 if (compat) { 2395 ret = _ctl_compat_mpt_command(ioc, cmd, arg); 2396 break; 2397 } 2398 #endif 2399 if (copy_from_user(&karg, arg, sizeof(karg))) { 2400 pr_err("failure at %s:%d/%s()!\n", 2401 __FILE__, __LINE__, __func__); 2402 ret = -EFAULT; 2403 break; 2404 } 2405 2406 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { 2407 uarg = arg; 2408 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2409 } 2410 break; 2411 } 2412 case MPT3EVENTQUERY: 2413 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) 2414 ret = _ctl_eventquery(ioc, arg); 2415 break; 2416 case MPT3EVENTENABLE: 2417 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) 2418 ret = _ctl_eventenable(ioc, arg); 2419 break; 2420 case MPT3EVENTREPORT: 2421 ret = _ctl_eventreport(ioc, arg); 2422 break; 2423 case MPT3HARDRESET: 2424 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) 2425 ret = _ctl_do_reset(ioc, arg); 2426 break; 2427 case MPT3BTDHMAPPING: 2428 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) 2429 ret = _ctl_btdh_mapping(ioc, arg); 2430 break; 2431 case MPT3DIAGREGISTER: 2432 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) 2433 ret = _ctl_diag_register(ioc, arg); 2434 break; 2435 case MPT3DIAGUNREGISTER: 2436 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) 2437 ret = _ctl_diag_unregister(ioc, arg); 2438 break; 2439 case MPT3DIAGQUERY: 2440 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) 2441 ret = _ctl_diag_query(ioc, arg); 2442 break; 2443 case MPT3DIAGRELEASE: 2444 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) 2445 ret = _ctl_diag_release(ioc, arg); 2446 break; 2447 case MPT3DIAGREADBUFFER: 2448 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) 2449 ret = _ctl_diag_read_buffer(ioc, arg); 2450 break; 2451 default: 2452 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2453 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); 2454 break; 2455 } 2456 2457 mutex_unlock(&ioc->ctl_cmds.mutex); 2458 out_unlock_pciaccess: 2459 mutex_unlock(&ioc->pci_access_mutex); 2460 return ret; 2461 } 2462 2463 /** 2464 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) 2465 * @file - (struct file) 2466 * @cmd - ioctl opcode 2467 * @arg - 2468 */ 2469 static long 2470 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2471 { 2472 long ret; 2473 2474 /* pass MPI25_VERSION | MPI26_VERSION value, 2475 * to indicate that this ioctl cmd 2476 * came from mpt3ctl ioctl device. 
2477 */ 2478 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, 2479 MPI25_VERSION | MPI26_VERSION); 2480 return ret; 2481 } 2482 2483 /** 2484 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) 2485 * @file - (struct file) 2486 * @cmd - ioctl opcode 2487 * @arg - 2488 */ 2489 static long 2490 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2491 { 2492 long ret; 2493 2494 /* pass MPI2_VERSION value, to indicate that this ioctl cmd 2495 * came from mpt2ctl ioctl device. 2496 */ 2497 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); 2498 return ret; 2499 } 2500 #ifdef CONFIG_COMPAT 2501 /** 2502 *_ ctl_ioctl_compat - main ioctl entry point (compat) 2503 * @file - 2504 * @cmd - 2505 * @arg - 2506 * 2507 * This routine handles 32 bit applications in 64bit os. 2508 */ 2509 static long 2510 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2511 { 2512 long ret; 2513 2514 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, 2515 MPI25_VERSION | MPI26_VERSION); 2516 return ret; 2517 } 2518 2519 /** 2520 *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) 2521 * @file - 2522 * @cmd - 2523 * @arg - 2524 * 2525 * This routine handles 32 bit applications in 64bit os. 2526 */ 2527 static long 2528 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2529 { 2530 long ret; 2531 2532 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); 2533 return ret; 2534 } 2535 #endif 2536 2537 /* scsi host attributes */ 2538 /** 2539 * _ctl_version_fw_show - firmware version 2540 * @cdev - pointer to embedded class device 2541 * @buf - the buffer returned 2542 * 2543 * A sysfs 'read-only' shost attribute. 2544 */ 2545 static ssize_t 2546 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, 2547 char *buf) 2548 { 2549 struct Scsi_Host *shost = class_to_shost(cdev); 2550 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2551 2552 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2553 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 2554 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 2555 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 2556 ioc->facts.FWVersion.Word & 0x000000FF); 2557 } 2558 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); 2559 2560 /** 2561 * _ctl_version_bios_show - bios version 2562 * @cdev - pointer to embedded class device 2563 * @buf - the buffer returned 2564 * 2565 * A sysfs 'read-only' shost attribute. 2566 */ 2567 static ssize_t 2568 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, 2569 char *buf) 2570 { 2571 struct Scsi_Host *shost = class_to_shost(cdev); 2572 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2573 2574 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 2575 2576 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2577 (version & 0xFF000000) >> 24, 2578 (version & 0x00FF0000) >> 16, 2579 (version & 0x0000FF00) >> 8, 2580 version & 0x000000FF); 2581 } 2582 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); 2583 2584 /** 2585 * _ctl_version_mpi_show - MPI (message passing interface) version 2586 * @cdev - pointer to embedded class device 2587 * @buf - the buffer returned 2588 * 2589 * A sysfs 'read-only' shost attribute. 
2590 */ 2591 static ssize_t 2592 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, 2593 char *buf) 2594 { 2595 struct Scsi_Host *shost = class_to_shost(cdev); 2596 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2597 2598 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", 2599 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); 2600 } 2601 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); 2602 2603 /** 2604 * _ctl_version_product_show - product name 2605 * @cdev - pointer to embedded class device 2606 * @buf - the buffer returned 2607 * 2608 * A sysfs 'read-only' shost attribute. 2609 */ 2610 static ssize_t 2611 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr, 2612 char *buf) 2613 { 2614 struct Scsi_Host *shost = class_to_shost(cdev); 2615 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2616 2617 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); 2618 } 2619 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); 2620 2621 /** 2622 * _ctl_version_nvdata_persistent_show - ndvata persistent version 2623 * @cdev - pointer to embedded class device 2624 * @buf - the buffer returned 2625 * 2626 * A sysfs 'read-only' shost attribute. 2627 */ 2628 static ssize_t 2629 _ctl_version_nvdata_persistent_show(struct device *cdev, 2630 struct device_attribute *attr, char *buf) 2631 { 2632 struct Scsi_Host *shost = class_to_shost(cdev); 2633 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2634 2635 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2636 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); 2637 } 2638 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, 2639 _ctl_version_nvdata_persistent_show, NULL); 2640 2641 /** 2642 * _ctl_version_nvdata_default_show - nvdata default version 2643 * @cdev - pointer to embedded class device 2644 * @buf - the buffer returned 2645 * 2646 * A sysfs 'read-only' shost attribute. 2647 */ 2648 static ssize_t 2649 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute 2650 *attr, char *buf) 2651 { 2652 struct Scsi_Host *shost = class_to_shost(cdev); 2653 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2654 2655 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2656 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); 2657 } 2658 static DEVICE_ATTR(version_nvdata_default, S_IRUGO, 2659 _ctl_version_nvdata_default_show, NULL); 2660 2661 /** 2662 * _ctl_board_name_show - board name 2663 * @cdev - pointer to embedded class device 2664 * @buf - the buffer returned 2665 * 2666 * A sysfs 'read-only' shost attribute. 2667 */ 2668 static ssize_t 2669 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr, 2670 char *buf) 2671 { 2672 struct Scsi_Host *shost = class_to_shost(cdev); 2673 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2674 2675 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); 2676 } 2677 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); 2678 2679 /** 2680 * _ctl_board_assembly_show - board assembly name 2681 * @cdev - pointer to embedded class device 2682 * @buf - the buffer returned 2683 * 2684 * A sysfs 'read-only' shost attribute. 
2685 */ 2686 static ssize_t 2687 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr, 2688 char *buf) 2689 { 2690 struct Scsi_Host *shost = class_to_shost(cdev); 2691 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2692 2693 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); 2694 } 2695 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); 2696 2697 /** 2698 * _ctl_board_tracer_show - board tracer number 2699 * @cdev - pointer to embedded class device 2700 * @buf - the buffer returned 2701 * 2702 * A sysfs 'read-only' shost attribute. 2703 */ 2704 static ssize_t 2705 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr, 2706 char *buf) 2707 { 2708 struct Scsi_Host *shost = class_to_shost(cdev); 2709 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2710 2711 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); 2712 } 2713 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); 2714 2715 /** 2716 * _ctl_io_delay_show - io missing delay 2717 * @cdev - pointer to embedded class device 2718 * @buf - the buffer returned 2719 * 2720 * This is used by the firmware for debouncing device 2721 * removal events. 2722 * 2723 * A sysfs 'read-only' shost attribute. 2724 */ 2725 static ssize_t 2726 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr, 2727 char *buf) 2728 { 2729 struct Scsi_Host *shost = class_to_shost(cdev); 2730 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2731 2732 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); 2733 } 2734 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); 2735 2736 /** 2737 * _ctl_device_delay_show - device missing delay 2738 * @cdev - pointer to embedded class device 2739 * @buf - the buffer returned 2740 * 2741 * This is used by the firmware for debouncing device 2742 * removal events. 2743 * 2744 * A sysfs 'read-only' shost attribute. 2745 */ 2746 static ssize_t 2747 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr, 2748 char *buf) 2749 { 2750 struct Scsi_Host *shost = class_to_shost(cdev); 2751 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2752 2753 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); 2754 } 2755 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); 2756 2757 /** 2758 * _ctl_fw_queue_depth_show - global credits 2759 * @cdev - pointer to embedded class device 2760 * @buf - the buffer returned 2761 * 2762 * This is the firmware queue depth limit 2763 * 2764 * A sysfs 'read-only' shost attribute. 2765 */ 2766 static ssize_t 2767 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, 2768 char *buf) 2769 { 2770 struct Scsi_Host *shost = class_to_shost(cdev); 2771 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2772 2773 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); 2774 } 2775 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL); 2776 2777 /** 2778 * _ctl_host_sas_address_show - host sas address 2779 * @cdev - pointer to embedded class device 2780 * @buf - the buffer returned 2781 * 2782 * This is the controller sas address 2783 * 2784 * A sysfs 'read-only' shost attribute.
2785 */ 2786 static ssize_t 2787 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr, 2788 char *buf) 2789 2790 { 2791 struct Scsi_Host *shost = class_to_shost(cdev); 2792 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2793 2794 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 2795 (unsigned long long)ioc->sas_hba.sas_address); 2796 } 2797 static DEVICE_ATTR(host_sas_address, S_IRUGO, 2798 _ctl_host_sas_address_show, NULL); 2799 2800 /** 2801 * _ctl_logging_level_show - logging level 2802 * @cdev - pointer to embedded class device 2803 * @buf - the buffer returned 2804 * 2805 * A sysfs 'read/write' shost attribute. 2806 */ 2807 static ssize_t 2808 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr, 2809 char *buf) 2810 { 2811 struct Scsi_Host *shost = class_to_shost(cdev); 2812 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2813 2814 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); 2815 } 2816 static ssize_t 2817 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr, 2818 const char *buf, size_t count) 2819 { 2820 struct Scsi_Host *shost = class_to_shost(cdev); 2821 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2822 int val = 0; 2823 2824 if (sscanf(buf, "%x", &val) != 1) 2825 return -EINVAL; 2826 2827 ioc->logging_level = val; 2828 pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name, 2829 ioc->logging_level); 2830 return strlen(buf); 2831 } 2832 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show, 2833 _ctl_logging_level_store); 2834 2835 /** 2836 * _ctl_fwfault_debug_show - show/store fwfault_debug 2837 * @cdev - pointer to embedded class device 2838 * @buf - the buffer returned 2839 * 2840 * mpt3sas_fwfault_debug is a command line option 2841 * A sysfs 'read/write' shost attribute. 2842 */ 2843 static ssize_t 2844 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr, 2845 char *buf) 2846 { 2847 struct Scsi_Host *shost = class_to_shost(cdev); 2848 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2849 2850 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); 2851 } 2852 static ssize_t 2853 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr, 2854 const char *buf, size_t count) 2855 { 2856 struct Scsi_Host *shost = class_to_shost(cdev); 2857 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2858 int val = 0; 2859 2860 if (sscanf(buf, "%d", &val) != 1) 2861 return -EINVAL; 2862 2863 ioc->fwfault_debug = val; 2864 pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name, 2865 ioc->fwfault_debug); 2866 return strlen(buf); 2867 } 2868 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, 2869 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store); 2870 2871 /** 2872 * _ctl_ioc_reset_count_show - ioc reset count 2873 * @cdev - pointer to embedded class device 2874 * @buf - the buffer returned 2875 * 2876 * This is the number of times the ioc has been reset 2877 * 2878 * A sysfs 'read-only' shost attribute.
2879 */ 2880 static ssize_t 2881 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, 2882 char *buf) 2883 { 2884 struct Scsi_Host *shost = class_to_shost(cdev); 2885 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2886 2887 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); 2888 } 2889 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); 2890 2891 /** 2892 * _ctl_ioc_reply_queue_count_show - number of reply queues 2893 * @cdev - pointer to embedded class device 2894 * @buf - the buffer returned 2895 * 2896 * This is the number of reply queues 2897 * 2898 * A sysfs 'read-only' shost attribute. 2899 */ 2900 static ssize_t 2901 _ctl_ioc_reply_queue_count_show(struct device *cdev, 2902 struct device_attribute *attr, char *buf) 2903 { 2904 u8 reply_queue_count; 2905 struct Scsi_Host *shost = class_to_shost(cdev); 2906 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2907 2908 if ((ioc->facts.IOCCapabilities & 2909 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) 2910 reply_queue_count = ioc->reply_queue_count; 2911 else 2912 reply_queue_count = 1; 2913 2914 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); 2915 } 2916 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, 2917 NULL); 2918 2919 /** 2920 * _ctl_BRM_status_show - Backup Rail Monitor Status 2921 * @cdev - pointer to embedded class device 2922 * @buf - the buffer returned 2923 * 2924 * This is the backup rail monitor status, taken from bit zero of GPIOVal[24] in IO Unit Page 3 (warpdrive controllers only) 2925 * 2926 * A sysfs 'read-only' shost attribute. 2927 */ 2928 static ssize_t 2929 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, 2930 char *buf) 2931 { 2932 struct Scsi_Host *shost = class_to_shost(cdev); 2933 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2934 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; 2935 Mpi2ConfigReply_t mpi_reply; 2936 u16 backup_rail_monitor_status = 0; 2937 u16 ioc_status; 2938 int sz; 2939 ssize_t rc = 0; 2940 2941 if (!ioc->is_warpdrive) { 2942 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for" 2943 " warpdrive\n", ioc->name, __func__); 2944 return 0; 2945 } 2946 /* pci_access_mutex lock acquired by sysfs show path */ 2947 mutex_lock(&ioc->pci_access_mutex); 2948 if (ioc->pci_error_recovery || ioc->remove_host) { 2949 mutex_unlock(&ioc->pci_access_mutex); 2950 return 0; 2951 } 2952 2953 /* allocate up to GPIOVal 36 entries */ 2954 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); 2955 io_unit_pg3 = kzalloc(sz, GFP_KERNEL); 2956 if (!io_unit_pg3) { 2957 pr_err(MPT3SAS_FMT "%s: failed allocating memory " 2958 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz); 2959 goto out; 2960 } 2961 2962 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != 2963 0) { 2964 pr_err(MPT3SAS_FMT 2965 "%s: failed reading iounit_pg3\n", ioc->name, 2966 __func__); 2967 goto out; 2968 } 2969 2970 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 2971 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2972 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with " 2973 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status); 2974 goto out; 2975 } 2976 2977 if (io_unit_pg3->GPIOCount < 25) { 2978 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than " 2979 "25 entries, detected (%d) entries\n", ioc->name, __func__, 2980 io_unit_pg3->GPIOCount); 2981 goto out; 2982 } 2983 2984 /* BRM status is in bit zero of GPIOVal[24] */ 2985 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); 2986 rc = snprintf(buf, PAGE_SIZE, "%d\n",
(backup_rail_monitor_status & 1)); 2987 2988 out: 2989 kfree(io_unit_pg3); 2990 mutex_unlock(&ioc->pci_access_mutex); 2991 return rc; 2992 } 2993 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); 2994 2995 struct DIAG_BUFFER_START { 2996 __le32 Size; 2997 __le32 DiagVersion; 2998 u8 BufferType; 2999 u8 Reserved[3]; 3000 __le32 Reserved1; 3001 __le32 Reserved2; 3002 __le32 Reserved3; 3003 }; 3004 3005 /** 3006 * _ctl_host_trace_buffer_size_show - host buffer size (trace only) 3007 * @cdev - pointer to embedded class device 3008 * @buf - the buffer returned 3009 * 3010 * A sysfs 'read-only' shost attribute. 3011 */ 3012 static ssize_t 3013 _ctl_host_trace_buffer_size_show(struct device *cdev, 3014 struct device_attribute *attr, char *buf) 3015 { 3016 struct Scsi_Host *shost = class_to_shost(cdev); 3017 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3018 u32 size = 0; 3019 struct DIAG_BUFFER_START *request_data; 3020 3021 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 3022 pr_err(MPT3SAS_FMT 3023 "%s: host_trace_buffer is not registered\n", 3024 ioc->name, __func__); 3025 return 0; 3026 } 3027 3028 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3029 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 3030 pr_err(MPT3SAS_FMT 3031 "%s: host_trace_buffer is not registered\n", 3032 ioc->name, __func__); 3033 return 0; 3034 } 3035 3036 request_data = (struct DIAG_BUFFER_START *) 3037 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; 3038 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || 3039 le32_to_cpu(request_data->DiagVersion) == 0x01000000 || 3040 le32_to_cpu(request_data->DiagVersion) == 0x01010000) && 3041 le32_to_cpu(request_data->Reserved3) == 0x4742444c) 3042 size = le32_to_cpu(request_data->Size); 3043 3044 ioc->ring_buffer_sz = size; 3045 return snprintf(buf, PAGE_SIZE, "%d\n", size); 3046 } 3047 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, 3048 _ctl_host_trace_buffer_size_show, NULL); 3049 3050 /** 3051 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) 3052 * @cdev - pointer to embedded class device 3053 * @buf - the buffer returned 3054 * 3055 * A sysfs 'read/write' shost attribute. 3056 * 3057 * You will only be able to read 4k bytes of ring buffer at a time. 3058 * In order to read beyond 4k bytes, you will have to write out the 3059 * offset to the same attribute, it will move the pointer. 3060 */ 3061 static ssize_t 3062 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, 3063 char *buf) 3064 { 3065 struct Scsi_Host *shost = class_to_shost(cdev); 3066 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3067 void *request_data; 3068 u32 size; 3069 3070 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 3071 pr_err(MPT3SAS_FMT 3072 "%s: host_trace_buffer is not registered\n", 3073 ioc->name, __func__); 3074 return 0; 3075 } 3076 3077 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3078 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 3079 pr_err(MPT3SAS_FMT 3080 "%s: host_trace_buffer is not registered\n", 3081 ioc->name, __func__); 3082 return 0; 3083 } 3084 3085 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) 3086 return 0; 3087 3088 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; 3089 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 3090 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; 3091 memcpy(buf, request_data, size); 3092 return size; 3093 } 3094 3095 static ssize_t 3096 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, 3097 const char *buf, size_t count) 3098 { 3099 struct Scsi_Host *shost = class_to_shost(cdev); 3100 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3101 int val = 0; 3102 3103 if (sscanf(buf, "%d", &val) != 1) 3104 return -EINVAL; 3105 3106 ioc->ring_buffer_offset = val; 3107 return strlen(buf); 3108 } 3109 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, 3110 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); 3111 3112 3113 /*****************************************/ 3114 3115 /** 3116 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) 3117 * @cdev - pointer to embedded class device 3118 * @buf - the buffer returned 3119 * 3120 * A sysfs 'read/write' shost attribute. 3121 * 3122 * This is a mechanism to post/release host_trace_buffers 3123 */ 3124 static ssize_t 3125 _ctl_host_trace_buffer_enable_show(struct device *cdev, 3126 struct device_attribute *attr, char *buf) 3127 { 3128 struct Scsi_Host *shost = class_to_shost(cdev); 3129 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3130 3131 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || 3132 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3133 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)) 3134 return snprintf(buf, PAGE_SIZE, "off\n"); 3135 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3136 MPT3_DIAG_BUFFER_IS_RELEASED)) 3137 return snprintf(buf, PAGE_SIZE, "release\n"); 3138 else 3139 return snprintf(buf, PAGE_SIZE, "post\n"); 3140 } 3141 3142 static ssize_t 3143 _ctl_host_trace_buffer_enable_store(struct device *cdev, 3144 struct device_attribute *attr, const char *buf, size_t count) 3145 { 3146 struct Scsi_Host *shost = class_to_shost(cdev); 3147 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3148 char str[10] = ""; 3149 struct mpt3_diag_register diag_register; 3150 u8 issue_reset = 0; 3151 3152 /* don't allow post/release to occur while recovery is active */ 3153 if (ioc->shost_recovery || ioc->remove_host || 3154 ioc->pci_error_recovery || ioc->is_driver_loading) 3155 return -EBUSY; 3156 3157 if (sscanf(buf, "%9s", str) != 1) 3158 return -EINVAL; 3159 3160 if (!strcmp(str, "post")) { 3161 /* exit out if host buffers are already posted */ 3162 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && 3163 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3164 MPT3_DIAG_BUFFER_IS_REGISTERED) && 3165 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3166 MPT3_DIAG_BUFFER_IS_RELEASED) == 0)) 3167 goto out; 3168 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 3169 pr_info(MPT3SAS_FMT "posting host trace buffers\n", 3170 ioc->name); 3171 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 3172 diag_register.requested_buffer_size = (1024 * 1024); 3173 diag_register.unique_id = 0x7075900; 3174 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; 3175 _ctl_diag_register_2(ioc, &diag_register); 3176 } else if (!strcmp(str, "release")) { 3177 /* exit out if host buffers are already released */ 3178 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) 3179 goto out; 3180 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3181 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) 3182 goto out; 3183 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3184 MPT3_DIAG_BUFFER_IS_RELEASED)) 3185 goto out; 3186
pr_info(MPT3SAS_FMT "releasing host trace buffer\n", 3187 ioc->name); 3188 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, 3189 &issue_reset); 3190 } 3191 3192 out: 3193 return strlen(buf); 3194 } 3195 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, 3196 _ctl_host_trace_buffer_enable_show, 3197 _ctl_host_trace_buffer_enable_store); 3198 3199 /*********** diagnostic trigger suppport *********************************/ 3200 3201 /** 3202 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute 3203 * @cdev - pointer to embedded class device 3204 * @buf - the buffer returned 3205 * 3206 * A sysfs 'read/write' shost attribute. 3207 */ 3208 static ssize_t 3209 _ctl_diag_trigger_master_show(struct device *cdev, 3210 struct device_attribute *attr, char *buf) 3211 3212 { 3213 struct Scsi_Host *shost = class_to_shost(cdev); 3214 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3215 unsigned long flags; 3216 ssize_t rc; 3217 3218 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3219 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); 3220 memcpy(buf, &ioc->diag_trigger_master, rc); 3221 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3222 return rc; 3223 } 3224 3225 /** 3226 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute 3227 * @cdev - pointer to embedded class device 3228 * @buf - the buffer returned 3229 * 3230 * A sysfs 'read/write' shost attribute. 3231 */ 3232 static ssize_t 3233 _ctl_diag_trigger_master_store(struct device *cdev, 3234 struct device_attribute *attr, const char *buf, size_t count) 3235 3236 { 3237 struct Scsi_Host *shost = class_to_shost(cdev); 3238 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3239 unsigned long flags; 3240 ssize_t rc; 3241 3242 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3243 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); 3244 memset(&ioc->diag_trigger_master, 0, 3245 sizeof(struct SL_WH_MASTER_TRIGGER_T)); 3246 memcpy(&ioc->diag_trigger_master, buf, rc); 3247 ioc->diag_trigger_master.MasterData |= 3248 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 3249 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3250 return rc; 3251 } 3252 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, 3253 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); 3254 3255 3256 /** 3257 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute 3258 * @cdev - pointer to embedded class device 3259 * @buf - the buffer returned 3260 * 3261 * A sysfs 'read/write' shost attribute. 3262 */ 3263 static ssize_t 3264 _ctl_diag_trigger_event_show(struct device *cdev, 3265 struct device_attribute *attr, char *buf) 3266 { 3267 struct Scsi_Host *shost = class_to_shost(cdev); 3268 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3269 unsigned long flags; 3270 ssize_t rc; 3271 3272 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3273 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); 3274 memcpy(buf, &ioc->diag_trigger_event, rc); 3275 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3276 return rc; 3277 } 3278 3279 /** 3280 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute 3281 * @cdev - pointer to embedded class device 3282 * @buf - the buffer returned 3283 * 3284 * A sysfs 'read/write' shost attribute. 
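 *
 * The value written is the binary image of a struct
 * SL_WH_EVENT_TRIGGERS_T; anything beyond that size is ignored and
 * ValidEntries is clamped to NUM_VALID_ENTRIES by the store handler
 * below.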
3285 */ 3286 static ssize_t 3287 _ctl_diag_trigger_event_store(struct device *cdev, 3288 struct device_attribute *attr, const char *buf, size_t count) 3289 3290 { 3291 struct Scsi_Host *shost = class_to_shost(cdev); 3292 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3293 unsigned long flags; 3294 ssize_t sz; 3295 3296 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3297 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); 3298 memset(&ioc->diag_trigger_event, 0, 3299 sizeof(struct SL_WH_EVENT_TRIGGERS_T)); 3300 memcpy(&ioc->diag_trigger_event, buf, sz); 3301 if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES) 3302 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES; 3303 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3304 return sz; 3305 } 3306 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR, 3307 _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store); 3308 3309 3310 /** 3311 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute 3312 * @cdev - pointer to embedded class device 3313 * @buf - the buffer returned 3314 * 3315 * A sysfs 'read/write' shost attribute. 3316 */ 3317 static ssize_t 3318 _ctl_diag_trigger_scsi_show(struct device *cdev, 3319 struct device_attribute *attr, char *buf) 3320 { 3321 struct Scsi_Host *shost = class_to_shost(cdev); 3322 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3323 unsigned long flags; 3324 ssize_t rc; 3325 3326 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3327 rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T); 3328 memcpy(buf, &ioc->diag_trigger_scsi, rc); 3329 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3330 return rc; 3331 } 3332 3333 /** 3334 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute 3335 * @cdev - pointer to embedded class device 3336 * @buf - the buffer returned 3337 * 3338 * A sysfs 'read/write' shost attribute. 3339 */ 3340 static ssize_t 3341 _ctl_diag_trigger_scsi_store(struct device *cdev, 3342 struct device_attribute *attr, const char *buf, size_t count) 3343 { 3344 struct Scsi_Host *shost = class_to_shost(cdev); 3345 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3346 unsigned long flags; 3347 ssize_t sz; 3348 3349 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3350 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count); 3351 memset(&ioc->diag_trigger_scsi, 0, 3352 sizeof(struct SL_WH_SCSI_TRIGGERS_T)); 3353 memcpy(&ioc->diag_trigger_scsi, buf, sz); 3354 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) 3355 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES; 3356 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3357 return sz; 3358 } 3359 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR, 3360 _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store); 3361 3362 3363 /** 3364 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute 3365 * @cdev - pointer to embedded class device 3366 * @buf - the buffer returned 3367 * 3368 * A sysfs 'read/write' shost attribute.
3369 */ 3370 static ssize_t 3371 _ctl_diag_trigger_mpi_show(struct device *cdev, 3372 struct device_attribute *attr, char *buf) 3373 { 3374 struct Scsi_Host *shost = class_to_shost(cdev); 3375 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3376 unsigned long flags; 3377 ssize_t rc; 3378 3379 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3380 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T); 3381 memcpy(buf, &ioc->diag_trigger_mpi, rc); 3382 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3383 return rc; 3384 } 3385 3386 /** 3387 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute 3388 * @cdev - pointer to embedded class device 3389 * @buf - the buffer returned 3390 * 3391 * A sysfs 'read/write' shost attribute. 3392 */ 3393 static ssize_t 3394 _ctl_diag_trigger_mpi_store(struct device *cdev, 3395 struct device_attribute *attr, const char *buf, size_t count) 3396 { 3397 struct Scsi_Host *shost = class_to_shost(cdev); 3398 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3399 unsigned long flags; 3400 ssize_t sz; 3401 3402 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3403 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); 3404 memset(&ioc->diag_trigger_mpi, 0, 3405 sizeof(ioc->diag_trigger_mpi)); 3406 memcpy(&ioc->diag_trigger_mpi, buf, sz); 3407 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) 3408 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; 3409 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3410 return sz; 3411 } 3412 3413 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, 3414 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); 3415 3416 /*********** diagnostic trigger suppport *** END ****************************/ 3417 3418 /*****************************************/ 3419 3420 struct device_attribute *mpt3sas_host_attrs[] = { 3421 &dev_attr_version_fw, 3422 &dev_attr_version_bios, 3423 &dev_attr_version_mpi, 3424 &dev_attr_version_product, 3425 &dev_attr_version_nvdata_persistent, 3426 &dev_attr_version_nvdata_default, 3427 &dev_attr_board_name, 3428 &dev_attr_board_assembly, 3429 &dev_attr_board_tracer, 3430 &dev_attr_io_delay, 3431 &dev_attr_device_delay, 3432 &dev_attr_logging_level, 3433 &dev_attr_fwfault_debug, 3434 &dev_attr_fw_queue_depth, 3435 &dev_attr_host_sas_address, 3436 &dev_attr_ioc_reset_count, 3437 &dev_attr_host_trace_buffer_size, 3438 &dev_attr_host_trace_buffer, 3439 &dev_attr_host_trace_buffer_enable, 3440 &dev_attr_reply_queue_count, 3441 &dev_attr_diag_trigger_master, 3442 &dev_attr_diag_trigger_event, 3443 &dev_attr_diag_trigger_scsi, 3444 &dev_attr_diag_trigger_mpi, 3445 &dev_attr_BRM_status, 3446 NULL, 3447 }; 3448 3449 /* device attributes */ 3450 3451 /** 3452 * _ctl_device_sas_address_show - sas address 3453 * @cdev - pointer to embedded class device 3454 * @buf - the buffer returned 3455 * 3456 * This is the sas address for the target 3457 * 3458 * A sysfs 'read-only' shost attribute. 
3459 */ 3460 static ssize_t 3461 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, 3462 char *buf) 3463 { 3464 struct scsi_device *sdev = to_scsi_device(dev); 3465 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3466 3467 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 3468 (unsigned long long)sas_device_priv_data->sas_target->sas_address); 3469 } 3470 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); 3471 3472 /** 3473 * _ctl_device_handle_show - device handle 3474 * @cdev - pointer to embedded class device 3475 * @buf - the buffer returned 3476 * 3477 * This is the firmware assigned device handle 3478 * 3479 * A sysfs 'read-only' shost attribute. 3480 */ 3481 static ssize_t 3482 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr, 3483 char *buf) 3484 { 3485 struct scsi_device *sdev = to_scsi_device(dev); 3486 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3487 3488 return snprintf(buf, PAGE_SIZE, "0x%04x\n", 3489 sas_device_priv_data->sas_target->handle); 3490 } 3491 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); 3492 3493 /** 3494 * _ctl_device_ncq_io_prio_show - send prioritized io commands to device 3495 * @dev - pointer to embedded device 3496 * @buf - the buffer returned 3497 * 3498 * A sysfs 'read/write' sdev attribute, only works with SATA 3499 */ 3500 static ssize_t 3501 _ctl_device_ncq_prio_enable_show(struct device *dev, 3502 struct device_attribute *attr, char *buf) 3503 { 3504 struct scsi_device *sdev = to_scsi_device(dev); 3505 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3506 3507 return snprintf(buf, PAGE_SIZE, "%d\n", 3508 sas_device_priv_data->ncq_prio_enable); 3509 } 3510 3511 static ssize_t 3512 _ctl_device_ncq_prio_enable_store(struct device *dev, 3513 struct device_attribute *attr, 3514 const char *buf, size_t count) 3515 { 3516 struct scsi_device *sdev = to_scsi_device(dev); 3517 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 3518 bool ncq_prio_enable = 0; 3519 3520 if (kstrtobool(buf, &ncq_prio_enable)) 3521 return -EINVAL; 3522 3523 if (!scsih_ncq_prio_supp(sdev)) 3524 return -EINVAL; 3525 3526 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; 3527 return strlen(buf); 3528 } 3529 static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR, 3530 _ctl_device_ncq_prio_enable_show, 3531 _ctl_device_ncq_prio_enable_store); 3532 3533 struct device_attribute *mpt3sas_dev_attrs[] = { 3534 &dev_attr_sas_address, 3535 &dev_attr_sas_device_handle, 3536 &dev_attr_sas_ncq_prio_enable, 3537 NULL, 3538 }; 3539 3540 /* file operations table for mpt3ctl device */ 3541 static const struct file_operations ctl_fops = { 3542 .owner = THIS_MODULE, 3543 .unlocked_ioctl = _ctl_ioctl, 3544 .poll = _ctl_poll, 3545 .fasync = _ctl_fasync, 3546 #ifdef CONFIG_COMPAT 3547 .compat_ioctl = _ctl_ioctl_compat, 3548 #endif 3549 }; 3550 3551 /* file operations table for mpt2ctl device */ 3552 static const struct file_operations ctl_gen2_fops = { 3553 .owner = THIS_MODULE, 3554 .unlocked_ioctl = _ctl_mpt2_ioctl, 3555 .poll = _ctl_poll, 3556 .fasync = _ctl_fasync, 3557 #ifdef CONFIG_COMPAT 3558 .compat_ioctl = _ctl_mpt2_ioctl_compat, 3559 #endif 3560 }; 3561 3562 static struct miscdevice ctl_dev = { 3563 .minor = MPT3SAS_MINOR, 3564 .name = MPT3SAS_DEV_NAME, 3565 .fops = &ctl_fops, 3566 }; 3567 3568 static struct miscdevice gen2_ctl_dev = { 3569 .minor = MPT2SAS_MINOR, 3570 .name = MPT2SAS_DEV_NAME, 3571 .fops = &ctl_gen2_fops, 
3572 }; 3573 3574 /** 3575 * mpt3sas_ctl_init - main entry point for ctl. 3576 * 3577 */ 3578 void 3579 mpt3sas_ctl_init(ushort hbas_to_enumerate) 3580 { 3581 async_queue = NULL; 3582 3583 /* Don't register mpt3ctl ioctl device if 3584 * hbas_to_enumerate is one. 3585 */ 3586 if (hbas_to_enumerate != 1) 3587 if (misc_register(&ctl_dev) < 0) 3588 pr_err("%s can't register misc device [minor=%d]\n", 3589 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); 3590 3591 /* Don't register mpt2ctl ioctl device if 3592 * hbas_to_enumerate is two. 3593 */ 3594 if (hbas_to_enumerate != 2) 3595 if (misc_register(&gen2_ctl_dev) < 0) 3596 pr_err("%s can't register misc device [minor=%d]\n", 3597 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); 3598 3599 init_waitqueue_head(&ctl_poll_wait); 3600 } 3601 3602 /** 3603 * mpt3sas_ctl_exit - exit point for ctl 3604 * 3605 */ 3606 void 3607 mpt3sas_ctl_exit(ushort hbas_to_enumerate) 3608 { 3609 struct MPT3SAS_ADAPTER *ioc; 3610 int i; 3611 3612 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 3613 3614 /* free memory associated with diag buffers */ 3615 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 3616 if (!ioc->diag_buffer[i]) 3617 continue; 3618 if (!(ioc->diag_buffer_status[i] & 3619 MPT3_DIAG_BUFFER_IS_REGISTERED)) 3620 continue; 3621 if ((ioc->diag_buffer_status[i] & 3622 MPT3_DIAG_BUFFER_IS_RELEASED)) 3623 continue; 3624 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], 3625 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); 3626 ioc->diag_buffer[i] = NULL; 3627 ioc->diag_buffer_status[i] = 0; 3628 } 3629 3630 kfree(ioc->event_log); 3631 } 3632 if (hbas_to_enumerate != 1) 3633 misc_deregister(&ctl_dev); 3634 if (hbas_to_enumerate != 2) 3635 misc_deregister(&gen2_ctl_dev); 3636 } 3637
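/*
 * Illustrative user-space sketch (not part of the driver build): reading
 * the registered firmware trace buffer through the host_trace_buffer
 * sysfs attribute documented above.  At most a page is returned per read,
 * so the current offset is written back to the same attribute to advance
 * the window; the total size is available from host_trace_buffer_size.
 * The scsi_host path below ("host0") and the absence of error handling
 * are simplifications for illustration only.
 *
 *	const char *path =
 *		"/sys/class/scsi_host/host0/host_trace_buffer";
 *	char chunk[4096], val[16];
 *	unsigned int offset = 0;
 *	ssize_t n;
 *	int fd;
 *
 *	do {
 *		fd = open(path, O_WRONLY);
 *		snprintf(val, sizeof(val), "%u", offset);
 *		write(fd, val, strlen(val));
 *		close(fd);
 *
 *		fd = open(path, O_RDONLY);
 *		n = read(fd, chunk, sizeof(chunk));
 *		close(fd);
 *
 *		offset += n;
 *	} while (n > 0);
 */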