1 /* 2 * Management Module Support for MPT (Message Passing Technology) based 3 * controllers 4 * 5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c 6 * Copyright (C) 2012-2014 LSI Corporation 7 * Copyright (C) 2013-2014 Avago Technologies 8 * (mailto: MPT-FusionLinux.pdl@avagotech.com) 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 2 13 * of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * NO WARRANTY 21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 25 * solely responsible for determining the appropriateness of using and 26 * distributing the Program and assumes all risks associated with its 27 * exercise of rights under this Agreement, including but not limited to 28 * the risks and costs of program errors, damage to or loss of data, 29 * programs or equipment, and unavailability or interruption of operations. 30 31 * DISCLAIMER OF LIABILITY 32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 39 40 * You should have received a copy of the GNU General Public License 41 * along with this program; if not, write to the Free Software 42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 43 * USA. 44 */ 45 46 #include <linux/kernel.h> 47 #include <linux/module.h> 48 #include <linux/errno.h> 49 #include <linux/init.h> 50 #include <linux/slab.h> 51 #include <linux/types.h> 52 #include <linux/pci.h> 53 #include <linux/delay.h> 54 #include <linux/compat.h> 55 #include <linux/poll.h> 56 57 #include <linux/io.h> 58 #include <linux/uaccess.h> 59 60 #include "mpt3sas_base.h" 61 #include "mpt3sas_ctl.h" 62 63 64 static struct fasync_struct *async_queue; 65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); 66 67 68 /** 69 * enum block_state - blocking state 70 * @NON_BLOCKING: non blocking 71 * @BLOCKING: blocking 72 * 73 * These states are for ioctls that need to wait for a response 74 * from firmware, so they probably require sleep. 75 */ 76 enum block_state { 77 NON_BLOCKING, 78 BLOCKING, 79 }; 80 81 /** 82 * _ctl_sas_device_find_by_handle - sas device search 83 * @ioc: per adapter object 84 * @handle: sas device handle (assigned by firmware) 85 * Context: Calling function should acquire ioc->sas_device_lock 86 * 87 * This searches for sas_device based on sas_address, then return sas_device 88 * object. 
89 */ 90 static struct _sas_device * 91 _ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) 92 { 93 struct _sas_device *sas_device, *r; 94 95 r = NULL; 96 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 97 if (sas_device->handle != handle) 98 continue; 99 r = sas_device; 100 goto out; 101 } 102 103 out: 104 return r; 105 } 106 107 /** 108 * _ctl_display_some_debug - debug routine 109 * @ioc: per adapter object 110 * @smid: system request message index 111 * @calling_function_name: string pass from calling function 112 * @mpi_reply: reply message frame 113 * Context: none. 114 * 115 * Function for displaying debug info helpful when debugging issues 116 * in this module. 117 */ 118 static void 119 _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, 120 char *calling_function_name, MPI2DefaultReply_t *mpi_reply) 121 { 122 Mpi2ConfigRequest_t *mpi_request; 123 char *desc = NULL; 124 125 if (!(ioc->logging_level & MPT_DEBUG_IOCTL)) 126 return; 127 128 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 129 switch (mpi_request->Function) { 130 case MPI2_FUNCTION_SCSI_IO_REQUEST: 131 { 132 Mpi2SCSIIORequest_t *scsi_request = 133 (Mpi2SCSIIORequest_t *)mpi_request; 134 135 snprintf(ioc->tmp_string, MPT_STRING_LENGTH, 136 "scsi_io, cmd(0x%02x), cdb_len(%d)", 137 scsi_request->CDB.CDB32[0], 138 le16_to_cpu(scsi_request->IoFlags) & 0xF); 139 desc = ioc->tmp_string; 140 break; 141 } 142 case MPI2_FUNCTION_SCSI_TASK_MGMT: 143 desc = "task_mgmt"; 144 break; 145 case MPI2_FUNCTION_IOC_INIT: 146 desc = "ioc_init"; 147 break; 148 case MPI2_FUNCTION_IOC_FACTS: 149 desc = "ioc_facts"; 150 break; 151 case MPI2_FUNCTION_CONFIG: 152 { 153 Mpi2ConfigRequest_t *config_request = 154 (Mpi2ConfigRequest_t *)mpi_request; 155 156 snprintf(ioc->tmp_string, MPT_STRING_LENGTH, 157 "config, type(0x%02x), ext_type(0x%02x), number(%d)", 158 (config_request->Header.PageType & 159 MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType, 160 config_request->Header.PageNumber); 161 desc = ioc->tmp_string; 162 break; 163 } 164 case MPI2_FUNCTION_PORT_FACTS: 165 desc = "port_facts"; 166 break; 167 case MPI2_FUNCTION_PORT_ENABLE: 168 desc = "port_enable"; 169 break; 170 case MPI2_FUNCTION_EVENT_NOTIFICATION: 171 desc = "event_notification"; 172 break; 173 case MPI2_FUNCTION_FW_DOWNLOAD: 174 desc = "fw_download"; 175 break; 176 case MPI2_FUNCTION_FW_UPLOAD: 177 desc = "fw_upload"; 178 break; 179 case MPI2_FUNCTION_RAID_ACTION: 180 desc = "raid_action"; 181 break; 182 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 183 { 184 Mpi2SCSIIORequest_t *scsi_request = 185 (Mpi2SCSIIORequest_t *)mpi_request; 186 187 snprintf(ioc->tmp_string, MPT_STRING_LENGTH, 188 "raid_pass, cmd(0x%02x), cdb_len(%d)", 189 scsi_request->CDB.CDB32[0], 190 le16_to_cpu(scsi_request->IoFlags) & 0xF); 191 desc = ioc->tmp_string; 192 break; 193 } 194 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: 195 desc = "sas_iounit_cntl"; 196 break; 197 case MPI2_FUNCTION_SATA_PASSTHROUGH: 198 desc = "sata_pass"; 199 break; 200 case MPI2_FUNCTION_DIAG_BUFFER_POST: 201 desc = "diag_buffer_post"; 202 break; 203 case MPI2_FUNCTION_DIAG_RELEASE: 204 desc = "diag_release"; 205 break; 206 case MPI2_FUNCTION_SMP_PASSTHROUGH: 207 desc = "smp_passthrough"; 208 break; 209 } 210 211 if (!desc) 212 return; 213 214 pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n", 215 ioc->name, calling_function_name, desc, smid); 216 217 if (!mpi_reply) 218 return; 219 220 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) 221 pr_info(MPT3SAS_FMT 222 "\tiocstatus(0x%04x), 
loginfo(0x%08x)\n", 223 ioc->name, le16_to_cpu(mpi_reply->IOCStatus), 224 le32_to_cpu(mpi_reply->IOCLogInfo)); 225 226 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 227 mpi_request->Function == 228 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 229 Mpi2SCSIIOReply_t *scsi_reply = 230 (Mpi2SCSIIOReply_t *)mpi_reply; 231 struct _sas_device *sas_device = NULL; 232 unsigned long flags; 233 234 spin_lock_irqsave(&ioc->sas_device_lock, flags); 235 sas_device = _ctl_sas_device_find_by_handle(ioc, 236 le16_to_cpu(scsi_reply->DevHandle)); 237 if (sas_device) { 238 pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n", 239 ioc->name, (unsigned long long) 240 sas_device->sas_address, sas_device->phy); 241 pr_warn(MPT3SAS_FMT 242 "\tenclosure_logical_id(0x%016llx), slot(%d)\n", 243 ioc->name, (unsigned long long) 244 sas_device->enclosure_logical_id, sas_device->slot); 245 } 246 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 247 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) 248 pr_info(MPT3SAS_FMT 249 "\tscsi_state(0x%02x), scsi_status" 250 "(0x%02x)\n", ioc->name, 251 scsi_reply->SCSIState, 252 scsi_reply->SCSIStatus); 253 } 254 } 255 256 /** 257 * mpt3sas_ctl_done - ctl module completion routine 258 * @ioc: per adapter object 259 * @smid: system request message index 260 * @msix_index: MSIX table index supplied by the OS 261 * @reply: reply message frame(lower 32bit addr) 262 * Context: none. 263 * 264 * The callback handler when using ioc->ctl_cb_idx. 265 * 266 * Return 1 meaning mf should be freed from _base_interrupt 267 * 0 means the mf is freed from this function. 268 */ 269 u8 270 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 271 u32 reply) 272 { 273 MPI2DefaultReply_t *mpi_reply; 274 Mpi2SCSIIOReply_t *scsiio_reply; 275 const void *sense_data; 276 u32 sz; 277 278 if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED) 279 return 1; 280 if (ioc->ctl_cmds.smid != smid) 281 return 1; 282 ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE; 283 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 284 if (mpi_reply) { 285 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 286 ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID; 287 /* get sense data */ 288 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 289 mpi_reply->Function == 290 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 291 scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply; 292 if (scsiio_reply->SCSIState & 293 MPI2_SCSI_STATE_AUTOSENSE_VALID) { 294 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, 295 le32_to_cpu(scsiio_reply->SenseCount)); 296 sense_data = mpt3sas_base_get_sense_buffer(ioc, 297 smid); 298 memcpy(ioc->ctl_cmds.sense, sense_data, sz); 299 } 300 } 301 } 302 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); 303 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING; 304 complete(&ioc->ctl_cmds.done); 305 return 1; 306 } 307 308 /** 309 * _ctl_check_event_type - determines when an event needs logging 310 * @ioc: per adapter object 311 * @event: firmware event 312 * 313 * The bitmask in ioc->event_type[] indicates which events should be 314 * be saved in the driver event_log. This bitmask is set by application. 315 * 316 * Returns 1 when event should be captured, or zero means no match. 
317 */ 318 static int 319 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) 320 { 321 u16 i; 322 u32 desired_event; 323 324 if (event >= 128 || !event || !ioc->event_log) 325 return 0; 326 327 desired_event = (1 << (event % 32)); 328 if (!desired_event) 329 desired_event = 1; 330 i = event / 32; 331 return desired_event & ioc->event_type[i]; 332 } 333 334 /** 335 * mpt3sas_ctl_add_to_event_log - add event 336 * @ioc: per adapter object 337 * @mpi_reply: reply message frame 338 * 339 * Return nothing. 340 */ 341 void 342 mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, 343 Mpi2EventNotificationReply_t *mpi_reply) 344 { 345 struct MPT3_IOCTL_EVENTS *event_log; 346 u16 event; 347 int i; 348 u32 sz, event_data_sz; 349 u8 send_aen = 0; 350 351 if (!ioc->event_log) 352 return; 353 354 event = le16_to_cpu(mpi_reply->Event); 355 356 if (_ctl_check_event_type(ioc, event)) { 357 358 /* insert entry into circular event_log */ 359 i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE; 360 event_log = ioc->event_log; 361 event_log[i].event = event; 362 event_log[i].context = ioc->event_context++; 363 364 event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4; 365 sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE); 366 memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE); 367 memcpy(event_log[i].data, mpi_reply->EventData, sz); 368 send_aen = 1; 369 } 370 371 /* This aen_event_read_flag flag is set until the 372 * application has read the event log. 373 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify. 374 */ 375 if (event == MPI2_EVENT_LOG_ENTRY_ADDED || 376 (send_aen && !ioc->aen_event_read_flag)) { 377 ioc->aen_event_read_flag = 1; 378 wake_up_interruptible(&ctl_poll_wait); 379 if (async_queue) 380 kill_fasync(&async_queue, SIGIO, POLL_IN); 381 } 382 } 383 384 /** 385 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time) 386 * @ioc: per adapter object 387 * @msix_index: MSIX table index supplied by the OS 388 * @reply: reply message frame(lower 32bit addr) 389 * Context: interrupt. 390 * 391 * This function merely adds a new work task into ioc->firmware_event_thread. 392 * The tasks are worked from _firmware_event_work in user context. 393 * 394 * Return 1 meaning mf should be freed from _base_interrupt 395 * 0 means the mf is freed from this function. 396 */ 397 u8 398 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, 399 u32 reply) 400 { 401 Mpi2EventNotificationReply_t *mpi_reply; 402 403 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 404 if (mpi_reply) 405 mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); 406 return 1; 407 } 408 409 /** 410 * _ctl_verify_adapter - validates ioc_number passed from application 411 * @ioc: per adapter object 412 * @iocpp: The ioc pointer is returned in this. 413 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & 414 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. 415 * 416 * Return (-1) means error, else ioc_number. 417 */ 418 static int 419 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, 420 int mpi_version) 421 { 422 struct MPT3SAS_ADAPTER *ioc; 423 int version = 0; 424 /* global ioc lock to protect controller on list operations */ 425 spin_lock(&gioc_lock); 426 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 427 if (ioc->id != ioc_number) 428 continue; 429 /* Check whether this ioctl command is from right 430 * ioctl device or not, if not continue the search. 
431 */ 432 version = ioc->hba_mpi_version_belonged; 433 /* MPI25_VERSION and MPI26_VERSION uses same ioctl 434 * device. 435 */ 436 if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) { 437 if ((version == MPI25_VERSION) || 438 (version == MPI26_VERSION)) 439 goto out; 440 else 441 continue; 442 } else { 443 if (version != mpi_version) 444 continue; 445 } 446 out: 447 spin_unlock(&gioc_lock); 448 *iocpp = ioc; 449 return ioc_number; 450 } 451 spin_unlock(&gioc_lock); 452 *iocpp = NULL; 453 return -1; 454 } 455 456 /** 457 * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) 458 * @ioc: per adapter object 459 * @reset_phase: phase 460 * 461 * The handler for doing any required cleanup or initialization. 462 * 463 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, 464 * MPT3_IOC_DONE_RESET 465 */ 466 void 467 mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) 468 { 469 int i; 470 u8 issue_reset; 471 472 switch (reset_phase) { 473 case MPT3_IOC_PRE_RESET: 474 dtmprintk(ioc, pr_info(MPT3SAS_FMT 475 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); 476 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 477 if (!(ioc->diag_buffer_status[i] & 478 MPT3_DIAG_BUFFER_IS_REGISTERED)) 479 continue; 480 if ((ioc->diag_buffer_status[i] & 481 MPT3_DIAG_BUFFER_IS_RELEASED)) 482 continue; 483 mpt3sas_send_diag_release(ioc, i, &issue_reset); 484 } 485 break; 486 case MPT3_IOC_AFTER_RESET: 487 dtmprintk(ioc, pr_info(MPT3SAS_FMT 488 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); 489 if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { 490 ioc->ctl_cmds.status |= MPT3_CMD_RESET; 491 mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); 492 complete(&ioc->ctl_cmds.done); 493 } 494 break; 495 case MPT3_IOC_DONE_RESET: 496 dtmprintk(ioc, pr_info(MPT3SAS_FMT 497 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); 498 499 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 500 if (!(ioc->diag_buffer_status[i] & 501 MPT3_DIAG_BUFFER_IS_REGISTERED)) 502 continue; 503 if ((ioc->diag_buffer_status[i] & 504 MPT3_DIAG_BUFFER_IS_RELEASED)) 505 continue; 506 ioc->diag_buffer_status[i] |= 507 MPT3_DIAG_BUFFER_IS_DIAG_RESET; 508 } 509 break; 510 } 511 } 512 513 /** 514 * _ctl_fasync - 515 * @fd - 516 * @filep - 517 * @mode - 518 * 519 * Called when application request fasyn callback handler. 520 */ 521 int 522 _ctl_fasync(int fd, struct file *filep, int mode) 523 { 524 return fasync_helper(fd, filep, mode, &async_queue); 525 } 526 527 /** 528 * _ctl_poll - 529 * @file - 530 * @wait - 531 * 532 */ 533 unsigned int 534 _ctl_poll(struct file *filep, poll_table *wait) 535 { 536 struct MPT3SAS_ADAPTER *ioc; 537 538 poll_wait(filep, &ctl_poll_wait, wait); 539 540 /* global ioc lock to protect controller on list operations */ 541 spin_lock(&gioc_lock); 542 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 543 if (ioc->aen_event_read_flag) { 544 spin_unlock(&gioc_lock); 545 return POLLIN | POLLRDNORM; 546 } 547 } 548 spin_unlock(&gioc_lock); 549 return 0; 550 } 551 552 /** 553 * _ctl_set_task_mid - assign an active smid to tm request 554 * @ioc: per adapter object 555 * @karg - (struct mpt3_ioctl_command) 556 * @tm_request - pointer to mf from user space 557 * 558 * Returns 0 when an smid if found, else fail. 559 * during failure, the reply frame is filled. 
560 */ 561 static int 562 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, 563 Mpi2SCSITaskManagementRequest_t *tm_request) 564 { 565 u8 found = 0; 566 u16 i; 567 u16 handle; 568 struct scsi_cmnd *scmd; 569 struct MPT3SAS_DEVICE *priv_data; 570 unsigned long flags; 571 Mpi2SCSITaskManagementReply_t *tm_reply; 572 u32 sz; 573 u32 lun; 574 char *desc = NULL; 575 576 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) 577 desc = "abort_task"; 578 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 579 desc = "query_task"; 580 else 581 return 0; 582 583 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); 584 585 handle = le16_to_cpu(tm_request->DevHandle); 586 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 587 for (i = ioc->scsiio_depth; i && !found; i--) { 588 scmd = ioc->scsi_lookup[i - 1].scmd; 589 if (scmd == NULL || scmd->device == NULL || 590 scmd->device->hostdata == NULL) 591 continue; 592 if (lun != scmd->device->lun) 593 continue; 594 priv_data = scmd->device->hostdata; 595 if (priv_data->sas_target == NULL) 596 continue; 597 if (priv_data->sas_target->handle != handle) 598 continue; 599 tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid); 600 found = 1; 601 } 602 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 603 604 if (!found) { 605 dctlprintk(ioc, pr_info(MPT3SAS_FMT 606 "%s: handle(0x%04x), lun(%d), no active mid!!\n", 607 ioc->name, 608 desc, le16_to_cpu(tm_request->DevHandle), lun)); 609 tm_reply = ioc->ctl_cmds.reply; 610 tm_reply->DevHandle = tm_request->DevHandle; 611 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 612 tm_reply->TaskType = tm_request->TaskType; 613 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; 614 tm_reply->VP_ID = tm_request->VP_ID; 615 tm_reply->VF_ID = tm_request->VF_ID; 616 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); 617 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, 618 sz)) 619 pr_err("failure at %s:%d/%s()!\n", __FILE__, 620 __LINE__, __func__); 621 return 1; 622 } 623 624 dctlprintk(ioc, pr_info(MPT3SAS_FMT 625 "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, 626 desc, le16_to_cpu(tm_request->DevHandle), lun, 627 le16_to_cpu(tm_request->TaskMID))); 628 return 0; 629 } 630 631 /** 632 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode 633 * @ioc: per adapter object 634 * @karg - (struct mpt3_ioctl_command) 635 * @mf - pointer to mf in user space 636 */ 637 static long 638 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, 639 void __user *mf) 640 { 641 MPI2RequestHeader_t *mpi_request = NULL, *request; 642 MPI2DefaultReply_t *mpi_reply; 643 u32 ioc_state; 644 u16 ioc_status; 645 u16 smid; 646 unsigned long timeout, timeleft; 647 u8 issue_reset; 648 u32 sz; 649 void *psge; 650 void *data_out = NULL; 651 dma_addr_t data_out_dma = 0; 652 size_t data_out_sz = 0; 653 void *data_in = NULL; 654 dma_addr_t data_in_dma = 0; 655 size_t data_in_sz = 0; 656 long ret; 657 u16 wait_state_count; 658 659 issue_reset = 0; 660 661 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 662 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 663 ioc->name, __func__); 664 ret = -EAGAIN; 665 goto out; 666 } 667 668 wait_state_count = 0; 669 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 670 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 671 if (wait_state_count++ == 10) { 672 pr_err(MPT3SAS_FMT 673 "%s: failed due to ioc not operational\n", 674 ioc->name, __func__); 675 ret = -EFAULT; 676 goto 
out; 677 } 678 ssleep(1); 679 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 680 pr_info(MPT3SAS_FMT 681 "%s: waiting for operational state(count=%d)\n", 682 ioc->name, 683 __func__, wait_state_count); 684 } 685 if (wait_state_count) 686 pr_info(MPT3SAS_FMT "%s: ioc is operational\n", 687 ioc->name, __func__); 688 689 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); 690 if (!mpi_request) { 691 pr_err(MPT3SAS_FMT 692 "%s: failed obtaining a memory for mpi_request\n", 693 ioc->name, __func__); 694 ret = -ENOMEM; 695 goto out; 696 } 697 698 /* Check for overflow and wraparound */ 699 if (karg.data_sge_offset * 4 > ioc->request_sz || 700 karg.data_sge_offset > (UINT_MAX / 4)) { 701 ret = -EINVAL; 702 goto out; 703 } 704 705 /* copy in request message frame from user */ 706 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { 707 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, 708 __func__); 709 ret = -EFAULT; 710 goto out; 711 } 712 713 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 714 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); 715 if (!smid) { 716 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 717 ioc->name, __func__); 718 ret = -EAGAIN; 719 goto out; 720 } 721 } else { 722 723 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL); 724 if (!smid) { 725 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 726 ioc->name, __func__); 727 ret = -EAGAIN; 728 goto out; 729 } 730 } 731 732 ret = 0; 733 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 734 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 735 request = mpt3sas_base_get_msg_frame(ioc, smid); 736 memcpy(request, mpi_request, karg.data_sge_offset*4); 737 ioc->ctl_cmds.smid = smid; 738 data_out_sz = karg.data_out_size; 739 data_in_sz = karg.data_in_size; 740 741 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 742 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 743 if (!le16_to_cpu(mpi_request->FunctionDependent1) || 744 le16_to_cpu(mpi_request->FunctionDependent1) > 745 ioc->facts.MaxDevHandle) { 746 ret = -EINVAL; 747 mpt3sas_base_free_smid(ioc, smid); 748 goto out; 749 } 750 } 751 752 /* obtain dma-able memory for data transfer */ 753 if (data_out_sz) /* WRITE */ { 754 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, 755 &data_out_dma); 756 if (!data_out) { 757 pr_err("failure at %s:%d/%s()!\n", __FILE__, 758 __LINE__, __func__); 759 ret = -ENOMEM; 760 mpt3sas_base_free_smid(ioc, smid); 761 goto out; 762 } 763 if (copy_from_user(data_out, karg.data_out_buf_ptr, 764 data_out_sz)) { 765 pr_err("failure at %s:%d/%s()!\n", __FILE__, 766 __LINE__, __func__); 767 ret = -EFAULT; 768 mpt3sas_base_free_smid(ioc, smid); 769 goto out; 770 } 771 } 772 773 if (data_in_sz) /* READ */ { 774 data_in = pci_alloc_consistent(ioc->pdev, data_in_sz, 775 &data_in_dma); 776 if (!data_in) { 777 pr_err("failure at %s:%d/%s()!\n", __FILE__, 778 __LINE__, __func__); 779 ret = -ENOMEM; 780 mpt3sas_base_free_smid(ioc, smid); 781 goto out; 782 } 783 } 784 785 psge = (void *)request + (karg.data_sge_offset*4); 786 787 /* send command to firmware */ 788 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); 789 790 init_completion(&ioc->ctl_cmds.done); 791 switch (mpi_request->Function) { 792 case MPI2_FUNCTION_SCSI_IO_REQUEST: 793 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 794 { 795 Mpi2SCSIIORequest_t *scsiio_request = 796 (Mpi2SCSIIORequest_t *)request; 797 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 798 scsiio_request->SenseBufferLowAddress = 799 
mpt3sas_base_get_sense_buffer_dma(ioc, smid); 800 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); 801 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 802 data_in_dma, data_in_sz); 803 804 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) 805 mpt3sas_base_put_smid_scsi_io(ioc, smid, 806 le16_to_cpu(mpi_request->FunctionDependent1)); 807 else 808 mpt3sas_base_put_smid_default(ioc, smid); 809 break; 810 } 811 case MPI2_FUNCTION_SCSI_TASK_MGMT: 812 { 813 Mpi2SCSITaskManagementRequest_t *tm_request = 814 (Mpi2SCSITaskManagementRequest_t *)request; 815 816 dtmprintk(ioc, pr_info(MPT3SAS_FMT 817 "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", 818 ioc->name, 819 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); 820 821 if (tm_request->TaskType == 822 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || 823 tm_request->TaskType == 824 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { 825 if (_ctl_set_task_mid(ioc, &karg, tm_request)) { 826 mpt3sas_base_free_smid(ioc, smid); 827 goto out; 828 } 829 } 830 831 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( 832 tm_request->DevHandle)); 833 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 834 data_in_dma, data_in_sz); 835 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); 836 break; 837 } 838 case MPI2_FUNCTION_SMP_PASSTHROUGH: 839 { 840 Mpi2SmpPassthroughRequest_t *smp_request = 841 (Mpi2SmpPassthroughRequest_t *)mpi_request; 842 u8 *data; 843 844 /* ioc determines which port to use */ 845 smp_request->PhysicalPort = 0xFF; 846 if (smp_request->PassthroughFlags & 847 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) 848 data = (u8 *)&smp_request->SGL; 849 else { 850 if (unlikely(data_out == NULL)) { 851 pr_err("failure at %s:%d/%s()!\n", 852 __FILE__, __LINE__, __func__); 853 mpt3sas_base_free_smid(ioc, smid); 854 ret = -EINVAL; 855 goto out; 856 } 857 data = data_out; 858 } 859 860 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { 861 ioc->ioc_link_reset_in_progress = 1; 862 ioc->ignore_loginfos = 1; 863 } 864 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 865 data_in_sz); 866 mpt3sas_base_put_smid_default(ioc, smid); 867 break; 868 } 869 case MPI2_FUNCTION_SATA_PASSTHROUGH: 870 case MPI2_FUNCTION_FW_DOWNLOAD: 871 case MPI2_FUNCTION_FW_UPLOAD: 872 { 873 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 874 data_in_sz); 875 mpt3sas_base_put_smid_default(ioc, smid); 876 break; 877 } 878 case MPI2_FUNCTION_TOOLBOX: 879 { 880 Mpi2ToolboxCleanRequest_t *toolbox_request = 881 (Mpi2ToolboxCleanRequest_t *)mpi_request; 882 883 if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) { 884 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 885 data_in_dma, data_in_sz); 886 } else { 887 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 888 data_in_dma, data_in_sz); 889 } 890 mpt3sas_base_put_smid_default(ioc, smid); 891 break; 892 } 893 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: 894 { 895 Mpi2SasIoUnitControlRequest_t *sasiounit_request = 896 (Mpi2SasIoUnitControlRequest_t *)mpi_request; 897 898 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET 899 || sasiounit_request->Operation == 900 MPI2_SAS_OP_PHY_LINK_RESET) { 901 ioc->ioc_link_reset_in_progress = 1; 902 ioc->ignore_loginfos = 1; 903 } 904 /* drop to default case for posting the request */ 905 } 906 default: 907 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 908 data_in_dma, data_in_sz); 909 mpt3sas_base_put_smid_default(ioc, smid); 910 break; 911 } 912 913 if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) 914 timeout = 
MPT3_IOCTL_DEFAULT_TIMEOUT; 915 else 916 timeout = karg.timeout; 917 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 918 timeout*HZ); 919 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 920 Mpi2SCSITaskManagementRequest_t *tm_request = 921 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 922 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu( 923 tm_request->DevHandle)); 924 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); 925 } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH || 926 mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) && 927 ioc->ioc_link_reset_in_progress) { 928 ioc->ioc_link_reset_in_progress = 0; 929 ioc->ignore_loginfos = 0; 930 } 931 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 932 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 933 __func__); 934 _debug_dump_mf(mpi_request, karg.data_sge_offset); 935 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 936 issue_reset = 1; 937 goto issue_host_reset; 938 } 939 940 mpi_reply = ioc->ctl_cmds.reply; 941 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 942 943 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && 944 (ioc->logging_level & MPT_DEBUG_TM)) { 945 Mpi2SCSITaskManagementReply_t *tm_reply = 946 (Mpi2SCSITaskManagementReply_t *)mpi_reply; 947 948 pr_info(MPT3SAS_FMT "TASK_MGMT: " \ 949 "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " 950 "TerminationCount(0x%08x)\n", ioc->name, 951 le16_to_cpu(tm_reply->IOCStatus), 952 le32_to_cpu(tm_reply->IOCLogInfo), 953 le32_to_cpu(tm_reply->TerminationCount)); 954 } 955 956 /* copy out xdata to user */ 957 if (data_in_sz) { 958 if (copy_to_user(karg.data_in_buf_ptr, data_in, 959 data_in_sz)) { 960 pr_err("failure at %s:%d/%s()!\n", __FILE__, 961 __LINE__, __func__); 962 ret = -ENODATA; 963 goto out; 964 } 965 } 966 967 /* copy out reply message frame to user */ 968 if (karg.max_reply_bytes) { 969 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); 970 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, 971 sz)) { 972 pr_err("failure at %s:%d/%s()!\n", __FILE__, 973 __LINE__, __func__); 974 ret = -ENODATA; 975 goto out; 976 } 977 } 978 979 /* copy out sense to user */ 980 if (karg.max_sense_bytes && (mpi_request->Function == 981 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == 982 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { 983 sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE); 984 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, 985 sz)) { 986 pr_err("failure at %s:%d/%s()!\n", __FILE__, 987 __LINE__, __func__); 988 ret = -ENODATA; 989 goto out; 990 } 991 } 992 993 issue_host_reset: 994 if (issue_reset) { 995 ret = -ENODATA; 996 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 997 mpi_request->Function == 998 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || 999 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { 1000 pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n", 1001 ioc->name, 1002 le16_to_cpu(mpi_request->FunctionDependent1)); 1003 mpt3sas_halt_firmware(ioc); 1004 mpt3sas_scsih_issue_tm(ioc, 1005 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 1006 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 1007 TM_MUTEX_ON); 1008 } else 1009 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1010 FORCE_BIG_HAMMER); 1011 } 1012 1013 out: 1014 1015 /* free memory associated with sg buffers */ 1016 if (data_in) 1017 pci_free_consistent(ioc->pdev, data_in_sz, data_in, 1018 data_in_dma); 1019 1020 if (data_out) 1021 
pci_free_consistent(ioc->pdev, data_out_sz, data_out, 1022 data_out_dma); 1023 1024 kfree(mpi_request); 1025 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 1026 return ret; 1027 } 1028 1029 /** 1030 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode 1031 * @ioc: per adapter object 1032 * @arg - user space buffer containing ioctl content 1033 */ 1034 static long 1035 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1036 { 1037 struct mpt3_ioctl_iocinfo karg; 1038 1039 if (copy_from_user(&karg, arg, sizeof(karg))) { 1040 pr_err("failure at %s:%d/%s()!\n", 1041 __FILE__, __LINE__, __func__); 1042 return -EFAULT; 1043 } 1044 1045 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1046 __func__)); 1047 1048 memset(&karg, 0 , sizeof(karg)); 1049 if (ioc->pfacts) 1050 karg.port_number = ioc->pfacts[0].PortNumber; 1051 karg.hw_rev = ioc->pdev->revision; 1052 karg.pci_id = ioc->pdev->device; 1053 karg.subsystem_device = ioc->pdev->subsystem_device; 1054 karg.subsystem_vendor = ioc->pdev->subsystem_vendor; 1055 karg.pci_information.u.bits.bus = ioc->pdev->bus->number; 1056 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); 1057 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); 1058 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); 1059 karg.firmware_version = ioc->facts.FWVersion.Word; 1060 strcpy(karg.driver_version, ioc->driver_name); 1061 strcat(karg.driver_version, "-"); 1062 switch (ioc->hba_mpi_version_belonged) { 1063 case MPI2_VERSION: 1064 if (ioc->is_warpdrive) 1065 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200; 1066 else 1067 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; 1068 strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); 1069 break; 1070 case MPI25_VERSION: 1071 case MPI26_VERSION: 1072 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; 1073 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); 1074 break; 1075 } 1076 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 1077 1078 if (copy_to_user(arg, &karg, sizeof(karg))) { 1079 pr_err("failure at %s:%d/%s()!\n", 1080 __FILE__, __LINE__, __func__); 1081 return -EFAULT; 1082 } 1083 return 0; 1084 } 1085 1086 /** 1087 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode 1088 * @ioc: per adapter object 1089 * @arg - user space buffer containing ioctl content 1090 */ 1091 static long 1092 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1093 { 1094 struct mpt3_ioctl_eventquery karg; 1095 1096 if (copy_from_user(&karg, arg, sizeof(karg))) { 1097 pr_err("failure at %s:%d/%s()!\n", 1098 __FILE__, __LINE__, __func__); 1099 return -EFAULT; 1100 } 1101 1102 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1103 __func__)); 1104 1105 karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE; 1106 memcpy(karg.event_types, ioc->event_type, 1107 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); 1108 1109 if (copy_to_user(arg, &karg, sizeof(karg))) { 1110 pr_err("failure at %s:%d/%s()!\n", 1111 __FILE__, __LINE__, __func__); 1112 return -EFAULT; 1113 } 1114 return 0; 1115 } 1116 1117 /** 1118 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode 1119 * @ioc: per adapter object 1120 * @arg - user space buffer containing ioctl content 1121 */ 1122 static long 1123 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1124 { 1125 struct mpt3_ioctl_eventenable karg; 1126 1127 if (copy_from_user(&karg, arg, sizeof(karg))) { 1128 pr_err("failure at %s:%d/%s()!\n", 1129 __FILE__, __LINE__, __func__); 1130 return -EFAULT; 1131 } 1132 
1133 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1134 __func__)); 1135 1136 memcpy(ioc->event_type, karg.event_types, 1137 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); 1138 mpt3sas_base_validate_event_type(ioc, ioc->event_type); 1139 1140 if (ioc->event_log) 1141 return 0; 1142 /* initialize event_log */ 1143 ioc->event_context = 0; 1144 ioc->aen_event_read_flag = 0; 1145 ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE, 1146 sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL); 1147 if (!ioc->event_log) { 1148 pr_err("failure at %s:%d/%s()!\n", 1149 __FILE__, __LINE__, __func__); 1150 return -ENOMEM; 1151 } 1152 return 0; 1153 } 1154 1155 /** 1156 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode 1157 * @ioc: per adapter object 1158 * @arg - user space buffer containing ioctl content 1159 */ 1160 static long 1161 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1162 { 1163 struct mpt3_ioctl_eventreport karg; 1164 u32 number_bytes, max_events, max; 1165 struct mpt3_ioctl_eventreport __user *uarg = arg; 1166 1167 if (copy_from_user(&karg, arg, sizeof(karg))) { 1168 pr_err("failure at %s:%d/%s()!\n", 1169 __FILE__, __LINE__, __func__); 1170 return -EFAULT; 1171 } 1172 1173 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1174 __func__)); 1175 1176 number_bytes = karg.hdr.max_data_size - 1177 sizeof(struct mpt3_ioctl_header); 1178 max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS); 1179 max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events); 1180 1181 /* If fewer than 1 event is requested, there must have 1182 * been some type of error. 1183 */ 1184 if (!max || !ioc->event_log) 1185 return -ENODATA; 1186 1187 number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS); 1188 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { 1189 pr_err("failure at %s:%d/%s()!\n", 1190 __FILE__, __LINE__, __func__); 1191 return -EFAULT; 1192 } 1193 1194 /* reset flag so SIGIO can restart */ 1195 ioc->aen_event_read_flag = 0; 1196 return 0; 1197 } 1198 1199 /** 1200 * _ctl_do_reset - main handler for MPT3HARDRESET opcode 1201 * @ioc: per adapter object 1202 * @arg - user space buffer containing ioctl content 1203 */ 1204 static long 1205 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1206 { 1207 struct mpt3_ioctl_diag_reset karg; 1208 int retval; 1209 1210 if (copy_from_user(&karg, arg, sizeof(karg))) { 1211 pr_err("failure at %s:%d/%s()!\n", 1212 __FILE__, __LINE__, __func__); 1213 return -EFAULT; 1214 } 1215 1216 if (ioc->shost_recovery || ioc->pci_error_recovery || 1217 ioc->is_driver_loading) 1218 return -EAGAIN; 1219 1220 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1221 __func__)); 1222 1223 retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1224 FORCE_BIG_HAMMER); 1225 pr_info(MPT3SAS_FMT "host reset: %s\n", 1226 ioc->name, ((!retval) ? 
"SUCCESS" : "FAILED")); 1227 return 0; 1228 } 1229 1230 /** 1231 * _ctl_btdh_search_sas_device - searching for sas device 1232 * @ioc: per adapter object 1233 * @btdh: btdh ioctl payload 1234 */ 1235 static int 1236 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, 1237 struct mpt3_ioctl_btdh_mapping *btdh) 1238 { 1239 struct _sas_device *sas_device; 1240 unsigned long flags; 1241 int rc = 0; 1242 1243 if (list_empty(&ioc->sas_device_list)) 1244 return rc; 1245 1246 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1247 list_for_each_entry(sas_device, &ioc->sas_device_list, list) { 1248 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1249 btdh->handle == sas_device->handle) { 1250 btdh->bus = sas_device->channel; 1251 btdh->id = sas_device->id; 1252 rc = 1; 1253 goto out; 1254 } else if (btdh->bus == sas_device->channel && btdh->id == 1255 sas_device->id && btdh->handle == 0xFFFF) { 1256 btdh->handle = sas_device->handle; 1257 rc = 1; 1258 goto out; 1259 } 1260 } 1261 out: 1262 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 1263 return rc; 1264 } 1265 1266 /** 1267 * _ctl_btdh_search_raid_device - searching for raid device 1268 * @ioc: per adapter object 1269 * @btdh: btdh ioctl payload 1270 */ 1271 static int 1272 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, 1273 struct mpt3_ioctl_btdh_mapping *btdh) 1274 { 1275 struct _raid_device *raid_device; 1276 unsigned long flags; 1277 int rc = 0; 1278 1279 if (list_empty(&ioc->raid_device_list)) 1280 return rc; 1281 1282 spin_lock_irqsave(&ioc->raid_device_lock, flags); 1283 list_for_each_entry(raid_device, &ioc->raid_device_list, list) { 1284 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && 1285 btdh->handle == raid_device->handle) { 1286 btdh->bus = raid_device->channel; 1287 btdh->id = raid_device->id; 1288 rc = 1; 1289 goto out; 1290 } else if (btdh->bus == raid_device->channel && btdh->id == 1291 raid_device->id && btdh->handle == 0xFFFF) { 1292 btdh->handle = raid_device->handle; 1293 rc = 1; 1294 goto out; 1295 } 1296 } 1297 out: 1298 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 1299 return rc; 1300 } 1301 1302 /** 1303 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode 1304 * @ioc: per adapter object 1305 * @arg - user space buffer containing ioctl content 1306 */ 1307 static long 1308 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1309 { 1310 struct mpt3_ioctl_btdh_mapping karg; 1311 int rc; 1312 1313 if (copy_from_user(&karg, arg, sizeof(karg))) { 1314 pr_err("failure at %s:%d/%s()!\n", 1315 __FILE__, __LINE__, __func__); 1316 return -EFAULT; 1317 } 1318 1319 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1320 __func__)); 1321 1322 rc = _ctl_btdh_search_sas_device(ioc, &karg); 1323 if (!rc) 1324 _ctl_btdh_search_raid_device(ioc, &karg); 1325 1326 if (copy_to_user(arg, &karg, sizeof(karg))) { 1327 pr_err("failure at %s:%d/%s()!\n", 1328 __FILE__, __LINE__, __func__); 1329 return -EFAULT; 1330 } 1331 return 0; 1332 } 1333 1334 /** 1335 * _ctl_diag_capability - return diag buffer capability 1336 * @ioc: per adapter object 1337 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED 1338 * 1339 * returns 1 when diag buffer support is enabled in firmware 1340 */ 1341 static u8 1342 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) 1343 { 1344 u8 rc = 0; 1345 1346 switch (buffer_type) { 1347 case MPI2_DIAG_BUF_TYPE_TRACE: 1348 if (ioc->facts.IOCCapabilities & 1349 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) 1350 rc = 1; 1351 break; 
1352 case MPI2_DIAG_BUF_TYPE_SNAPSHOT: 1353 if (ioc->facts.IOCCapabilities & 1354 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) 1355 rc = 1; 1356 break; 1357 case MPI2_DIAG_BUF_TYPE_EXTENDED: 1358 if (ioc->facts.IOCCapabilities & 1359 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) 1360 rc = 1; 1361 } 1362 1363 return rc; 1364 } 1365 1366 1367 /** 1368 * _ctl_diag_register_2 - wrapper for registering diag buffer support 1369 * @ioc: per adapter object 1370 * @diag_register: the diag_register struct passed in from user space 1371 * 1372 */ 1373 static long 1374 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, 1375 struct mpt3_diag_register *diag_register) 1376 { 1377 int rc, i; 1378 void *request_data = NULL; 1379 dma_addr_t request_data_dma; 1380 u32 request_data_sz = 0; 1381 Mpi2DiagBufferPostRequest_t *mpi_request; 1382 Mpi2DiagBufferPostReply_t *mpi_reply; 1383 u8 buffer_type; 1384 unsigned long timeleft; 1385 u16 smid; 1386 u16 ioc_status; 1387 u32 ioc_state; 1388 u8 issue_reset = 0; 1389 1390 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1391 __func__)); 1392 1393 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1394 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1395 pr_err(MPT3SAS_FMT 1396 "%s: failed due to ioc not operational\n", 1397 ioc->name, __func__); 1398 rc = -EAGAIN; 1399 goto out; 1400 } 1401 1402 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 1403 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 1404 ioc->name, __func__); 1405 rc = -EAGAIN; 1406 goto out; 1407 } 1408 1409 buffer_type = diag_register->buffer_type; 1410 if (!_ctl_diag_capability(ioc, buffer_type)) { 1411 pr_err(MPT3SAS_FMT 1412 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1413 ioc->name, __func__, buffer_type); 1414 return -EPERM; 1415 } 1416 1417 if (ioc->diag_buffer_status[buffer_type] & 1418 MPT3_DIAG_BUFFER_IS_REGISTERED) { 1419 pr_err(MPT3SAS_FMT 1420 "%s: already has a registered buffer for buffer_type(0x%02x)\n", 1421 ioc->name, __func__, 1422 buffer_type); 1423 return -EINVAL; 1424 } 1425 1426 if (diag_register->requested_buffer_size % 4) { 1427 pr_err(MPT3SAS_FMT 1428 "%s: the requested_buffer_size is not 4 byte aligned\n", 1429 ioc->name, __func__); 1430 return -EINVAL; 1431 } 1432 1433 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 1434 if (!smid) { 1435 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 1436 ioc->name, __func__); 1437 rc = -EAGAIN; 1438 goto out; 1439 } 1440 1441 rc = 0; 1442 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 1443 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 1444 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1445 ioc->ctl_cmds.smid = smid; 1446 1447 request_data = ioc->diag_buffer[buffer_type]; 1448 request_data_sz = diag_register->requested_buffer_size; 1449 ioc->unique_id[buffer_type] = diag_register->unique_id; 1450 ioc->diag_buffer_status[buffer_type] = 0; 1451 memcpy(ioc->product_specific[buffer_type], 1452 diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); 1453 ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; 1454 1455 if (request_data) { 1456 request_data_dma = ioc->diag_buffer_dma[buffer_type]; 1457 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) { 1458 pci_free_consistent(ioc->pdev, 1459 ioc->diag_buffer_sz[buffer_type], 1460 request_data, request_data_dma); 1461 request_data = NULL; 1462 } 1463 } 1464 1465 if (request_data == NULL) { 1466 ioc->diag_buffer_sz[buffer_type] = 0; 1467 ioc->diag_buffer_dma[buffer_type] = 0; 1468 request_data = pci_alloc_consistent( 1469 ioc->pdev, request_data_sz, 
&request_data_dma); 1470 if (request_data == NULL) { 1471 pr_err(MPT3SAS_FMT "%s: failed allocating memory" \ 1472 " for diag buffers, requested size(%d)\n", 1473 ioc->name, __func__, request_data_sz); 1474 mpt3sas_base_free_smid(ioc, smid); 1475 return -ENOMEM; 1476 } 1477 ioc->diag_buffer[buffer_type] = request_data; 1478 ioc->diag_buffer_sz[buffer_type] = request_data_sz; 1479 ioc->diag_buffer_dma[buffer_type] = request_data_dma; 1480 } 1481 1482 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 1483 mpi_request->BufferType = diag_register->buffer_type; 1484 mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); 1485 mpi_request->BufferAddress = cpu_to_le64(request_data_dma); 1486 mpi_request->BufferLength = cpu_to_le32(request_data_sz); 1487 mpi_request->VF_ID = 0; /* TODO */ 1488 mpi_request->VP_ID = 0; 1489 1490 dctlprintk(ioc, pr_info(MPT3SAS_FMT 1491 "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n", 1492 ioc->name, __func__, request_data, 1493 (unsigned long long)request_data_dma, 1494 le32_to_cpu(mpi_request->BufferLength))); 1495 1496 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 1497 mpi_request->ProductSpecific[i] = 1498 cpu_to_le32(ioc->product_specific[buffer_type][i]); 1499 1500 init_completion(&ioc->ctl_cmds.done); 1501 mpt3sas_base_put_smid_default(ioc, smid); 1502 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1503 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1504 1505 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1506 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 1507 __func__); 1508 _debug_dump_mf(mpi_request, 1509 sizeof(Mpi2DiagBufferPostRequest_t)/4); 1510 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 1511 issue_reset = 1; 1512 goto issue_host_reset; 1513 } 1514 1515 /* process the completed Reply Message Frame */ 1516 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 1517 pr_err(MPT3SAS_FMT "%s: no reply message\n", 1518 ioc->name, __func__); 1519 rc = -EFAULT; 1520 goto out; 1521 } 1522 1523 mpi_reply = ioc->ctl_cmds.reply; 1524 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 1525 1526 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1527 ioc->diag_buffer_status[buffer_type] |= 1528 MPT3_DIAG_BUFFER_IS_REGISTERED; 1529 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 1530 ioc->name, __func__)); 1531 } else { 1532 pr_info(MPT3SAS_FMT 1533 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 1534 ioc->name, __func__, 1535 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 1536 rc = -EFAULT; 1537 } 1538 1539 issue_host_reset: 1540 if (issue_reset) 1541 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1542 FORCE_BIG_HAMMER); 1543 1544 out: 1545 1546 if (rc && request_data) 1547 pci_free_consistent(ioc->pdev, request_data_sz, 1548 request_data, request_data_dma); 1549 1550 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 1551 return rc; 1552 } 1553 1554 /** 1555 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time 1556 * @ioc: per adapter object 1557 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 1558 * 1559 * This is called when command line option diag_buffer_enable is enabled 1560 * at driver load time. 
1561 */ 1562 void 1563 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) 1564 { 1565 struct mpt3_diag_register diag_register; 1566 1567 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 1568 1569 if (bits_to_register & 1) { 1570 pr_info(MPT3SAS_FMT "registering trace buffer support\n", 1571 ioc->name); 1572 ioc->diag_trigger_master.MasterData = 1573 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 1574 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 1575 /* register for 2MB buffers */ 1576 diag_register.requested_buffer_size = 2 * (1024 * 1024); 1577 diag_register.unique_id = 0x7075900; 1578 _ctl_diag_register_2(ioc, &diag_register); 1579 } 1580 1581 if (bits_to_register & 2) { 1582 pr_info(MPT3SAS_FMT "registering snapshot buffer support\n", 1583 ioc->name); 1584 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; 1585 /* register for 2MB buffers */ 1586 diag_register.requested_buffer_size = 2 * (1024 * 1024); 1587 diag_register.unique_id = 0x7075901; 1588 _ctl_diag_register_2(ioc, &diag_register); 1589 } 1590 1591 if (bits_to_register & 4) { 1592 pr_info(MPT3SAS_FMT "registering extended buffer support\n", 1593 ioc->name); 1594 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; 1595 /* register for 2MB buffers */ 1596 diag_register.requested_buffer_size = 2 * (1024 * 1024); 1597 diag_register.unique_id = 0x7075901; 1598 _ctl_diag_register_2(ioc, &diag_register); 1599 } 1600 } 1601 1602 /** 1603 * _ctl_diag_register - application register with driver 1604 * @ioc: per adapter object 1605 * @arg - user space buffer containing ioctl content 1606 * 1607 * This will allow the driver to setup any required buffers that will be 1608 * needed by firmware to communicate with the driver. 1609 */ 1610 static long 1611 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1612 { 1613 struct mpt3_diag_register karg; 1614 long rc; 1615 1616 if (copy_from_user(&karg, arg, sizeof(karg))) { 1617 pr_err("failure at %s:%d/%s()!\n", 1618 __FILE__, __LINE__, __func__); 1619 return -EFAULT; 1620 } 1621 1622 rc = _ctl_diag_register_2(ioc, &karg); 1623 return rc; 1624 } 1625 1626 /** 1627 * _ctl_diag_unregister - application unregister with driver 1628 * @ioc: per adapter object 1629 * @arg - user space buffer containing ioctl content 1630 * 1631 * This will allow the driver to cleanup any memory allocated for diag 1632 * messages and to free up any resources. 
1633 */ 1634 static long 1635 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1636 { 1637 struct mpt3_diag_unregister karg; 1638 void *request_data; 1639 dma_addr_t request_data_dma; 1640 u32 request_data_sz; 1641 u8 buffer_type; 1642 1643 if (copy_from_user(&karg, arg, sizeof(karg))) { 1644 pr_err("failure at %s:%d/%s()!\n", 1645 __FILE__, __LINE__, __func__); 1646 return -EFAULT; 1647 } 1648 1649 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1650 __func__)); 1651 1652 buffer_type = karg.unique_id & 0x000000ff; 1653 if (!_ctl_diag_capability(ioc, buffer_type)) { 1654 pr_err(MPT3SAS_FMT 1655 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1656 ioc->name, __func__, buffer_type); 1657 return -EPERM; 1658 } 1659 1660 if ((ioc->diag_buffer_status[buffer_type] & 1661 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1662 pr_err(MPT3SAS_FMT 1663 "%s: buffer_type(0x%02x) is not registered\n", 1664 ioc->name, __func__, buffer_type); 1665 return -EINVAL; 1666 } 1667 if ((ioc->diag_buffer_status[buffer_type] & 1668 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { 1669 pr_err(MPT3SAS_FMT 1670 "%s: buffer_type(0x%02x) has not been released\n", 1671 ioc->name, __func__, buffer_type); 1672 return -EINVAL; 1673 } 1674 1675 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1676 pr_err(MPT3SAS_FMT 1677 "%s: unique_id(0x%08x) is not registered\n", 1678 ioc->name, __func__, karg.unique_id); 1679 return -EINVAL; 1680 } 1681 1682 request_data = ioc->diag_buffer[buffer_type]; 1683 if (!request_data) { 1684 pr_err(MPT3SAS_FMT 1685 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", 1686 ioc->name, __func__, buffer_type); 1687 return -ENOMEM; 1688 } 1689 1690 request_data_sz = ioc->diag_buffer_sz[buffer_type]; 1691 request_data_dma = ioc->diag_buffer_dma[buffer_type]; 1692 pci_free_consistent(ioc->pdev, request_data_sz, 1693 request_data, request_data_dma); 1694 ioc->diag_buffer[buffer_type] = NULL; 1695 ioc->diag_buffer_status[buffer_type] = 0; 1696 return 0; 1697 } 1698 1699 /** 1700 * _ctl_diag_query - query relevant info associated with diag buffers 1701 * @ioc: per adapter object 1702 * @arg - user space buffer containing ioctl content 1703 * 1704 * The application will send only buffer_type and unique_id. Driver will 1705 * inspect unique_id first, if valid, fill in all the info. If unique_id is 1706 * 0x00, the driver will return info specified by Buffer Type. 
1707 */ 1708 static long 1709 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1710 { 1711 struct mpt3_diag_query karg; 1712 void *request_data; 1713 int i; 1714 u8 buffer_type; 1715 1716 if (copy_from_user(&karg, arg, sizeof(karg))) { 1717 pr_err("failure at %s:%d/%s()!\n", 1718 __FILE__, __LINE__, __func__); 1719 return -EFAULT; 1720 } 1721 1722 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1723 __func__)); 1724 1725 karg.application_flags = 0; 1726 buffer_type = karg.buffer_type; 1727 1728 if (!_ctl_diag_capability(ioc, buffer_type)) { 1729 pr_err(MPT3SAS_FMT 1730 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1731 ioc->name, __func__, buffer_type); 1732 return -EPERM; 1733 } 1734 1735 if ((ioc->diag_buffer_status[buffer_type] & 1736 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1737 pr_err(MPT3SAS_FMT 1738 "%s: buffer_type(0x%02x) is not registered\n", 1739 ioc->name, __func__, buffer_type); 1740 return -EINVAL; 1741 } 1742 1743 if (karg.unique_id & 0xffffff00) { 1744 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1745 pr_err(MPT3SAS_FMT 1746 "%s: unique_id(0x%08x) is not registered\n", 1747 ioc->name, __func__, karg.unique_id); 1748 return -EINVAL; 1749 } 1750 } 1751 1752 request_data = ioc->diag_buffer[buffer_type]; 1753 if (!request_data) { 1754 pr_err(MPT3SAS_FMT 1755 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 1756 ioc->name, __func__, buffer_type); 1757 return -ENOMEM; 1758 } 1759 1760 if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) 1761 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1762 MPT3_APP_FLAGS_BUFFER_VALID); 1763 else 1764 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | 1765 MPT3_APP_FLAGS_BUFFER_VALID | 1766 MPT3_APP_FLAGS_FW_BUFFER_ACCESS); 1767 1768 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 1769 karg.product_specific[i] = 1770 ioc->product_specific[buffer_type][i]; 1771 1772 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; 1773 karg.driver_added_buffer_size = 0; 1774 karg.unique_id = ioc->unique_id[buffer_type]; 1775 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; 1776 1777 if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { 1778 pr_err(MPT3SAS_FMT 1779 "%s: unable to write mpt3_diag_query data @ %p\n", 1780 ioc->name, __func__, arg); 1781 return -EFAULT; 1782 } 1783 return 0; 1784 } 1785 1786 /** 1787 * mpt3sas_send_diag_release - Diag Release Message 1788 * @ioc: per adapter object 1789 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED 1790 * @issue_reset - specifies whether host reset is required. 
1791 * 1792 */ 1793 int 1794 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, 1795 u8 *issue_reset) 1796 { 1797 Mpi2DiagReleaseRequest_t *mpi_request; 1798 Mpi2DiagReleaseReply_t *mpi_reply; 1799 u16 smid; 1800 u16 ioc_status; 1801 u32 ioc_state; 1802 int rc; 1803 unsigned long timeleft; 1804 1805 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1806 __func__)); 1807 1808 rc = 0; 1809 *issue_reset = 0; 1810 1811 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 1812 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 1813 if (ioc->diag_buffer_status[buffer_type] & 1814 MPT3_DIAG_BUFFER_IS_REGISTERED) 1815 ioc->diag_buffer_status[buffer_type] |= 1816 MPT3_DIAG_BUFFER_IS_RELEASED; 1817 dctlprintk(ioc, pr_info(MPT3SAS_FMT 1818 "%s: skipping due to FAULT state\n", ioc->name, 1819 __func__)); 1820 rc = -EAGAIN; 1821 goto out; 1822 } 1823 1824 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 1825 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 1826 ioc->name, __func__); 1827 rc = -EAGAIN; 1828 goto out; 1829 } 1830 1831 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 1832 if (!smid) { 1833 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 1834 ioc->name, __func__); 1835 rc = -EAGAIN; 1836 goto out; 1837 } 1838 1839 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 1840 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 1841 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1842 ioc->ctl_cmds.smid = smid; 1843 1844 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; 1845 mpi_request->BufferType = buffer_type; 1846 mpi_request->VF_ID = 0; /* TODO */ 1847 mpi_request->VP_ID = 0; 1848 1849 init_completion(&ioc->ctl_cmds.done); 1850 mpt3sas_base_put_smid_default(ioc, smid); 1851 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1852 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1853 1854 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1855 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 1856 __func__); 1857 _debug_dump_mf(mpi_request, 1858 sizeof(Mpi2DiagReleaseRequest_t)/4); 1859 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 1860 *issue_reset = 1; 1861 rc = -EFAULT; 1862 goto out; 1863 } 1864 1865 /* process the completed Reply Message Frame */ 1866 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 1867 pr_err(MPT3SAS_FMT "%s: no reply message\n", 1868 ioc->name, __func__); 1869 rc = -EFAULT; 1870 goto out; 1871 } 1872 1873 mpi_reply = ioc->ctl_cmds.reply; 1874 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 1875 1876 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 1877 ioc->diag_buffer_status[buffer_type] |= 1878 MPT3_DIAG_BUFFER_IS_RELEASED; 1879 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 1880 ioc->name, __func__)); 1881 } else { 1882 pr_info(MPT3SAS_FMT 1883 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 1884 ioc->name, __func__, 1885 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 1886 rc = -EFAULT; 1887 } 1888 1889 out: 1890 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 1891 return rc; 1892 } 1893 1894 /** 1895 * _ctl_diag_release - request to send Diag Release Message to firmware 1896 * @arg - user space buffer containing ioctl content 1897 * 1898 * This allows ownership of the specified buffer to returned to the driver, 1899 * allowing an application to read the buffer without fear that firmware is 1900 * overwritting information in the buffer. 
1901 */ 1902 static long 1903 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1904 { 1905 struct mpt3_diag_release karg; 1906 void *request_data; 1907 int rc; 1908 u8 buffer_type; 1909 u8 issue_reset = 0; 1910 1911 if (copy_from_user(&karg, arg, sizeof(karg))) { 1912 pr_err("failure at %s:%d/%s()!\n", 1913 __FILE__, __LINE__, __func__); 1914 return -EFAULT; 1915 } 1916 1917 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1918 __func__)); 1919 1920 buffer_type = karg.unique_id & 0x000000ff; 1921 if (!_ctl_diag_capability(ioc, buffer_type)) { 1922 pr_err(MPT3SAS_FMT 1923 "%s: doesn't have capability for buffer_type(0x%02x)\n", 1924 ioc->name, __func__, buffer_type); 1925 return -EPERM; 1926 } 1927 1928 if ((ioc->diag_buffer_status[buffer_type] & 1929 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 1930 pr_err(MPT3SAS_FMT 1931 "%s: buffer_type(0x%02x) is not registered\n", 1932 ioc->name, __func__, buffer_type); 1933 return -EINVAL; 1934 } 1935 1936 if (karg.unique_id != ioc->unique_id[buffer_type]) { 1937 pr_err(MPT3SAS_FMT 1938 "%s: unique_id(0x%08x) is not registered\n", 1939 ioc->name, __func__, karg.unique_id); 1940 return -EINVAL; 1941 } 1942 1943 if (ioc->diag_buffer_status[buffer_type] & 1944 MPT3_DIAG_BUFFER_IS_RELEASED) { 1945 pr_err(MPT3SAS_FMT 1946 "%s: buffer_type(0x%02x) is already released\n", 1947 ioc->name, __func__, 1948 buffer_type); 1949 return 0; 1950 } 1951 1952 request_data = ioc->diag_buffer[buffer_type]; 1953 1954 if (!request_data) { 1955 pr_err(MPT3SAS_FMT 1956 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", 1957 ioc->name, __func__, buffer_type); 1958 return -ENOMEM; 1959 } 1960 1961 /* buffers were released by due to host reset */ 1962 if ((ioc->diag_buffer_status[buffer_type] & 1963 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) { 1964 ioc->diag_buffer_status[buffer_type] |= 1965 MPT3_DIAG_BUFFER_IS_RELEASED; 1966 ioc->diag_buffer_status[buffer_type] &= 1967 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; 1968 pr_err(MPT3SAS_FMT 1969 "%s: buffer_type(0x%02x) was released due to host reset\n", 1970 ioc->name, __func__, buffer_type); 1971 return 0; 1972 } 1973 1974 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); 1975 1976 if (issue_reset) 1977 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1978 FORCE_BIG_HAMMER); 1979 1980 return rc; 1981 } 1982 1983 /** 1984 * _ctl_diag_read_buffer - request for copy of the diag buffer 1985 * @ioc: per adapter object 1986 * @arg - user space buffer containing ioctl content 1987 */ 1988 static long 1989 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) 1990 { 1991 struct mpt3_diag_read_buffer karg; 1992 struct mpt3_diag_read_buffer __user *uarg = arg; 1993 void *request_data, *diag_data; 1994 Mpi2DiagBufferPostRequest_t *mpi_request; 1995 Mpi2DiagBufferPostReply_t *mpi_reply; 1996 int rc, i; 1997 u8 buffer_type; 1998 unsigned long timeleft, request_size, copy_size; 1999 u16 smid; 2000 u16 ioc_status; 2001 u8 issue_reset = 0; 2002 2003 if (copy_from_user(&karg, arg, sizeof(karg))) { 2004 pr_err("failure at %s:%d/%s()!\n", 2005 __FILE__, __LINE__, __func__); 2006 return -EFAULT; 2007 } 2008 2009 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 2010 __func__)); 2011 2012 buffer_type = karg.unique_id & 0x000000ff; 2013 if (!_ctl_diag_capability(ioc, buffer_type)) { 2014 pr_err(MPT3SAS_FMT 2015 "%s: doesn't have capability for buffer_type(0x%02x)\n", 2016 ioc->name, __func__, buffer_type); 2017 return -EPERM; 2018 } 2019 2020 if (karg.unique_id != ioc->unique_id[buffer_type]) { 2021 
pr_err(MPT3SAS_FMT 2022 "%s: unique_id(0x%08x) is not registered\n", 2023 ioc->name, __func__, karg.unique_id); 2024 return -EINVAL; 2025 } 2026 2027 request_data = ioc->diag_buffer[buffer_type]; 2028 if (!request_data) { 2029 pr_err(MPT3SAS_FMT 2030 "%s: doesn't have buffer for buffer_type(0x%02x)\n", 2031 ioc->name, __func__, buffer_type); 2032 return -ENOMEM; 2033 } 2034 2035 request_size = ioc->diag_buffer_sz[buffer_type]; 2036 2037 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { 2038 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \ 2039 "or bytes_to_read are not 4 byte aligned\n", ioc->name, 2040 __func__); 2041 return -EINVAL; 2042 } 2043 2044 if (karg.starting_offset > request_size) 2045 return -EINVAL; 2046 2047 diag_data = (void *)(request_data + karg.starting_offset); 2048 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2049 "%s: diag_buffer(%p), offset(%d), sz(%d)\n", 2050 ioc->name, __func__, 2051 diag_data, karg.starting_offset, karg.bytes_to_read)); 2052 2053 /* Truncate data on requests that are too large */ 2054 if ((diag_data + karg.bytes_to_read < diag_data) || 2055 (diag_data + karg.bytes_to_read > request_data + request_size)) 2056 copy_size = request_size - karg.starting_offset; 2057 else 2058 copy_size = karg.bytes_to_read; 2059 2060 if (copy_to_user((void __user *)uarg->diagnostic_data, 2061 diag_data, copy_size)) { 2062 pr_err(MPT3SAS_FMT 2063 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", 2064 ioc->name, __func__, diag_data); 2065 return -EFAULT; 2066 } 2067 2068 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) 2069 return 0; 2070 2071 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2072 "%s: Reregister buffer_type(0x%02x)\n", 2073 ioc->name, __func__, buffer_type)); 2074 if ((ioc->diag_buffer_status[buffer_type] & 2075 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { 2076 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2077 "%s: buffer_type(0x%02x) is still registered\n", 2078 ioc->name, __func__, buffer_type)); 2079 return 0; 2080 } 2081 /* Get a free request frame and save the message context. 
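 * The buffer is then re-posted to firmware with a DIAG_BUFFER_POST
 * request that reuses the original DMA address, size and saved
 * product-specific data, so tracing resumes in the same buffer.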
2082 */ 2083 2084 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 2085 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", 2086 ioc->name, __func__); 2087 rc = -EAGAIN; 2088 goto out; 2089 } 2090 2091 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); 2092 if (!smid) { 2093 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 2094 ioc->name, __func__); 2095 rc = -EAGAIN; 2096 goto out; 2097 } 2098 2099 rc = 0; 2100 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 2101 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 2102 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 2103 ioc->ctl_cmds.smid = smid; 2104 2105 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; 2106 mpi_request->BufferType = buffer_type; 2107 mpi_request->BufferLength = 2108 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); 2109 mpi_request->BufferAddress = 2110 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); 2111 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) 2112 mpi_request->ProductSpecific[i] = 2113 cpu_to_le32(ioc->product_specific[buffer_type][i]); 2114 mpi_request->VF_ID = 0; /* TODO */ 2115 mpi_request->VP_ID = 0; 2116 2117 init_completion(&ioc->ctl_cmds.done); 2118 mpt3sas_base_put_smid_default(ioc, smid); 2119 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 2120 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 2121 2122 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 2123 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, 2124 __func__); 2125 _debug_dump_mf(mpi_request, 2126 sizeof(Mpi2DiagBufferPostRequest_t)/4); 2127 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) 2128 issue_reset = 1; 2129 goto issue_host_reset; 2130 } 2131 2132 /* process the completed Reply Message Frame */ 2133 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { 2134 pr_err(MPT3SAS_FMT "%s: no reply message\n", 2135 ioc->name, __func__); 2136 rc = -EFAULT; 2137 goto out; 2138 } 2139 2140 mpi_reply = ioc->ctl_cmds.reply; 2141 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; 2142 2143 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { 2144 ioc->diag_buffer_status[buffer_type] |= 2145 MPT3_DIAG_BUFFER_IS_REGISTERED; 2146 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", 2147 ioc->name, __func__)); 2148 } else { 2149 pr_info(MPT3SAS_FMT 2150 "%s: ioc_status(0x%04x) log_info(0x%08x)\n", 2151 ioc->name, __func__, 2152 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); 2153 rc = -EFAULT; 2154 } 2155 2156 issue_host_reset: 2157 if (issue_reset) 2158 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2159 FORCE_BIG_HAMMER); 2160 2161 out: 2162 2163 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; 2164 return rc; 2165 } 2166 2167 2168 2169 #ifdef CONFIG_COMPAT 2170 /** 2171 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. 2172 * @ioc: per adapter object 2173 * @cmd - ioctl opcode 2174 * @arg - (struct mpt3_ioctl_command32) 2175 * 2176 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. 
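 *
 * Only the four user buffer pointers actually need widening; the rest
 * of the request is copied field by field, e.g.:
 *
 *	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
 *
 * In practice a 32-bit application simply issues MPT3COMMAND with its
 * own (smaller) struct; because _IOC_SIZE() is encoded in the ioctl
 * number, that request arrives here as MPT3COMMAND32.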
2177 */ 2178 static long 2179 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, 2180 void __user *arg) 2181 { 2182 struct mpt3_ioctl_command32 karg32; 2183 struct mpt3_ioctl_command32 __user *uarg; 2184 struct mpt3_ioctl_command karg; 2185 2186 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) 2187 return -EINVAL; 2188 2189 uarg = (struct mpt3_ioctl_command32 __user *) arg; 2190 2191 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { 2192 pr_err("failure at %s:%d/%s()!\n", 2193 __FILE__, __LINE__, __func__); 2194 return -EFAULT; 2195 } 2196 2197 memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); 2198 karg.hdr.ioc_number = karg32.hdr.ioc_number; 2199 karg.hdr.port_number = karg32.hdr.port_number; 2200 karg.hdr.max_data_size = karg32.hdr.max_data_size; 2201 karg.timeout = karg32.timeout; 2202 karg.max_reply_bytes = karg32.max_reply_bytes; 2203 karg.data_in_size = karg32.data_in_size; 2204 karg.data_out_size = karg32.data_out_size; 2205 karg.max_sense_bytes = karg32.max_sense_bytes; 2206 karg.data_sge_offset = karg32.data_sge_offset; 2207 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); 2208 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); 2209 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); 2210 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); 2211 return _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2212 } 2213 #endif 2214 2215 /** 2216 * _ctl_ioctl_main - main ioctl entry point 2217 * @file - (struct file) 2218 * @cmd - ioctl opcode 2219 * @arg - user space data buffer 2220 * @compat - handles 32 bit applications in 64bit os 2221 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & 2222 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. 2223 */ 2224 static long 2225 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, 2226 u8 compat, u16 mpi_version) 2227 { 2228 struct MPT3SAS_ADAPTER *ioc; 2229 struct mpt3_ioctl_header ioctl_header; 2230 enum block_state state; 2231 long ret = -EINVAL; 2232 2233 /* get IOCTL header */ 2234 if (copy_from_user(&ioctl_header, (char __user *)arg, 2235 sizeof(struct mpt3_ioctl_header))) { 2236 pr_err("failure at %s:%d/%s()!\n", 2237 __FILE__, __LINE__, __func__); 2238 return -EFAULT; 2239 } 2240 2241 if (_ctl_verify_adapter(ioctl_header.ioc_number, 2242 &ioc, mpi_version) == -1 || !ioc) 2243 return -ENODEV; 2244 2245 /* pci_access_mutex lock acquired by ioctl path */ 2246 mutex_lock(&ioc->pci_access_mutex); 2247 2248 if (ioc->shost_recovery || ioc->pci_error_recovery || 2249 ioc->is_driver_loading || ioc->remove_host) { 2250 ret = -EAGAIN; 2251 goto out_unlock_pciaccess; 2252 } 2253 2254 state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; 2255 if (state == NON_BLOCKING) { 2256 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { 2257 ret = -EAGAIN; 2258 goto out_unlock_pciaccess; 2259 } 2260 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { 2261 ret = -ERESTARTSYS; 2262 goto out_unlock_pciaccess; 2263 } 2264 2265 2266 switch (cmd) { 2267 case MPT3IOCINFO: 2268 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) 2269 ret = _ctl_getiocinfo(ioc, arg); 2270 break; 2271 #ifdef CONFIG_COMPAT 2272 case MPT3COMMAND32: 2273 #endif 2274 case MPT3COMMAND: 2275 { 2276 struct mpt3_ioctl_command __user *uarg; 2277 struct mpt3_ioctl_command karg; 2278 2279 #ifdef CONFIG_COMPAT 2280 if (compat) { 2281 ret = _ctl_compat_mpt_command(ioc, cmd, arg); 2282 break; 2283 } 2284 #endif 2285 if (copy_from_user(&karg, arg, sizeof(karg))) { 2286 pr_err("failure at %s:%d/%s()!\n", 2287 __FILE__, __LINE__, __func__); 2288 ret = -EFAULT; 2289 break; 2290 } 2291 2292 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { 2293 uarg = arg; 2294 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); 2295 } 2296 break; 2297 } 2298 case MPT3EVENTQUERY: 2299 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) 2300 ret = _ctl_eventquery(ioc, arg); 2301 break; 2302 case MPT3EVENTENABLE: 2303 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) 2304 ret = _ctl_eventenable(ioc, arg); 2305 break; 2306 case MPT3EVENTREPORT: 2307 ret = _ctl_eventreport(ioc, arg); 2308 break; 2309 case MPT3HARDRESET: 2310 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) 2311 ret = _ctl_do_reset(ioc, arg); 2312 break; 2313 case MPT3BTDHMAPPING: 2314 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) 2315 ret = _ctl_btdh_mapping(ioc, arg); 2316 break; 2317 case MPT3DIAGREGISTER: 2318 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) 2319 ret = _ctl_diag_register(ioc, arg); 2320 break; 2321 case MPT3DIAGUNREGISTER: 2322 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) 2323 ret = _ctl_diag_unregister(ioc, arg); 2324 break; 2325 case MPT3DIAGQUERY: 2326 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) 2327 ret = _ctl_diag_query(ioc, arg); 2328 break; 2329 case MPT3DIAGRELEASE: 2330 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) 2331 ret = _ctl_diag_release(ioc, arg); 2332 break; 2333 case MPT3DIAGREADBUFFER: 2334 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) 2335 ret = _ctl_diag_read_buffer(ioc, arg); 2336 break; 2337 default: 2338 dctlprintk(ioc, pr_info(MPT3SAS_FMT 2339 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); 2340 break; 2341 } 2342 2343 mutex_unlock(&ioc->ctl_cmds.mutex); 2344 out_unlock_pciaccess: 2345 mutex_unlock(&ioc->pci_access_mutex); 2346 return ret; 2347 } 2348 2349 /** 2350 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) 2351 * @file - (struct file) 2352 * @cmd - ioctl opcode 2353 * @arg - 2354 */ 2355 long 2356 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2357 { 2358 long ret; 2359 2360 /* pass MPI25_VERSION | MPI26_VERSION value, 2361 * to indicate that this ioctl cmd 2362 * came from mpt3ctl ioctl device. 
2363 */ 2364 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, 2365 MPI25_VERSION | MPI26_VERSION); 2366 return ret; 2367 } 2368 2369 /** 2370 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) 2371 * @file - (struct file) 2372 * @cmd - ioctl opcode 2373 * @arg - 2374 */ 2375 long 2376 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2377 { 2378 long ret; 2379 2380 /* pass MPI2_VERSION value, to indicate that this ioctl cmd 2381 * came from mpt2ctl ioctl device. 2382 */ 2383 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); 2384 return ret; 2385 } 2386 #ifdef CONFIG_COMPAT 2387 /** 2388 *_ ctl_ioctl_compat - main ioctl entry point (compat) 2389 * @file - 2390 * @cmd - 2391 * @arg - 2392 * 2393 * This routine handles 32 bit applications in 64bit os. 2394 */ 2395 long 2396 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2397 { 2398 long ret; 2399 2400 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, 2401 MPI25_VERSION | MPI26_VERSION); 2402 return ret; 2403 } 2404 2405 /** 2406 *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) 2407 * @file - 2408 * @cmd - 2409 * @arg - 2410 * 2411 * This routine handles 32 bit applications in 64bit os. 2412 */ 2413 long 2414 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2415 { 2416 long ret; 2417 2418 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); 2419 return ret; 2420 } 2421 #endif 2422 2423 /* scsi host attributes */ 2424 /** 2425 * _ctl_version_fw_show - firmware version 2426 * @cdev - pointer to embedded class device 2427 * @buf - the buffer returned 2428 * 2429 * A sysfs 'read-only' shost attribute. 2430 */ 2431 static ssize_t 2432 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, 2433 char *buf) 2434 { 2435 struct Scsi_Host *shost = class_to_shost(cdev); 2436 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2437 2438 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2439 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 2440 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 2441 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 2442 ioc->facts.FWVersion.Word & 0x000000FF); 2443 } 2444 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); 2445 2446 /** 2447 * _ctl_version_bios_show - bios version 2448 * @cdev - pointer to embedded class device 2449 * @buf - the buffer returned 2450 * 2451 * A sysfs 'read-only' shost attribute. 2452 */ 2453 static ssize_t 2454 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, 2455 char *buf) 2456 { 2457 struct Scsi_Host *shost = class_to_shost(cdev); 2458 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2459 2460 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 2461 2462 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 2463 (version & 0xFF000000) >> 24, 2464 (version & 0x00FF0000) >> 16, 2465 (version & 0x0000FF00) >> 8, 2466 version & 0x000000FF); 2467 } 2468 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); 2469 2470 /** 2471 * _ctl_version_mpi_show - MPI (message passing interface) version 2472 * @cdev - pointer to embedded class device 2473 * @buf - the buffer returned 2474 * 2475 * A sysfs 'read-only' shost attribute. 
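 *
 * The value is printed in hex, so an MPI v2.5 capable controller
 * reports "205" in the major field.  Read from user space with, e.g.
 * (host number assumed):
 *
 *	$ cat /sys/class/scsi_host/host0/version_mpi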
2476 */ 2477 static ssize_t 2478 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, 2479 char *buf) 2480 { 2481 struct Scsi_Host *shost = class_to_shost(cdev); 2482 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2483 2484 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", 2485 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); 2486 } 2487 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); 2488 2489 /** 2490 * _ctl_version_product_show - product name 2491 * @cdev - pointer to embedded class device 2492 * @buf - the buffer returned 2493 * 2494 * A sysfs 'read-only' shost attribute. 2495 */ 2496 static ssize_t 2497 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr, 2498 char *buf) 2499 { 2500 struct Scsi_Host *shost = class_to_shost(cdev); 2501 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2502 2503 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); 2504 } 2505 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); 2506 2507 /** 2508 * _ctl_version_nvdata_persistent_show - ndvata persistent version 2509 * @cdev - pointer to embedded class device 2510 * @buf - the buffer returned 2511 * 2512 * A sysfs 'read-only' shost attribute. 2513 */ 2514 static ssize_t 2515 _ctl_version_nvdata_persistent_show(struct device *cdev, 2516 struct device_attribute *attr, char *buf) 2517 { 2518 struct Scsi_Host *shost = class_to_shost(cdev); 2519 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2520 2521 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2522 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); 2523 } 2524 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, 2525 _ctl_version_nvdata_persistent_show, NULL); 2526 2527 /** 2528 * _ctl_version_nvdata_default_show - nvdata default version 2529 * @cdev - pointer to embedded class device 2530 * @buf - the buffer returned 2531 * 2532 * A sysfs 'read-only' shost attribute. 2533 */ 2534 static ssize_t 2535 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute 2536 *attr, char *buf) 2537 { 2538 struct Scsi_Host *shost = class_to_shost(cdev); 2539 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2540 2541 return snprintf(buf, PAGE_SIZE, "%08xh\n", 2542 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); 2543 } 2544 static DEVICE_ATTR(version_nvdata_default, S_IRUGO, 2545 _ctl_version_nvdata_default_show, NULL); 2546 2547 /** 2548 * _ctl_board_name_show - board name 2549 * @cdev - pointer to embedded class device 2550 * @buf - the buffer returned 2551 * 2552 * A sysfs 'read-only' shost attribute. 2553 */ 2554 static ssize_t 2555 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr, 2556 char *buf) 2557 { 2558 struct Scsi_Host *shost = class_to_shost(cdev); 2559 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2560 2561 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); 2562 } 2563 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); 2564 2565 /** 2566 * _ctl_board_assembly_show - board assembly name 2567 * @cdev - pointer to embedded class device 2568 * @buf - the buffer returned 2569 * 2570 * A sysfs 'read-only' shost attribute. 
2571 */ 2572 static ssize_t 2573 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr, 2574 char *buf) 2575 { 2576 struct Scsi_Host *shost = class_to_shost(cdev); 2577 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2578 2579 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); 2580 } 2581 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); 2582 2583 /** 2584 * _ctl_board_tracer_show - board tracer number 2585 * @cdev - pointer to embedded class device 2586 * @buf - the buffer returned 2587 * 2588 * A sysfs 'read-only' shost attribute. 2589 */ 2590 static ssize_t 2591 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr, 2592 char *buf) 2593 { 2594 struct Scsi_Host *shost = class_to_shost(cdev); 2595 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2596 2597 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); 2598 } 2599 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); 2600 2601 /** 2602 * _ctl_io_delay_show - io missing delay 2603 * @cdev - pointer to embedded class device 2604 * @buf - the buffer returned 2605 * 2606 * This is for firmware implemention for deboucing device 2607 * removal events. 2608 * 2609 * A sysfs 'read-only' shost attribute. 2610 */ 2611 static ssize_t 2612 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr, 2613 char *buf) 2614 { 2615 struct Scsi_Host *shost = class_to_shost(cdev); 2616 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2617 2618 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); 2619 } 2620 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); 2621 2622 /** 2623 * _ctl_device_delay_show - device missing delay 2624 * @cdev - pointer to embedded class device 2625 * @buf - the buffer returned 2626 * 2627 * This is for firmware implemention for deboucing device 2628 * removal events. 2629 * 2630 * A sysfs 'read-only' shost attribute. 2631 */ 2632 static ssize_t 2633 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr, 2634 char *buf) 2635 { 2636 struct Scsi_Host *shost = class_to_shost(cdev); 2637 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2638 2639 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); 2640 } 2641 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); 2642 2643 /** 2644 * _ctl_fw_queue_depth_show - global credits 2645 * @cdev - pointer to embedded class device 2646 * @buf - the buffer returned 2647 * 2648 * This is firmware queue depth limit 2649 * 2650 * A sysfs 'read-only' shost attribute. 2651 */ 2652 static ssize_t 2653 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, 2654 char *buf) 2655 { 2656 struct Scsi_Host *shost = class_to_shost(cdev); 2657 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2658 2659 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); 2660 } 2661 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL); 2662 2663 /** 2664 * _ctl_sas_address_show - sas address 2665 * @cdev - pointer to embedded class device 2666 * @buf - the buffer returned 2667 * 2668 * This is the controller sas address 2669 * 2670 * A sysfs 'read-only' shost attribute. 
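 *
 * Example read (host number and address are illustrative):
 *
 *	$ cat /sys/class/scsi_host/host0/host_sas_address
 *	0x5000062b0001fd70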
 */
static ssize_t
_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
	char *buf)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    (unsigned long long)ioc->sas_hba.sas_address);
}
static DEVICE_ATTR(host_sas_address, S_IRUGO,
	_ctl_host_sas_address_show, NULL);

/**
 * _ctl_logging_level_show - logging level
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
}
static ssize_t
_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%x", &val) != 1)
		return -EINVAL;

	ioc->logging_level = val;
	pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
	    ioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
	_ctl_logging_level_store);

/**
 * _ctl_fwfault_debug_show - show/store fwfault_debug
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * mpt3sas_fwfault_debug is a command line option.
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
}
static ssize_t
_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	ioc->fwfault_debug = val;
	pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
	    ioc->fwfault_debug);
	return strlen(buf);
}
static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
	_ctl_fwfault_debug_show, _ctl_fwfault_debug_store);

/**
 * _ctl_ioc_reset_count_show - ioc reset count
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * This is the number of times the IOC has been reset.
 *
 * A sysfs 'read-only' shost attribute.
2765 */ 2766 static ssize_t 2767 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, 2768 char *buf) 2769 { 2770 struct Scsi_Host *shost = class_to_shost(cdev); 2771 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2772 2773 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); 2774 } 2775 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); 2776 2777 /** 2778 * _ctl_ioc_reply_queue_count_show - number of reply queues 2779 * @cdev - pointer to embedded class device 2780 * @buf - the buffer returned 2781 * 2782 * This is number of reply queues 2783 * 2784 * A sysfs 'read-only' shost attribute. 2785 */ 2786 static ssize_t 2787 _ctl_ioc_reply_queue_count_show(struct device *cdev, 2788 struct device_attribute *attr, char *buf) 2789 { 2790 u8 reply_queue_count; 2791 struct Scsi_Host *shost = class_to_shost(cdev); 2792 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2793 2794 if ((ioc->facts.IOCCapabilities & 2795 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) 2796 reply_queue_count = ioc->reply_queue_count; 2797 else 2798 reply_queue_count = 1; 2799 2800 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); 2801 } 2802 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, 2803 NULL); 2804 2805 /** 2806 * _ctl_BRM_status_show - Backup Rail Monitor Status 2807 * @cdev - pointer to embedded class device 2808 * @buf - the buffer returned 2809 * 2810 * This is number of reply queues 2811 * 2812 * A sysfs 'read-only' shost attribute. 2813 */ 2814 static ssize_t 2815 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, 2816 char *buf) 2817 { 2818 struct Scsi_Host *shost = class_to_shost(cdev); 2819 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2820 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; 2821 Mpi2ConfigReply_t mpi_reply; 2822 u16 backup_rail_monitor_status = 0; 2823 u16 ioc_status; 2824 int sz; 2825 ssize_t rc = 0; 2826 2827 if (!ioc->is_warpdrive) { 2828 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for" 2829 " warpdrive\n", ioc->name, __func__); 2830 goto out; 2831 } 2832 /* pci_access_mutex lock acquired by sysfs show path */ 2833 mutex_lock(&ioc->pci_access_mutex); 2834 if (ioc->pci_error_recovery || ioc->remove_host) { 2835 mutex_unlock(&ioc->pci_access_mutex); 2836 return 0; 2837 } 2838 2839 /* allocate upto GPIOVal 36 entries */ 2840 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); 2841 io_unit_pg3 = kzalloc(sz, GFP_KERNEL); 2842 if (!io_unit_pg3) { 2843 pr_err(MPT3SAS_FMT "%s: failed allocating memory " 2844 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz); 2845 goto out; 2846 } 2847 2848 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != 2849 0) { 2850 pr_err(MPT3SAS_FMT 2851 "%s: failed reading iounit_pg3\n", ioc->name, 2852 __func__); 2853 goto out; 2854 } 2855 2856 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 2857 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { 2858 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with " 2859 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status); 2860 goto out; 2861 } 2862 2863 if (io_unit_pg3->GPIOCount < 25) { 2864 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than " 2865 "25 entries, detected (%d) entries\n", ioc->name, __func__, 2866 io_unit_pg3->GPIOCount); 2867 goto out; 2868 } 2869 2870 /* BRM status is in bit zero of GPIOVal[24] */ 2871 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); 2872 rc = snprintf(buf, PAGE_SIZE, "%d\n", 
(backup_rail_monitor_status & 1)); 2873 2874 out: 2875 kfree(io_unit_pg3); 2876 mutex_unlock(&ioc->pci_access_mutex); 2877 return rc; 2878 } 2879 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); 2880 2881 struct DIAG_BUFFER_START { 2882 __le32 Size; 2883 __le32 DiagVersion; 2884 u8 BufferType; 2885 u8 Reserved[3]; 2886 __le32 Reserved1; 2887 __le32 Reserved2; 2888 __le32 Reserved3; 2889 }; 2890 2891 /** 2892 * _ctl_host_trace_buffer_size_show - host buffer size (trace only) 2893 * @cdev - pointer to embedded class device 2894 * @buf - the buffer returned 2895 * 2896 * A sysfs 'read-only' shost attribute. 2897 */ 2898 static ssize_t 2899 _ctl_host_trace_buffer_size_show(struct device *cdev, 2900 struct device_attribute *attr, char *buf) 2901 { 2902 struct Scsi_Host *shost = class_to_shost(cdev); 2903 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2904 u32 size = 0; 2905 struct DIAG_BUFFER_START *request_data; 2906 2907 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 2908 pr_err(MPT3SAS_FMT 2909 "%s: host_trace_buffer is not registered\n", 2910 ioc->name, __func__); 2911 return 0; 2912 } 2913 2914 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 2915 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2916 pr_err(MPT3SAS_FMT 2917 "%s: host_trace_buffer is not registered\n", 2918 ioc->name, __func__); 2919 return 0; 2920 } 2921 2922 request_data = (struct DIAG_BUFFER_START *) 2923 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; 2924 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || 2925 le32_to_cpu(request_data->DiagVersion) == 0x01000000 || 2926 le32_to_cpu(request_data->DiagVersion) == 0x01010000) && 2927 le32_to_cpu(request_data->Reserved3) == 0x4742444c) 2928 size = le32_to_cpu(request_data->Size); 2929 2930 ioc->ring_buffer_sz = size; 2931 return snprintf(buf, PAGE_SIZE, "%d\n", size); 2932 } 2933 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, 2934 _ctl_host_trace_buffer_size_show, NULL); 2935 2936 /** 2937 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) 2938 * @cdev - pointer to embedded class device 2939 * @buf - the buffer returned 2940 * 2941 * A sysfs 'read/write' shost attribute. 2942 * 2943 * You will only be able to read 4k bytes of ring buffer at a time. 2944 * In order to read beyond 4k bytes, you will have to write out the 2945 * offset to the same attribute, it will move the pointer. 2946 */ 2947 static ssize_t 2948 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, 2949 char *buf) 2950 { 2951 struct Scsi_Host *shost = class_to_shost(cdev); 2952 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2953 void *request_data; 2954 u32 size; 2955 2956 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { 2957 pr_err(MPT3SAS_FMT 2958 "%s: host_trace_buffer is not registered\n", 2959 ioc->name, __func__); 2960 return 0; 2961 } 2962 2963 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 2964 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { 2965 pr_err(MPT3SAS_FMT 2966 "%s: host_trace_buffer is not registered\n", 2967 ioc->name, __func__); 2968 return 0; 2969 } 2970 2971 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) 2972 return 0; 2973 2974 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; 2975 size = (size >= PAGE_SIZE) ? 
(PAGE_SIZE - 1) : size; 2976 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; 2977 memcpy(buf, request_data, size); 2978 return size; 2979 } 2980 2981 static ssize_t 2982 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, 2983 const char *buf, size_t count) 2984 { 2985 struct Scsi_Host *shost = class_to_shost(cdev); 2986 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 2987 int val = 0; 2988 2989 if (sscanf(buf, "%d", &val) != 1) 2990 return -EINVAL; 2991 2992 ioc->ring_buffer_offset = val; 2993 return strlen(buf); 2994 } 2995 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, 2996 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); 2997 2998 2999 /*****************************************/ 3000 3001 /** 3002 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) 3003 * @cdev - pointer to embedded class device 3004 * @buf - the buffer returned 3005 * 3006 * A sysfs 'read/write' shost attribute. 3007 * 3008 * This is a mechnism to post/release host_trace_buffers 3009 */ 3010 static ssize_t 3011 _ctl_host_trace_buffer_enable_show(struct device *cdev, 3012 struct device_attribute *attr, char *buf) 3013 { 3014 struct Scsi_Host *shost = class_to_shost(cdev); 3015 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3016 3017 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) || 3018 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3019 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)) 3020 return snprintf(buf, PAGE_SIZE, "off\n"); 3021 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3022 MPT3_DIAG_BUFFER_IS_RELEASED)) 3023 return snprintf(buf, PAGE_SIZE, "release\n"); 3024 else 3025 return snprintf(buf, PAGE_SIZE, "post\n"); 3026 } 3027 3028 static ssize_t 3029 _ctl_host_trace_buffer_enable_store(struct device *cdev, 3030 struct device_attribute *attr, const char *buf, size_t count) 3031 { 3032 struct Scsi_Host *shost = class_to_shost(cdev); 3033 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3034 char str[10] = ""; 3035 struct mpt3_diag_register diag_register; 3036 u8 issue_reset = 0; 3037 3038 /* don't allow post/release occurr while recovery is active */ 3039 if (ioc->shost_recovery || ioc->remove_host || 3040 ioc->pci_error_recovery || ioc->is_driver_loading) 3041 return -EBUSY; 3042 3043 if (sscanf(buf, "%9s", str) != 1) 3044 return -EINVAL; 3045 3046 if (!strcmp(str, "post")) { 3047 /* exit out if host buffers are already posted */ 3048 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) && 3049 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3050 MPT3_DIAG_BUFFER_IS_REGISTERED) && 3051 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3052 MPT3_DIAG_BUFFER_IS_RELEASED) == 0)) 3053 goto out; 3054 memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); 3055 pr_info(MPT3SAS_FMT "posting host trace buffers\n", 3056 ioc->name); 3057 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; 3058 diag_register.requested_buffer_size = (1024 * 1024); 3059 diag_register.unique_id = 0x7075900; 3060 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; 3061 _ctl_diag_register_2(ioc, &diag_register); 3062 } else if (!strcmp(str, "release")) { 3063 /* exit out if host buffers are already released */ 3064 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) 3065 goto out; 3066 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3067 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) 3068 goto out; 3069 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & 3070 MPT3_DIAG_BUFFER_IS_RELEASED)) 3071 goto out; 3072 
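		/*
		 * "release" hands ownership of the trace buffer back to the
		 * driver so it can be read through the host_trace_buffer
		 * attribute; from user space this path is typically reached
		 * with (host number assumed):
		 *
		 *	echo release > /sys/class/scsi_host/host0/host_trace_buffer_enable
		 */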
pr_info(MPT3SAS_FMT "releasing host trace buffer\n", 3073 ioc->name); 3074 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, 3075 &issue_reset); 3076 } 3077 3078 out: 3079 return strlen(buf); 3080 } 3081 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, 3082 _ctl_host_trace_buffer_enable_show, 3083 _ctl_host_trace_buffer_enable_store); 3084 3085 /*********** diagnostic trigger suppport *********************************/ 3086 3087 /** 3088 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute 3089 * @cdev - pointer to embedded class device 3090 * @buf - the buffer returned 3091 * 3092 * A sysfs 'read/write' shost attribute. 3093 */ 3094 static ssize_t 3095 _ctl_diag_trigger_master_show(struct device *cdev, 3096 struct device_attribute *attr, char *buf) 3097 3098 { 3099 struct Scsi_Host *shost = class_to_shost(cdev); 3100 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3101 unsigned long flags; 3102 ssize_t rc; 3103 3104 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3105 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); 3106 memcpy(buf, &ioc->diag_trigger_master, rc); 3107 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3108 return rc; 3109 } 3110 3111 /** 3112 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute 3113 * @cdev - pointer to embedded class device 3114 * @buf - the buffer returned 3115 * 3116 * A sysfs 'read/write' shost attribute. 3117 */ 3118 static ssize_t 3119 _ctl_diag_trigger_master_store(struct device *cdev, 3120 struct device_attribute *attr, const char *buf, size_t count) 3121 3122 { 3123 struct Scsi_Host *shost = class_to_shost(cdev); 3124 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3125 unsigned long flags; 3126 ssize_t rc; 3127 3128 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3129 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); 3130 memset(&ioc->diag_trigger_master, 0, 3131 sizeof(struct SL_WH_MASTER_TRIGGER_T)); 3132 memcpy(&ioc->diag_trigger_master, buf, rc); 3133 ioc->diag_trigger_master.MasterData |= 3134 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); 3135 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3136 return rc; 3137 } 3138 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, 3139 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); 3140 3141 3142 /** 3143 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute 3144 * @cdev - pointer to embedded class device 3145 * @buf - the buffer returned 3146 * 3147 * A sysfs 'read/write' shost attribute. 3148 */ 3149 static ssize_t 3150 _ctl_diag_trigger_event_show(struct device *cdev, 3151 struct device_attribute *attr, char *buf) 3152 { 3153 struct Scsi_Host *shost = class_to_shost(cdev); 3154 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3155 unsigned long flags; 3156 ssize_t rc; 3157 3158 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3159 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); 3160 memcpy(buf, &ioc->diag_trigger_event, rc); 3161 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3162 return rc; 3163 } 3164 3165 /** 3166 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute 3167 * @cdev - pointer to embedded class device 3168 * @buf - the buffer returned 3169 * 3170 * A sysfs 'read/write' shost attribute. 
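 *
 * The attribute expects a raw struct SL_WH_EVENT_TRIGGERS_T image.  A
 * hypothetical user-space writer (sysfs path assumes host0; only the
 * ValidEntries member is shown here):
 *
 *	struct SL_WH_EVENT_TRIGGERS_T trg;
 *	int fd = open("/sys/class/scsi_host/host0/diag_trigger_event",
 *	    O_WRONLY);
 *
 *	memset(&trg, 0, sizeof(trg));
 *	trg.ValidEntries = 1;
 *	... fill in the first event trigger entry ...
 *	write(fd, &trg, sizeof(trg));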
 */
static ssize_t
_ctl_diag_trigger_event_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t sz;

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
	memset(&ioc->diag_trigger_event, 0,
	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
	memcpy(&ioc->diag_trigger_event, buf, sz);
	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
	_ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);


/**
 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_diag_trigger_scsi_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t rc;

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
	memcpy(buf, &ioc->diag_trigger_scsi, rc);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}

/**
 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
_ctl_diag_trigger_scsi_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t sz;

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
	memset(&ioc->diag_trigger_scsi, 0,
	    sizeof(struct SL_WH_SCSI_TRIGGERS_T));
	memcpy(&ioc->diag_trigger_scsi, buf, sz);
	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
	_ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);


/**
 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
 * @cdev - pointer to embedded class device
 * @buf - the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
3255 */ 3256 static ssize_t 3257 _ctl_diag_trigger_mpi_show(struct device *cdev, 3258 struct device_attribute *attr, char *buf) 3259 { 3260 struct Scsi_Host *shost = class_to_shost(cdev); 3261 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3262 unsigned long flags; 3263 ssize_t rc; 3264 3265 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3266 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T); 3267 memcpy(buf, &ioc->diag_trigger_mpi, rc); 3268 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3269 return rc; 3270 } 3271 3272 /** 3273 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute 3274 * @cdev - pointer to embedded class device 3275 * @buf - the buffer returned 3276 * 3277 * A sysfs 'read/write' shost attribute. 3278 */ 3279 static ssize_t 3280 _ctl_diag_trigger_mpi_store(struct device *cdev, 3281 struct device_attribute *attr, const char *buf, size_t count) 3282 { 3283 struct Scsi_Host *shost = class_to_shost(cdev); 3284 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3285 unsigned long flags; 3286 ssize_t sz; 3287 3288 spin_lock_irqsave(&ioc->diag_trigger_lock, flags); 3289 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); 3290 memset(&ioc->diag_trigger_mpi, 0, 3291 sizeof(ioc->diag_trigger_mpi)); 3292 memcpy(&ioc->diag_trigger_mpi, buf, sz); 3293 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) 3294 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; 3295 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); 3296 return sz; 3297 } 3298 3299 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, 3300 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); 3301 3302 /*********** diagnostic trigger suppport *** END ****************************/ 3303 3304 3305 3306 /*****************************************/ 3307 3308 struct device_attribute *mpt3sas_host_attrs[] = { 3309 &dev_attr_version_fw, 3310 &dev_attr_version_bios, 3311 &dev_attr_version_mpi, 3312 &dev_attr_version_product, 3313 &dev_attr_version_nvdata_persistent, 3314 &dev_attr_version_nvdata_default, 3315 &dev_attr_board_name, 3316 &dev_attr_board_assembly, 3317 &dev_attr_board_tracer, 3318 &dev_attr_io_delay, 3319 &dev_attr_device_delay, 3320 &dev_attr_logging_level, 3321 &dev_attr_fwfault_debug, 3322 &dev_attr_fw_queue_depth, 3323 &dev_attr_host_sas_address, 3324 &dev_attr_ioc_reset_count, 3325 &dev_attr_host_trace_buffer_size, 3326 &dev_attr_host_trace_buffer, 3327 &dev_attr_host_trace_buffer_enable, 3328 &dev_attr_reply_queue_count, 3329 &dev_attr_diag_trigger_master, 3330 &dev_attr_diag_trigger_event, 3331 &dev_attr_diag_trigger_scsi, 3332 &dev_attr_diag_trigger_mpi, 3333 &dev_attr_BRM_status, 3334 NULL, 3335 }; 3336 3337 /* device attributes */ 3338 3339 /** 3340 * _ctl_device_sas_address_show - sas address 3341 * @cdev - pointer to embedded class device 3342 * @buf - the buffer returned 3343 * 3344 * This is the sas address for the target 3345 * 3346 * A sysfs 'read-only' shost attribute. 
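 *
 * Example read (the H:C:T:L tuple and address are illustrative):
 *
 *	$ cat /sys/class/scsi_device/0:0:1:0/device/sas_address
 *	0x5000c500123a6b29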
 */
static ssize_t
_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    (unsigned long long)sas_device_priv_data->sas_target->sas_address);
}
static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);

/**
 * _ctl_device_handle_show - device handle
 * @dev - pointer to embedded device
 * @buf - the buffer returned
 *
 * This is the firmware assigned device handle
 *
 * A sysfs 'read-only' sdev attribute.
 */
static ssize_t
_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;

	return snprintf(buf, PAGE_SIZE, "0x%04x\n",
	    sas_device_priv_data->sas_target->handle);
}
static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);

struct device_attribute *mpt3sas_dev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_sas_device_handle,
	NULL,
};

/* file operations table for mpt3ctl device */
static const struct file_operations ctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	.compat_ioctl = _ctl_ioctl_compat,
#endif
};

/* file operations table for mpt2ctl device */
static const struct file_operations ctl_gen2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = _ctl_mpt2_ioctl,
	.poll = _ctl_poll,
	.fasync = _ctl_fasync,
#ifdef CONFIG_COMPAT
	.compat_ioctl = _ctl_mpt2_ioctl_compat,
#endif
};

static struct miscdevice ctl_dev = {
	.minor = MPT3SAS_MINOR,
	.name = MPT3SAS_DEV_NAME,
	.fops = &ctl_fops,
};

static struct miscdevice gen2_ctl_dev = {
	.minor = MPT2SAS_MINOR,
	.name = MPT2SAS_DEV_NAME,
	.fops = &ctl_gen2_fops,
};

/**
 * mpt3sas_ctl_init - main entry point for ctl.
 *
 */
void
mpt3sas_ctl_init(ushort hbas_to_enumerate)
{
	async_queue = NULL;

	/* Don't register mpt3ctl ioctl device if
	 * hbas_to_enumerate is one.
	 */
	if (hbas_to_enumerate != 1)
		if (misc_register(&ctl_dev) < 0)
			pr_err("%s can't register misc device [minor=%d]\n",
			    MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);

	/* Don't register mpt2ctl ioctl device if
	 * hbas_to_enumerate is two.
3440 */ 3441 if (hbas_to_enumerate != 2) 3442 if (misc_register(&gen2_ctl_dev) < 0) 3443 pr_err("%s can't register misc device [minor=%d]\n", 3444 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); 3445 3446 init_waitqueue_head(&ctl_poll_wait); 3447 } 3448 3449 /** 3450 * mpt3sas_ctl_exit - exit point for ctl 3451 * 3452 */ 3453 void 3454 mpt3sas_ctl_exit(ushort hbas_to_enumerate) 3455 { 3456 struct MPT3SAS_ADAPTER *ioc; 3457 int i; 3458 3459 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 3460 3461 /* free memory associated to diag buffers */ 3462 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 3463 if (!ioc->diag_buffer[i]) 3464 continue; 3465 if (!(ioc->diag_buffer_status[i] & 3466 MPT3_DIAG_BUFFER_IS_REGISTERED)) 3467 continue; 3468 if ((ioc->diag_buffer_status[i] & 3469 MPT3_DIAG_BUFFER_IS_RELEASED)) 3470 continue; 3471 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], 3472 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); 3473 ioc->diag_buffer[i] = NULL; 3474 ioc->diag_buffer_status[i] = 0; 3475 } 3476 3477 kfree(ioc->event_log); 3478 } 3479 if (hbas_to_enumerate != 1) 3480 misc_deregister(&ctl_dev); 3481 if (hbas_to_enumerate != 2) 3482 misc_deregister(&gen2_ctl_dev); 3483 } 3484
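
/*
 * For reference, a hypothetical user-space sequence for pulling a
 * registered trace buffer out through this interface.  The device node
 * name, the "hdr" member and the trailing diagnostic_data[] area are
 * assumptions based on mpt3sas_ctl.h; all error handling is omitted:
 *
 *	struct mpt3_diag_query query;
 *	struct mpt3_diag_read_buffer *rb;
 *	int fd = open("/dev/mpt3ctl", O_RDWR);
 *
 *	memset(&query, 0, sizeof(query));
 *	query.hdr.ioc_number = 0;
 *	query.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
 *	ioctl(fd, MPT3DIAGQUERY, &query);
 *
 *	rb = calloc(1, sizeof(*rb) + query.total_buffer_size);
 *	rb->hdr.ioc_number = 0;
 *	rb->unique_id = query.unique_id;
 *	rb->starting_offset = 0;
 *	rb->bytes_to_read = query.total_buffer_size;
 *	ioctl(fd, MPT3DIAGREADBUFFER, rb);
 */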