// SPDX-License-Identifier: GPL-2.0
/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
 * Copyright (c) 2016-2018 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.10-025"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		10
#define DRIVER_REVISION		25

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first,
	"Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep,
	"Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_device_reset = true;
}

static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_device_reset;
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}
static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_shutdown = true;
}

static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_work_sync(&ctrl_info->event_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);

	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;
		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	if (device->is_expander_smp_device)
		return 0;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */
static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type =
		new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock. That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported;

	if (device->is_expander_smp_device)
		return true;

	is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller. If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}

static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	if (!device->is_physical_device)
		return false;

	if (device->is_expander_smp_device)
		return true;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_ENCLOSURE:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}

static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
get_unaligned_be32(&physdev_list->header.list_length) 1990 / sizeof(physdev_list->lun_entries[0]); 1991 else 1992 num_physicals = 0; 1993 1994 if (logdev_list) 1995 num_logicals = 1996 get_unaligned_be32(&logdev_list->header.list_length) 1997 / sizeof(logdev_list->lun_entries[0]); 1998 else 1999 num_logicals = 0; 2000 2001 if (num_physicals) { 2002 /* 2003 * We need this buffer for calls to pqi_get_physical_disk_info() 2004 * below. We allocate it here instead of inside 2005 * pqi_get_physical_disk_info() because it's a fairly large 2006 * buffer. 2007 */ 2008 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2009 if (!id_phys) { 2010 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2011 out_of_memory_msg); 2012 rc = -ENOMEM; 2013 goto out; 2014 } 2015 2016 if (pqi_hide_vsep) { 2017 for (i = num_physicals - 1; i >= 0; i--) { 2018 phys_lun_ext_entry = 2019 &physdev_list->lun_entries[i]; 2020 if (CISS_GET_DRIVE_NUMBER( 2021 phys_lun_ext_entry->lunid) == 2022 PQI_VSEP_CISS_BTL) { 2023 pqi_mask_device( 2024 phys_lun_ext_entry->lunid); 2025 break; 2026 } 2027 } 2028 } 2029 } 2030 2031 num_new_devices = num_physicals + num_logicals; 2032 2033 new_device_list = kmalloc_array(num_new_devices, 2034 sizeof(*new_device_list), 2035 GFP_KERNEL); 2036 if (!new_device_list) { 2037 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2038 rc = -ENOMEM; 2039 goto out; 2040 } 2041 2042 for (i = 0; i < num_new_devices; i++) { 2043 device = kzalloc(sizeof(*device), GFP_KERNEL); 2044 if (!device) { 2045 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2046 out_of_memory_msg); 2047 rc = -ENOMEM; 2048 goto out; 2049 } 2050 list_add_tail(&device->new_device_list_entry, 2051 &new_device_list_head); 2052 } 2053 2054 device = NULL; 2055 num_valid_devices = 0; 2056 physical_index = 0; 2057 logical_index = 0; 2058 2059 for (i = 0; i < num_new_devices; i++) { 2060 2061 if ((!pqi_expose_ld_first && i < num_physicals) || 2062 (pqi_expose_ld_first && i >= num_logicals)) { 2063 is_physical_device = true; 2064 phys_lun_ext_entry = 2065 &physdev_list->lun_entries[physical_index++]; 2066 log_lun_ext_entry = NULL; 2067 scsi3addr = phys_lun_ext_entry->lunid; 2068 } else { 2069 is_physical_device = false; 2070 phys_lun_ext_entry = NULL; 2071 log_lun_ext_entry = 2072 &logdev_list->lun_entries[logical_index++]; 2073 scsi3addr = log_lun_ext_entry->lunid; 2074 } 2075 2076 if (is_physical_device && pqi_skip_device(scsi3addr)) 2077 continue; 2078 2079 if (device) 2080 device = list_next_entry(device, new_device_list_entry); 2081 else 2082 device = list_first_entry(&new_device_list_head, 2083 struct pqi_scsi_dev, new_device_list_entry); 2084 2085 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2086 device->is_physical_device = is_physical_device; 2087 if (is_physical_device) { 2088 if (phys_lun_ext_entry->device_type == 2089 SA_DEVICE_TYPE_EXPANDER_SMP) 2090 device->is_expander_smp_device = true; 2091 } else { 2092 device->is_external_raid_device = 2093 pqi_is_external_raid_addr(scsi3addr); 2094 } 2095 2096 /* Gather information about the device. 
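A failure here is not fatal to the scan as a whole: -ENOMEM aborts the
discovery pass, while any other error just logs a warning, skips this
device and moves on to the next entry.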
*/ 2097 rc = pqi_get_device_info(ctrl_info, device); 2098 if (rc == -ENOMEM) { 2099 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2100 out_of_memory_msg); 2101 goto out; 2102 } 2103 if (rc) { 2104 if (device->is_physical_device) 2105 dev_warn(&ctrl_info->pci_dev->dev, 2106 "obtaining device info failed, skipping physical device %016llx\n", 2107 get_unaligned_be64( 2108 &phys_lun_ext_entry->wwid)); 2109 else 2110 dev_warn(&ctrl_info->pci_dev->dev, 2111 "obtaining device info failed, skipping logical device %08x%08x\n", 2112 *((u32 *)&device->scsi3addr), 2113 *((u32 *)&device->scsi3addr[4])); 2114 rc = 0; 2115 continue; 2116 } 2117 2118 if (!pqi_is_supported_device(device)) 2119 continue; 2120 2121 pqi_assign_bus_target_lun(device); 2122 2123 if (device->is_physical_device) { 2124 device->wwid = phys_lun_ext_entry->wwid; 2125 if ((phys_lun_ext_entry->device_flags & 2126 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2127 phys_lun_ext_entry->aio_handle) { 2128 device->aio_enabled = true; 2129 device->aio_handle = 2130 phys_lun_ext_entry->aio_handle; 2131 } 2132 pqi_get_physical_disk_info(ctrl_info, device, id_phys); 2133 } else { 2134 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 2135 sizeof(device->volume_id)); 2136 } 2137 2138 if (pqi_is_device_with_sas_address(device)) 2139 device->sas_address = get_unaligned_be64(&device->wwid); 2140 2141 new_device_list[num_valid_devices++] = device; 2142 } 2143 2144 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2145 2146 out: 2147 list_for_each_entry_safe(device, next, &new_device_list_head, 2148 new_device_list_entry) { 2149 if (device->keep_device) 2150 continue; 2151 list_del(&device->new_device_list_entry); 2152 pqi_free_device(device); 2153 } 2154 2155 kfree(new_device_list); 2156 kfree(physdev_list); 2157 kfree(logdev_list); 2158 kfree(id_phys); 2159 2160 return rc; 2161 } 2162 2163 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2164 { 2165 unsigned long flags; 2166 struct pqi_scsi_dev *device; 2167 2168 while (1) { 2169 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2170 2171 device = list_first_entry_or_null(&ctrl_info->scsi_device_list, 2172 struct pqi_scsi_dev, scsi_device_list_entry); 2173 if (device) 2174 list_del(&device->scsi_device_list_entry); 2175 2176 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 2177 flags); 2178 2179 if (!device) 2180 break; 2181 2182 if (pqi_is_device_added(device)) 2183 pqi_remove_device(ctrl_info, device); 2184 pqi_free_device(device); 2185 } 2186 } 2187 2188 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2189 { 2190 int rc = 0; 2191 2192 if (pqi_ctrl_offline(ctrl_info)) 2193 return -ENXIO; 2194 2195 if (!mutex_trylock(&ctrl_info->scan_mutex)) { 2196 pqi_schedule_rescan_worker_delayed(ctrl_info); 2197 rc = -EINPROGRESS; 2198 } else { 2199 rc = pqi_update_scsi_devices(ctrl_info); 2200 if (rc) 2201 pqi_schedule_rescan_worker_delayed(ctrl_info); 2202 mutex_unlock(&ctrl_info->scan_mutex); 2203 } 2204 2205 return rc; 2206 } 2207 2208 static void pqi_scan_start(struct Scsi_Host *shost) 2209 { 2210 struct pqi_ctrl_info *ctrl_info; 2211 2212 ctrl_info = shost_to_hba(shost); 2213 if (pqi_ctrl_in_ofa(ctrl_info)) 2214 return; 2215 2216 pqi_scan_scsi_devices(ctrl_info); 2217 } 2218 2219 /* Returns TRUE if scan is finished. 
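The SCSI midlayer polls this callback after pqi_scan_start() returns;
"finished" is approximated by scan_mutex no longer being held, since
pqi_update_scsi_devices() runs entirely under that mutex. The
elapsed_time argument is unused.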
*/ 2220 2221 static int pqi_scan_finished(struct Scsi_Host *shost, 2222 unsigned long elapsed_time) 2223 { 2224 struct pqi_ctrl_info *ctrl_info; 2225 2226 ctrl_info = shost_priv(shost); 2227 2228 return !mutex_is_locked(&ctrl_info->scan_mutex); 2229 } 2230 2231 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2232 { 2233 mutex_lock(&ctrl_info->scan_mutex); 2234 mutex_unlock(&ctrl_info->scan_mutex); 2235 } 2236 2237 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2238 { 2239 mutex_lock(&ctrl_info->lun_reset_mutex); 2240 mutex_unlock(&ctrl_info->lun_reset_mutex); 2241 } 2242 2243 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 2244 { 2245 mutex_lock(&ctrl_info->ofa_mutex); 2246 mutex_unlock(&ctrl_info->ofa_mutex); 2247 } 2248 2249 static inline void pqi_set_encryption_info( 2250 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2251 u64 first_block) 2252 { 2253 u32 volume_blk_size; 2254 2255 /* 2256 * Set the encryption tweak values based on logical block address. 2257 * If the block size is 512, the tweak value is equal to the LBA. 2258 * For other block sizes, tweak value is (LBA * block size) / 512. 2259 */ 2260 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2261 if (volume_blk_size != 512) 2262 first_block = (first_block * volume_blk_size) / 512; 2263 2264 encryption_info->data_encryption_key_index = 2265 get_unaligned_le16(&raid_map->data_encryption_key_index); 2266 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2267 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2268 } 2269 2270 /* 2271 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2272 */ 2273 2274 #define PQI_RAID_BYPASS_INELIGIBLE 1 2275 2276 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2277 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2278 struct pqi_queue_group *queue_group) 2279 { 2280 struct raid_map *raid_map; 2281 bool is_write = false; 2282 u32 map_index; 2283 u64 first_block; 2284 u64 last_block; 2285 u32 block_cnt; 2286 u32 blocks_per_row; 2287 u64 first_row; 2288 u64 last_row; 2289 u32 first_row_offset; 2290 u32 last_row_offset; 2291 u32 first_column; 2292 u32 last_column; 2293 u64 r0_first_row; 2294 u64 r0_last_row; 2295 u32 r5or6_blocks_per_row; 2296 u64 r5or6_first_row; 2297 u64 r5or6_last_row; 2298 u32 r5or6_first_row_offset; 2299 u32 r5or6_last_row_offset; 2300 u32 r5or6_first_column; 2301 u32 r5or6_last_column; 2302 u16 data_disks_per_row; 2303 u32 total_disks_per_row; 2304 u16 layout_map_count; 2305 u32 stripesize; 2306 u16 strip_size; 2307 u32 first_group; 2308 u32 last_group; 2309 u32 current_group; 2310 u32 map_row; 2311 u32 aio_handle; 2312 u64 disk_block; 2313 u32 disk_block_cnt; 2314 u8 cdb[16]; 2315 u8 cdb_length; 2316 int offload_to_mirror; 2317 struct pqi_encryption_info *encryption_info_ptr; 2318 struct pqi_encryption_info encryption_info; 2319 #if BITS_PER_LONG == 32 2320 u64 tmpdiv; 2321 #endif 2322 2323 /* Check for valid opcode, get LBA and block count. 
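Only simple READ/WRITE CDBs are eligible for bypass. The LBA and length
come straight from the CDB: READ(10)/WRITE(10) carry a 32-bit big-endian
LBA in bytes 2-5 and a 16-bit length in bytes 7-8, so (illustrative) a
READ(10) with cmnd[2..5] = 00 00 10 00 and cmnd[7..8] = 00 08 decodes to
first_block 0x1000 and block_cnt 8. Note the 6-byte CDB quirk handled
below: a transfer length of 0 means 256 blocks.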
*/ 2324 switch (scmd->cmnd[0]) { 2325 case WRITE_6: 2326 is_write = true; 2327 fallthrough; 2328 case READ_6: 2329 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2330 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2331 block_cnt = (u32)scmd->cmnd[4]; 2332 if (block_cnt == 0) 2333 block_cnt = 256; 2334 break; 2335 case WRITE_10: 2336 is_write = true; 2337 fallthrough; 2338 case READ_10: 2339 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2340 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2341 break; 2342 case WRITE_12: 2343 is_write = true; 2344 fallthrough; 2345 case READ_12: 2346 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2347 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2348 break; 2349 case WRITE_16: 2350 is_write = true; 2351 fallthrough; 2352 case READ_16: 2353 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2354 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2355 break; 2356 default: 2357 /* Process via normal I/O path. */ 2358 return PQI_RAID_BYPASS_INELIGIBLE; 2359 } 2360 2361 /* Check for write to non-RAID-0. */ 2362 if (is_write && device->raid_level != SA_RAID_0) 2363 return PQI_RAID_BYPASS_INELIGIBLE; 2364 2365 if (unlikely(block_cnt == 0)) 2366 return PQI_RAID_BYPASS_INELIGIBLE; 2367 2368 last_block = first_block + block_cnt - 1; 2369 raid_map = device->raid_map; 2370 2371 /* Check for invalid block or wraparound. */ 2372 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2373 last_block < first_block) 2374 return PQI_RAID_BYPASS_INELIGIBLE; 2375 2376 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2377 strip_size = get_unaligned_le16(&raid_map->strip_size); 2378 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2379 2380 /* Calculate stripe information for the request. */ 2381 blocks_per_row = data_disks_per_row * strip_size; 2382 #if BITS_PER_LONG == 32 2383 tmpdiv = first_block; 2384 do_div(tmpdiv, blocks_per_row); 2385 first_row = tmpdiv; 2386 tmpdiv = last_block; 2387 do_div(tmpdiv, blocks_per_row); 2388 last_row = tmpdiv; 2389 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2390 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2391 tmpdiv = first_row_offset; 2392 do_div(tmpdiv, strip_size); 2393 first_column = tmpdiv; 2394 tmpdiv = last_row_offset; 2395 do_div(tmpdiv, strip_size); 2396 last_column = tmpdiv; 2397 #else 2398 first_row = first_block / blocks_per_row; 2399 last_row = last_block / blocks_per_row; 2400 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2401 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2402 first_column = first_row_offset / strip_size; 2403 last_column = last_row_offset / strip_size; 2404 #endif 2405 2406 /* If this isn't a single row/column then give to the controller. */ 2407 if (first_row != last_row || first_column != last_column) 2408 return PQI_RAID_BYPASS_INELIGIBLE; 2409 2410 /* Proceeding with driver mapping. 
*/ 2411 total_disks_per_row = data_disks_per_row + 2412 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2413 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2414 get_unaligned_le16(&raid_map->row_cnt); 2415 map_index = (map_row * total_disks_per_row) + first_column; 2416 2417 /* RAID 1 */ 2418 if (device->raid_level == SA_RAID_1) { 2419 if (device->offload_to_mirror) 2420 map_index += data_disks_per_row; 2421 device->offload_to_mirror = !device->offload_to_mirror; 2422 } else if (device->raid_level == SA_RAID_ADM) { 2423 /* RAID ADM */ 2424 /* 2425 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2426 * divisible by 3. 2427 */ 2428 offload_to_mirror = device->offload_to_mirror; 2429 if (offload_to_mirror == 0) { 2430 /* use physical disk in the first mirrored group. */ 2431 map_index %= data_disks_per_row; 2432 } else { 2433 do { 2434 /* 2435 * Determine mirror group that map_index 2436 * indicates. 2437 */ 2438 current_group = map_index / data_disks_per_row; 2439 2440 if (offload_to_mirror != current_group) { 2441 if (current_group < 2442 layout_map_count - 1) { 2443 /* 2444 * Select raid index from 2445 * next group. 2446 */ 2447 map_index += data_disks_per_row; 2448 current_group++; 2449 } else { 2450 /* 2451 * Select raid index from first 2452 * group. 2453 */ 2454 map_index %= data_disks_per_row; 2455 current_group = 0; 2456 } 2457 } 2458 } while (offload_to_mirror != current_group); 2459 } 2460 2461 /* Set mirror group to use next time. */ 2462 offload_to_mirror = 2463 (offload_to_mirror >= layout_map_count - 1) ? 2464 0 : offload_to_mirror + 1; 2465 WARN_ON(offload_to_mirror >= layout_map_count); 2466 device->offload_to_mirror = offload_to_mirror; 2467 /* 2468 * Avoid direct use of device->offload_to_mirror within this 2469 * function since multiple threads might simultaneously 2470 * increment it beyond the range of device->layout_map_count -1. 
2471 */ 2472 } else if ((device->raid_level == SA_RAID_5 || 2473 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2474 /* RAID 50/60 */ 2475 /* Verify first and last block are in same RAID group */ 2476 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2477 stripesize = r5or6_blocks_per_row * layout_map_count; 2478 #if BITS_PER_LONG == 32 2479 tmpdiv = first_block; 2480 first_group = do_div(tmpdiv, stripesize); 2481 tmpdiv = first_group; 2482 do_div(tmpdiv, r5or6_blocks_per_row); 2483 first_group = tmpdiv; 2484 tmpdiv = last_block; 2485 last_group = do_div(tmpdiv, stripesize); 2486 tmpdiv = last_group; 2487 do_div(tmpdiv, r5or6_blocks_per_row); 2488 last_group = tmpdiv; 2489 #else 2490 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2491 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2492 #endif 2493 if (first_group != last_group) 2494 return PQI_RAID_BYPASS_INELIGIBLE; 2495 2496 /* Verify request is in a single row of RAID 5/6 */ 2497 #if BITS_PER_LONG == 32 2498 tmpdiv = first_block; 2499 do_div(tmpdiv, stripesize); 2500 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2501 tmpdiv = last_block; 2502 do_div(tmpdiv, stripesize); 2503 r5or6_last_row = r0_last_row = tmpdiv; 2504 #else 2505 first_row = r5or6_first_row = r0_first_row = 2506 first_block / stripesize; 2507 r5or6_last_row = r0_last_row = last_block / stripesize; 2508 #endif 2509 if (r5or6_first_row != r5or6_last_row) 2510 return PQI_RAID_BYPASS_INELIGIBLE; 2511 2512 /* Verify request is in a single column */ 2513 #if BITS_PER_LONG == 32 2514 tmpdiv = first_block; 2515 first_row_offset = do_div(tmpdiv, stripesize); 2516 tmpdiv = first_row_offset; 2517 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2518 r5or6_first_row_offset = first_row_offset; 2519 tmpdiv = last_block; 2520 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2521 tmpdiv = r5or6_last_row_offset; 2522 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2523 tmpdiv = r5or6_first_row_offset; 2524 do_div(tmpdiv, strip_size); 2525 first_column = r5or6_first_column = tmpdiv; 2526 tmpdiv = r5or6_last_row_offset; 2527 do_div(tmpdiv, strip_size); 2528 r5or6_last_column = tmpdiv; 2529 #else 2530 first_row_offset = r5or6_first_row_offset = 2531 (u32)((first_block % stripesize) % 2532 r5or6_blocks_per_row); 2533 2534 r5or6_last_row_offset = 2535 (u32)((last_block % stripesize) % 2536 r5or6_blocks_per_row); 2537 2538 first_column = r5or6_first_row_offset / strip_size; 2539 r5or6_first_column = first_column; 2540 r5or6_last_column = r5or6_last_row_offset / strip_size; 2541 #endif 2542 if (r5or6_first_column != r5or6_last_column) 2543 return PQI_RAID_BYPASS_INELIGIBLE; 2544 2545 /* Request is eligible */ 2546 map_row = 2547 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2548 get_unaligned_le16(&raid_map->row_cnt); 2549 2550 map_index = (first_group * 2551 (get_unaligned_le16(&raid_map->row_cnt) * 2552 total_disks_per_row)) + 2553 (map_row * total_disks_per_row) + first_column; 2554 } 2555 2556 aio_handle = raid_map->disk_data[map_index].aio_handle; 2557 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2558 first_row * strip_size + 2559 (first_row_offset - first_column * strip_size); 2560 disk_block_cnt = block_cnt; 2561 2562 /* Handle differing logical/physical block sizes. 
*/ 2563 if (raid_map->phys_blk_shift) { 2564 disk_block <<= raid_map->phys_blk_shift; 2565 disk_block_cnt <<= raid_map->phys_blk_shift; 2566 } 2567 2568 if (unlikely(disk_block_cnt > 0xffff)) 2569 return PQI_RAID_BYPASS_INELIGIBLE; 2570 2571 /* Build the new CDB for the physical disk I/O. */ 2572 if (disk_block > 0xffffffff) { 2573 cdb[0] = is_write ? WRITE_16 : READ_16; 2574 cdb[1] = 0; 2575 put_unaligned_be64(disk_block, &cdb[2]); 2576 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2577 cdb[14] = 0; 2578 cdb[15] = 0; 2579 cdb_length = 16; 2580 } else { 2581 cdb[0] = is_write ? WRITE_10 : READ_10; 2582 cdb[1] = 0; 2583 put_unaligned_be32((u32)disk_block, &cdb[2]); 2584 cdb[6] = 0; 2585 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2586 cdb[9] = 0; 2587 cdb_length = 10; 2588 } 2589 2590 if (get_unaligned_le16(&raid_map->flags) & 2591 RAID_MAP_ENCRYPTION_ENABLED) { 2592 pqi_set_encryption_info(&encryption_info, raid_map, 2593 first_block); 2594 encryption_info_ptr = &encryption_info; 2595 } else { 2596 encryption_info_ptr = NULL; 2597 } 2598 2599 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2600 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2601 } 2602 2603 #define PQI_STATUS_IDLE 0x0 2604 2605 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2606 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2607 2608 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2609 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2610 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2611 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2612 #define PQI_DEVICE_STATE_ERROR 0x4 2613 2614 #define PQI_MODE_READY_TIMEOUT_SECS 30 2615 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2616 2617 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2618 { 2619 struct pqi_device_registers __iomem *pqi_registers; 2620 unsigned long timeout; 2621 u64 signature; 2622 u8 status; 2623 2624 pqi_registers = ctrl_info->pqi_registers; 2625 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies; 2626 2627 while (1) { 2628 signature = readq(&pqi_registers->signature); 2629 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2630 sizeof(signature)) == 0) 2631 break; 2632 if (time_after(jiffies, timeout)) { 2633 dev_err(&ctrl_info->pci_dev->dev, 2634 "timed out waiting for PQI signature\n"); 2635 return -ETIMEDOUT; 2636 } 2637 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2638 } 2639 2640 while (1) { 2641 status = readb(&pqi_registers->function_and_status_code); 2642 if (status == PQI_STATUS_IDLE) 2643 break; 2644 if (time_after(jiffies, timeout)) { 2645 dev_err(&ctrl_info->pci_dev->dev, 2646 "timed out waiting for PQI IDLE\n"); 2647 return -ETIMEDOUT; 2648 } 2649 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2650 } 2651 2652 while (1) { 2653 if (readl(&pqi_registers->device_status) == 2654 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2655 break; 2656 if (time_after(jiffies, timeout)) { 2657 dev_err(&ctrl_info->pci_dev->dev, 2658 "timed out waiting for PQI all registers ready\n"); 2659 return -ETIMEDOUT; 2660 } 2661 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2662 } 2663 2664 return 0; 2665 } 2666 2667 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2668 { 2669 struct pqi_scsi_dev *device; 2670 2671 device = io_request->scmd->device->hostdata; 2672 device->raid_bypass_enabled = false; 2673 device->aio_enabled = false; 2674 } 2675 2676 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2677 { 2678 struct pqi_ctrl_info *ctrl_info; 2679 struct pqi_scsi_dev *device; 2680 2681 device 
= sdev->hostdata; 2682 if (device->device_offline) 2683 return; 2684 2685 device->device_offline = true; 2686 ctrl_info = shost_to_hba(sdev->host); 2687 pqi_schedule_rescan_worker(ctrl_info); 2688 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 2689 path, ctrl_info->scsi_host->host_no, device->bus, 2690 device->target, device->lun); 2691 } 2692 2693 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2694 { 2695 u8 scsi_status; 2696 u8 host_byte; 2697 struct scsi_cmnd *scmd; 2698 struct pqi_raid_error_info *error_info; 2699 size_t sense_data_length; 2700 int residual_count; 2701 int xfer_count; 2702 struct scsi_sense_hdr sshdr; 2703 2704 scmd = io_request->scmd; 2705 if (!scmd) 2706 return; 2707 2708 error_info = io_request->error_info; 2709 scsi_status = error_info->status; 2710 host_byte = DID_OK; 2711 2712 switch (error_info->data_out_result) { 2713 case PQI_DATA_IN_OUT_GOOD: 2714 break; 2715 case PQI_DATA_IN_OUT_UNDERFLOW: 2716 xfer_count = 2717 get_unaligned_le32(&error_info->data_out_transferred); 2718 residual_count = scsi_bufflen(scmd) - xfer_count; 2719 scsi_set_resid(scmd, residual_count); 2720 if (xfer_count < scmd->underflow) 2721 host_byte = DID_SOFT_ERROR; 2722 break; 2723 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2724 case PQI_DATA_IN_OUT_ABORTED: 2725 host_byte = DID_ABORT; 2726 break; 2727 case PQI_DATA_IN_OUT_TIMEOUT: 2728 host_byte = DID_TIME_OUT; 2729 break; 2730 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2731 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2732 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2733 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2734 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2735 case PQI_DATA_IN_OUT_ERROR: 2736 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2737 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2738 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2739 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2740 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2741 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2742 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2743 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2744 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2745 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2746 default: 2747 host_byte = DID_ERROR; 2748 break; 2749 } 2750 2751 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2752 if (sense_data_length == 0) 2753 sense_data_length = 2754 get_unaligned_le16(&error_info->response_data_length); 2755 if (sense_data_length) { 2756 if (sense_data_length > sizeof(error_info->data)) 2757 sense_data_length = sizeof(error_info->data); 2758 2759 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2760 scsi_normalize_sense(error_info->data, 2761 sense_data_length, &sshdr) && 2762 sshdr.sense_key == HARDWARE_ERROR && 2763 sshdr.asc == 0x3e) { 2764 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 2765 struct pqi_scsi_dev *device = scmd->device->hostdata; 2766 2767 switch (sshdr.ascq) { 2768 case 0x1: /* LOGICAL UNIT FAILURE */ 2769 if (printk_ratelimit()) 2770 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 2771 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 2772 pqi_take_device_offline(scmd->device, "RAID"); 2773 host_byte = DID_NO_CONNECT; 2774 break; 2775 2776 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 2777 if (printk_ratelimit()) 2778 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 2779 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 2780 break; 2781 } 2782 } 2783 2784 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2785 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2786 memcpy(scmd->sense_buffer, error_info->data, 2787 sense_data_length); 2788 } 2789 2790 scmd->result = scsi_status; 2791 set_host_byte(scmd, host_byte); 2792 } 2793 2794 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2795 { 2796 u8 scsi_status; 2797 u8 host_byte; 2798 struct scsi_cmnd *scmd; 2799 struct pqi_aio_error_info *error_info; 2800 size_t sense_data_length; 2801 int residual_count; 2802 int xfer_count; 2803 bool device_offline; 2804 2805 scmd = io_request->scmd; 2806 error_info = io_request->error_info; 2807 host_byte = DID_OK; 2808 sense_data_length = 0; 2809 device_offline = false; 2810 2811 switch (error_info->service_response) { 2812 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2813 scsi_status = error_info->status; 2814 break; 2815 case PQI_AIO_SERV_RESPONSE_FAILURE: 2816 switch (error_info->status) { 2817 case PQI_AIO_STATUS_IO_ABORTED: 2818 scsi_status = SAM_STAT_TASK_ABORTED; 2819 break; 2820 case PQI_AIO_STATUS_UNDERRUN: 2821 scsi_status = SAM_STAT_GOOD; 2822 residual_count = get_unaligned_le32( 2823 &error_info->residual_count); 2824 scsi_set_resid(scmd, residual_count); 2825 xfer_count = scsi_bufflen(scmd) - residual_count; 2826 if (xfer_count < scmd->underflow) 2827 host_byte = DID_SOFT_ERROR; 2828 break; 2829 case PQI_AIO_STATUS_OVERRUN: 2830 scsi_status = SAM_STAT_GOOD; 2831 break; 2832 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2833 pqi_aio_path_disabled(io_request); 2834 scsi_status = SAM_STAT_GOOD; 2835 io_request->status = -EAGAIN; 2836 break; 2837 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2838 case PQI_AIO_STATUS_INVALID_DEVICE: 2839 if (!io_request->raid_bypass) { 2840 device_offline = true; 2841 pqi_take_device_offline(scmd->device, "AIO"); 2842 host_byte = DID_NO_CONNECT; 2843 } 2844 scsi_status = SAM_STAT_CHECK_CONDITION; 2845 break; 2846 case PQI_AIO_STATUS_IO_ERROR: 2847 default: 2848 scsi_status = SAM_STAT_CHECK_CONDITION; 2849 break; 2850 } 2851 break; 2852 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2853 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2854 scsi_status = SAM_STAT_GOOD; 2855 break; 2856 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2857 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2858 default: 2859 scsi_status = SAM_STAT_CHECK_CONDITION; 2860 break; 2861 } 2862 2863 if (error_info->data_present) { 2864 sense_data_length = 2865 get_unaligned_le16(&error_info->data_length); 2866 if (sense_data_length) { 2867 if (sense_data_length > sizeof(error_info->data)) 2868 sense_data_length = sizeof(error_info->data); 2869 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2870 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2871 memcpy(scmd->sense_buffer, error_info->data, 2872 sense_data_length); 2873 } 2874 } 2875 2876 if (device_offline && sense_data_length == 0) 2877 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2878 0x3e, 0x1); 2879 2880 scmd->result = scsi_status; 2881 set_host_byte(scmd, host_byte); 2882 } 2883 2884 static void pqi_process_io_error(unsigned int iu_type, 2885 struct pqi_io_request *io_request) 2886 { 2887 switch (iu_type) { 2888 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2889 pqi_process_raid_io_error(io_request); 2890 break; 2891 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2892 pqi_process_aio_io_error(io_request); 2893 break; 2894 } 2895 } 2896 2897 static int pqi_interpret_task_management_response( 2898 
struct pqi_task_management_response *response) 2899 { 2900 int rc; 2901 2902 switch (response->response_code) { 2903 case SOP_TMF_COMPLETE: 2904 case SOP_TMF_FUNCTION_SUCCEEDED: 2905 rc = 0; 2906 break; 2907 case SOP_TMF_REJECTED: 2908 rc = -EAGAIN; 2909 break; 2910 default: 2911 rc = -EIO; 2912 break; 2913 } 2914 2915 return rc; 2916 } 2917 2918 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2919 struct pqi_queue_group *queue_group) 2920 { 2921 unsigned int num_responses; 2922 pqi_index_t oq_pi; 2923 pqi_index_t oq_ci; 2924 struct pqi_io_request *io_request; 2925 struct pqi_io_response *response; 2926 u16 request_id; 2927 2928 num_responses = 0; 2929 oq_ci = queue_group->oq_ci_copy; 2930 2931 while (1) { 2932 oq_pi = readl(queue_group->oq_pi); 2933 if (oq_pi == oq_ci) 2934 break; 2935 2936 num_responses++; 2937 response = queue_group->oq_element_array + 2938 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2939 2940 request_id = get_unaligned_le16(&response->request_id); 2941 WARN_ON(request_id >= ctrl_info->max_io_slots); 2942 2943 io_request = &ctrl_info->io_request_pool[request_id]; 2944 WARN_ON(atomic_read(&io_request->refcount) == 0); 2945 2946 switch (response->header.iu_type) { 2947 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2948 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2949 if (io_request->scmd) 2950 io_request->scmd->result = 0; 2951 fallthrough; 2952 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2953 break; 2954 case PQI_RESPONSE_IU_VENDOR_GENERAL: 2955 io_request->status = 2956 get_unaligned_le16( 2957 &((struct pqi_vendor_general_response *) 2958 response)->status); 2959 break; 2960 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2961 io_request->status = 2962 pqi_interpret_task_management_response( 2963 (void *)response); 2964 break; 2965 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2966 pqi_aio_path_disabled(io_request); 2967 io_request->status = -EAGAIN; 2968 break; 2969 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2970 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2971 io_request->error_info = ctrl_info->error_buffer + 2972 (get_unaligned_le16(&response->error_index) * 2973 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2974 pqi_process_io_error(response->header.iu_type, 2975 io_request); 2976 break; 2977 default: 2978 dev_err(&ctrl_info->pci_dev->dev, 2979 "unexpected IU type: 0x%x\n", 2980 response->header.iu_type); 2981 break; 2982 } 2983 2984 io_request->io_complete_callback(io_request, 2985 io_request->context); 2986 2987 /* 2988 * Note that the I/O request structure CANNOT BE TOUCHED after 2989 * returning from the I/O completion callback! 
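* The callback normally completes the SCSI command and/or drops the
* request's reference, returning the slot to io_request_pool, so another
* submission path may reuse it immediately.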
2990 */ 2991 2992 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2993 } 2994 2995 if (num_responses) { 2996 queue_group->oq_ci_copy = oq_ci; 2997 writel(oq_ci, queue_group->oq_ci); 2998 } 2999 3000 return num_responses; 3001 } 3002 3003 static inline unsigned int pqi_num_elements_free(unsigned int pi, 3004 unsigned int ci, unsigned int elements_in_queue) 3005 { 3006 unsigned int num_elements_used; 3007 3008 if (pi >= ci) 3009 num_elements_used = pi - ci; 3010 else 3011 num_elements_used = elements_in_queue - ci + pi; 3012 3013 return elements_in_queue - num_elements_used - 1; 3014 } 3015 3016 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3017 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3018 { 3019 pqi_index_t iq_pi; 3020 pqi_index_t iq_ci; 3021 unsigned long flags; 3022 void *next_element; 3023 struct pqi_queue_group *queue_group; 3024 3025 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3026 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3027 3028 while (1) { 3029 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3030 3031 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3032 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3033 3034 if (pqi_num_elements_free(iq_pi, iq_ci, 3035 ctrl_info->num_elements_per_iq)) 3036 break; 3037 3038 spin_unlock_irqrestore( 3039 &queue_group->submit_lock[RAID_PATH], flags); 3040 3041 if (pqi_ctrl_offline(ctrl_info)) 3042 return; 3043 } 3044 3045 next_element = queue_group->iq_element_array[RAID_PATH] + 3046 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3047 3048 memcpy(next_element, iu, iu_length); 3049 3050 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3051 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3052 3053 /* 3054 * This write notifies the controller that an IU is available to be 3055 * processed. 
3056 */ 3057 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3058 3059 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3060 } 3061 3062 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3063 struct pqi_event *event) 3064 { 3065 struct pqi_event_acknowledge_request request; 3066 3067 memset(&request, 0, sizeof(request)); 3068 3069 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3070 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3071 &request.header.iu_length); 3072 request.event_type = event->event_type; 3073 request.event_id = event->event_id; 3074 request.additional_event_id = event->additional_event_id; 3075 3076 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3077 } 3078 3079 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3080 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3081 3082 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3083 struct pqi_ctrl_info *ctrl_info) 3084 { 3085 unsigned long timeout; 3086 u8 status; 3087 3088 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; 3089 3090 while (1) { 3091 status = pqi_read_soft_reset_status(ctrl_info); 3092 if (status & PQI_SOFT_RESET_INITIATE) 3093 return RESET_INITIATE_DRIVER; 3094 3095 if (status & PQI_SOFT_RESET_ABORT) 3096 return RESET_ABORT; 3097 3098 if (time_after(jiffies, timeout)) { 3099 dev_err(&ctrl_info->pci_dev->dev, 3100 "timed out waiting for soft reset status\n"); 3101 return RESET_TIMEDOUT; 3102 } 3103 3104 if (!sis_is_firmware_running(ctrl_info)) 3105 return RESET_NORESPONSE; 3106 3107 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3108 } 3109 } 3110 3111 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, 3112 enum pqi_soft_reset_status reset_status) 3113 { 3114 int rc; 3115 3116 switch (reset_status) { 3117 case RESET_INITIATE_DRIVER: 3118 case RESET_TIMEDOUT: 3119 dev_info(&ctrl_info->pci_dev->dev, 3120 "resetting controller %u\n", ctrl_info->ctrl_id); 3121 sis_soft_reset(ctrl_info); 3122 fallthrough; 3123 case RESET_INITIATE_FIRMWARE: 3124 rc = pqi_ofa_ctrl_restart(ctrl_info); 3125 pqi_ofa_free_host_buffer(ctrl_info); 3126 dev_info(&ctrl_info->pci_dev->dev, 3127 "Online Firmware Activation for controller %u: %s\n", 3128 ctrl_info->ctrl_id, rc == 0 ? 
"SUCCESS" : "FAILED"); 3129 break; 3130 case RESET_ABORT: 3131 pqi_ofa_ctrl_unquiesce(ctrl_info); 3132 dev_info(&ctrl_info->pci_dev->dev, 3133 "Online Firmware Activation for controller %u: %s\n", 3134 ctrl_info->ctrl_id, "ABORTED"); 3135 break; 3136 case RESET_NORESPONSE: 3137 pqi_ofa_free_host_buffer(ctrl_info); 3138 pqi_take_ctrl_offline(ctrl_info); 3139 break; 3140 } 3141 } 3142 3143 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3144 struct pqi_event *event) 3145 { 3146 u16 event_id; 3147 enum pqi_soft_reset_status status; 3148 3149 event_id = get_unaligned_le16(&event->event_id); 3150 3151 mutex_lock(&ctrl_info->ofa_mutex); 3152 3153 if (event_id == PQI_EVENT_OFA_QUIESCE) { 3154 dev_info(&ctrl_info->pci_dev->dev, 3155 "Received Online Firmware Activation quiesce event for controller %u\n", 3156 ctrl_info->ctrl_id); 3157 pqi_ofa_ctrl_quiesce(ctrl_info); 3158 pqi_acknowledge_event(ctrl_info, event); 3159 if (ctrl_info->soft_reset_handshake_supported) { 3160 status = pqi_poll_for_soft_reset_status(ctrl_info); 3161 pqi_process_soft_reset(ctrl_info, status); 3162 } else { 3163 pqi_process_soft_reset(ctrl_info, 3164 RESET_INITIATE_FIRMWARE); 3165 } 3166 3167 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3168 pqi_acknowledge_event(ctrl_info, event); 3169 pqi_ofa_setup_host_buffer(ctrl_info, 3170 le32_to_cpu(event->ofa_bytes_requested)); 3171 pqi_ofa_host_memory_update(ctrl_info); 3172 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3173 pqi_ofa_free_host_buffer(ctrl_info); 3174 pqi_acknowledge_event(ctrl_info, event); 3175 dev_info(&ctrl_info->pci_dev->dev, 3176 "Online Firmware Activation(%u) cancel reason : %u\n", 3177 ctrl_info->ctrl_id, event->ofa_cancel_reason); 3178 } 3179 3180 mutex_unlock(&ctrl_info->ofa_mutex); 3181 } 3182 3183 static void pqi_event_worker(struct work_struct *work) 3184 { 3185 unsigned int i; 3186 struct pqi_ctrl_info *ctrl_info; 3187 struct pqi_event *event; 3188 3189 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3190 3191 pqi_ctrl_busy(ctrl_info); 3192 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 3193 if (pqi_ctrl_offline(ctrl_info)) 3194 goto out; 3195 3196 pqi_schedule_rescan_worker_delayed(ctrl_info); 3197 3198 event = ctrl_info->events; 3199 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3200 if (event->pending) { 3201 event->pending = false; 3202 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3203 pqi_ctrl_unbusy(ctrl_info); 3204 pqi_ofa_process_event(ctrl_info, event); 3205 return; 3206 } 3207 pqi_acknowledge_event(ctrl_info, event); 3208 } 3209 event++; 3210 } 3211 3212 out: 3213 pqi_ctrl_unbusy(ctrl_info); 3214 } 3215 3216 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ) 3217 3218 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3219 { 3220 int num_interrupts; 3221 u32 heartbeat_count; 3222 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 3223 heartbeat_timer); 3224 3225 pqi_check_ctrl_health(ctrl_info); 3226 if (pqi_ctrl_offline(ctrl_info)) 3227 return; 3228 3229 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3230 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3231 3232 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3233 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3234 dev_err(&ctrl_info->pci_dev->dev, 3235 "no heartbeat detected - last heartbeat count: %u\n", 3236 heartbeat_count); 3237 pqi_take_ctrl_offline(ctrl_info); 3238 return; 3239 } 3240 } else { 3241 ctrl_info->previous_num_interrupts = num_interrupts; 3242 } 
3243 3244 ctrl_info->previous_heartbeat_count = heartbeat_count; 3245 mod_timer(&ctrl_info->heartbeat_timer, 3246 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3247 } 3248 3249 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3250 { 3251 if (!ctrl_info->heartbeat_counter) 3252 return; 3253 3254 ctrl_info->previous_num_interrupts = 3255 atomic_read(&ctrl_info->num_interrupts); 3256 ctrl_info->previous_heartbeat_count = 3257 pqi_read_heartbeat_counter(ctrl_info); 3258 3259 ctrl_info->heartbeat_timer.expires = 3260 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3261 add_timer(&ctrl_info->heartbeat_timer); 3262 } 3263 3264 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3265 { 3266 del_timer_sync(&ctrl_info->heartbeat_timer); 3267 } 3268 3269 static inline int pqi_event_type_to_event_index(unsigned int event_type) 3270 { 3271 int index; 3272 3273 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 3274 if (event_type == pqi_supported_event_types[index]) 3275 return index; 3276 3277 return -1; 3278 } 3279 3280 static inline bool pqi_is_supported_event(unsigned int event_type) 3281 { 3282 return pqi_event_type_to_event_index(event_type) != -1; 3283 } 3284 3285 static void pqi_ofa_capture_event_payload(struct pqi_event *event, 3286 struct pqi_event_response *response) 3287 { 3288 u16 event_id; 3289 3290 event_id = get_unaligned_le16(&event->event_id); 3291 3292 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3293 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3294 event->ofa_bytes_requested = 3295 response->data.ofa_memory_allocation.bytes_requested; 3296 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3297 event->ofa_cancel_reason = 3298 response->data.ofa_cancelled.reason; 3299 } 3300 } 3301 } 3302 3303 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3304 { 3305 unsigned int num_events; 3306 pqi_index_t oq_pi; 3307 pqi_index_t oq_ci; 3308 struct pqi_event_queue *event_queue; 3309 struct pqi_event_response *response; 3310 struct pqi_event *event; 3311 int event_index; 3312 3313 event_queue = &ctrl_info->event_queue; 3314 num_events = 0; 3315 oq_ci = event_queue->oq_ci_copy; 3316 3317 while (1) { 3318 oq_pi = readl(event_queue->oq_pi); 3319 if (oq_pi == oq_ci) 3320 break; 3321 3322 num_events++; 3323 response = event_queue->oq_element_array + 3324 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3325 3326 event_index = 3327 pqi_event_type_to_event_index(response->event_type); 3328 3329 if (event_index >= 0) { 3330 if (response->request_acknowlege) { 3331 event = &ctrl_info->events[event_index]; 3332 event->pending = true; 3333 event->event_type = response->event_type; 3334 event->event_id = response->event_id; 3335 event->additional_event_id = 3336 response->additional_event_id; 3337 pqi_ofa_capture_event_payload(event, response); 3338 } 3339 } 3340 3341 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3342 } 3343 3344 if (num_events) { 3345 event_queue->oq_ci_copy = oq_ci; 3346 writel(oq_ci, event_queue->oq_ci); 3347 schedule_work(&ctrl_info->event_work); 3348 } 3349 3350 return num_events; 3351 } 3352 3353 #define PQI_LEGACY_INTX_MASK 0x1 3354 3355 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 3356 bool enable_intx) 3357 { 3358 u32 intx_mask; 3359 struct pqi_device_registers __iomem *pqi_registers; 3360 volatile void __iomem *register_addr; 3361 3362 pqi_registers = ctrl_info->pqi_registers; 3363 3364 if (enable_intx) 3365 register_addr = &pqi_registers->legacy_intx_mask_clear; 
3366 else 3367 register_addr = &pqi_registers->legacy_intx_mask_set; 3368 3369 intx_mask = readl(register_addr); 3370 intx_mask |= PQI_LEGACY_INTX_MASK; 3371 writel(intx_mask, register_addr); 3372 } 3373 3374 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3375 enum pqi_irq_mode new_mode) 3376 { 3377 switch (ctrl_info->irq_mode) { 3378 case IRQ_MODE_MSIX: 3379 switch (new_mode) { 3380 case IRQ_MODE_MSIX: 3381 break; 3382 case IRQ_MODE_INTX: 3383 pqi_configure_legacy_intx(ctrl_info, true); 3384 sis_enable_intx(ctrl_info); 3385 break; 3386 case IRQ_MODE_NONE: 3387 break; 3388 } 3389 break; 3390 case IRQ_MODE_INTX: 3391 switch (new_mode) { 3392 case IRQ_MODE_MSIX: 3393 pqi_configure_legacy_intx(ctrl_info, false); 3394 sis_enable_msix(ctrl_info); 3395 break; 3396 case IRQ_MODE_INTX: 3397 break; 3398 case IRQ_MODE_NONE: 3399 pqi_configure_legacy_intx(ctrl_info, false); 3400 break; 3401 } 3402 break; 3403 case IRQ_MODE_NONE: 3404 switch (new_mode) { 3405 case IRQ_MODE_MSIX: 3406 sis_enable_msix(ctrl_info); 3407 break; 3408 case IRQ_MODE_INTX: 3409 pqi_configure_legacy_intx(ctrl_info, true); 3410 sis_enable_intx(ctrl_info); 3411 break; 3412 case IRQ_MODE_NONE: 3413 break; 3414 } 3415 break; 3416 } 3417 3418 ctrl_info->irq_mode = new_mode; 3419 } 3420 3421 #define PQI_LEGACY_INTX_PENDING 0x1 3422 3423 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3424 { 3425 bool valid_irq; 3426 u32 intx_status; 3427 3428 switch (ctrl_info->irq_mode) { 3429 case IRQ_MODE_MSIX: 3430 valid_irq = true; 3431 break; 3432 case IRQ_MODE_INTX: 3433 intx_status = 3434 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3435 if (intx_status & PQI_LEGACY_INTX_PENDING) 3436 valid_irq = true; 3437 else 3438 valid_irq = false; 3439 break; 3440 case IRQ_MODE_NONE: 3441 default: 3442 valid_irq = false; 3443 break; 3444 } 3445 3446 return valid_irq; 3447 } 3448 3449 static irqreturn_t pqi_irq_handler(int irq, void *data) 3450 { 3451 struct pqi_ctrl_info *ctrl_info; 3452 struct pqi_queue_group *queue_group; 3453 unsigned int num_responses_handled; 3454 3455 queue_group = data; 3456 ctrl_info = queue_group->ctrl_info; 3457 3458 if (!pqi_is_valid_irq(ctrl_info)) 3459 return IRQ_NONE; 3460 3461 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3462 3463 if (irq == ctrl_info->event_irq) 3464 num_responses_handled += pqi_process_event_intr(ctrl_info); 3465 3466 if (num_responses_handled) 3467 atomic_inc(&ctrl_info->num_interrupts); 3468 3469 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3470 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3471 3472 return IRQ_HANDLED; 3473 } 3474 3475 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3476 { 3477 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3478 int i; 3479 int rc; 3480 3481 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3482 3483 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3484 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3485 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3486 if (rc) { 3487 dev_err(&pci_dev->dev, 3488 "irq %u init failed with error %d\n", 3489 pci_irq_vector(pci_dev, i), rc); 3490 return rc; 3491 } 3492 ctrl_info->num_msix_vectors_initialized++; 3493 } 3494 3495 return 0; 3496 } 3497 3498 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3499 { 3500 int i; 3501 3502 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3503 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3504 &ctrl_info->queue_groups[i]); 3505 3506 
ctrl_info->num_msix_vectors_initialized = 0; 3507 } 3508 3509 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3510 { 3511 int num_vectors_enabled; 3512 3513 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3514 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3515 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3516 if (num_vectors_enabled < 0) { 3517 dev_err(&ctrl_info->pci_dev->dev, 3518 "MSI-X init failed with error %d\n", 3519 num_vectors_enabled); 3520 return num_vectors_enabled; 3521 } 3522 3523 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3524 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3525 return 0; 3526 } 3527 3528 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3529 { 3530 if (ctrl_info->num_msix_vectors_enabled) { 3531 pci_free_irq_vectors(ctrl_info->pci_dev); 3532 ctrl_info->num_msix_vectors_enabled = 0; 3533 } 3534 } 3535 3536 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3537 { 3538 unsigned int i; 3539 size_t alloc_length; 3540 size_t element_array_length_per_iq; 3541 size_t element_array_length_per_oq; 3542 void *element_array; 3543 void __iomem *next_queue_index; 3544 void *aligned_pointer; 3545 unsigned int num_inbound_queues; 3546 unsigned int num_outbound_queues; 3547 unsigned int num_queue_indexes; 3548 struct pqi_queue_group *queue_group; 3549 3550 element_array_length_per_iq = 3551 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3552 ctrl_info->num_elements_per_iq; 3553 element_array_length_per_oq = 3554 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3555 ctrl_info->num_elements_per_oq; 3556 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3557 num_outbound_queues = ctrl_info->num_queue_groups; 3558 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3559 3560 aligned_pointer = NULL; 3561 3562 for (i = 0; i < num_inbound_queues; i++) { 3563 aligned_pointer = PTR_ALIGN(aligned_pointer, 3564 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3565 aligned_pointer += element_array_length_per_iq; 3566 } 3567 3568 for (i = 0; i < num_outbound_queues; i++) { 3569 aligned_pointer = PTR_ALIGN(aligned_pointer, 3570 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3571 aligned_pointer += element_array_length_per_oq; 3572 } 3573 3574 aligned_pointer = PTR_ALIGN(aligned_pointer, 3575 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3576 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3577 PQI_EVENT_OQ_ELEMENT_LENGTH; 3578 3579 for (i = 0; i < num_queue_indexes; i++) { 3580 aligned_pointer = PTR_ALIGN(aligned_pointer, 3581 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3582 aligned_pointer += sizeof(pqi_index_t); 3583 } 3584 3585 alloc_length = (size_t)aligned_pointer + 3586 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3587 3588 alloc_length += PQI_EXTRA_SGL_MEMORY; 3589 3590 ctrl_info->queue_memory_base = 3591 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3592 &ctrl_info->queue_memory_base_dma_handle, 3593 GFP_KERNEL); 3594 3595 if (!ctrl_info->queue_memory_base) 3596 return -ENOMEM; 3597 3598 ctrl_info->queue_memory_length = alloc_length; 3599 3600 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3601 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3602 3603 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3604 queue_group = &ctrl_info->queue_groups[i]; 3605 queue_group->iq_element_array[RAID_PATH] = element_array; 3606 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3607 ctrl_info->queue_memory_base_dma_handle + 3608 (element_array - ctrl_info->queue_memory_base); 3609 element_array += element_array_length_per_iq; 3610 element_array = 
PTR_ALIGN(element_array, 3611 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3612 queue_group->iq_element_array[AIO_PATH] = element_array; 3613 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3614 ctrl_info->queue_memory_base_dma_handle + 3615 (element_array - ctrl_info->queue_memory_base); 3616 element_array += element_array_length_per_iq; 3617 element_array = PTR_ALIGN(element_array, 3618 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3619 } 3620 3621 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3622 queue_group = &ctrl_info->queue_groups[i]; 3623 queue_group->oq_element_array = element_array; 3624 queue_group->oq_element_array_bus_addr = 3625 ctrl_info->queue_memory_base_dma_handle + 3626 (element_array - ctrl_info->queue_memory_base); 3627 element_array += element_array_length_per_oq; 3628 element_array = PTR_ALIGN(element_array, 3629 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3630 } 3631 3632 ctrl_info->event_queue.oq_element_array = element_array; 3633 ctrl_info->event_queue.oq_element_array_bus_addr = 3634 ctrl_info->queue_memory_base_dma_handle + 3635 (element_array - ctrl_info->queue_memory_base); 3636 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3637 PQI_EVENT_OQ_ELEMENT_LENGTH; 3638 3639 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 3640 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3641 3642 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3643 queue_group = &ctrl_info->queue_groups[i]; 3644 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3645 queue_group->iq_ci_bus_addr[RAID_PATH] = 3646 ctrl_info->queue_memory_base_dma_handle + 3647 (next_queue_index - 3648 (void __iomem *)ctrl_info->queue_memory_base); 3649 next_queue_index += sizeof(pqi_index_t); 3650 next_queue_index = PTR_ALIGN(next_queue_index, 3651 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3652 queue_group->iq_ci[AIO_PATH] = next_queue_index; 3653 queue_group->iq_ci_bus_addr[AIO_PATH] = 3654 ctrl_info->queue_memory_base_dma_handle + 3655 (next_queue_index - 3656 (void __iomem *)ctrl_info->queue_memory_base); 3657 next_queue_index += sizeof(pqi_index_t); 3658 next_queue_index = PTR_ALIGN(next_queue_index, 3659 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3660 queue_group->oq_pi = next_queue_index; 3661 queue_group->oq_pi_bus_addr = 3662 ctrl_info->queue_memory_base_dma_handle + 3663 (next_queue_index - 3664 (void __iomem *)ctrl_info->queue_memory_base); 3665 next_queue_index += sizeof(pqi_index_t); 3666 next_queue_index = PTR_ALIGN(next_queue_index, 3667 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3668 } 3669 3670 ctrl_info->event_queue.oq_pi = next_queue_index; 3671 ctrl_info->event_queue.oq_pi_bus_addr = 3672 ctrl_info->queue_memory_base_dma_handle + 3673 (next_queue_index - 3674 (void __iomem *)ctrl_info->queue_memory_base); 3675 3676 return 0; 3677 } 3678 3679 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3680 { 3681 unsigned int i; 3682 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3683 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3684 3685 /* 3686 * Initialize the backpointers to the controller structure in 3687 * each operational queue group structure. 3688 */ 3689 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3690 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3691 3692 /* 3693 * Assign IDs to all operational queues. Note that the IDs 3694 * assigned to operational IQs are independent of the IDs 3695 * assigned to operational OQs. 
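* With N queue groups this hands out 2N inbound queue IDs (one RAID-path
* and one AIO-path IQ per group) and N + 1 outbound queue IDs (one OQ
* per group plus the event queue), all starting from
* PQI_MIN_OPERATIONAL_QUEUE_ID.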
3696 */ 3697 ctrl_info->event_queue.oq_id = next_oq_id++; 3698 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3699 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3700 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3701 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3702 } 3703 3704 /* 3705 * Assign MSI-X table entry indexes to all queues. Note that the 3706 * interrupt for the event queue is shared with the first queue group. 3707 */ 3708 ctrl_info->event_queue.int_msg_num = 0; 3709 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3710 ctrl_info->queue_groups[i].int_msg_num = i; 3711 3712 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3713 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3714 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3715 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3716 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3717 } 3718 } 3719 3720 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3721 { 3722 size_t alloc_length; 3723 struct pqi_admin_queues_aligned *admin_queues_aligned; 3724 struct pqi_admin_queues *admin_queues; 3725 3726 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3727 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3728 3729 ctrl_info->admin_queue_memory_base = 3730 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3731 &ctrl_info->admin_queue_memory_base_dma_handle, 3732 GFP_KERNEL); 3733 3734 if (!ctrl_info->admin_queue_memory_base) 3735 return -ENOMEM; 3736 3737 ctrl_info->admin_queue_memory_length = alloc_length; 3738 3739 admin_queues = &ctrl_info->admin_queues; 3740 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3741 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3742 admin_queues->iq_element_array = 3743 &admin_queues_aligned->iq_element_array; 3744 admin_queues->oq_element_array = 3745 &admin_queues_aligned->oq_element_array; 3746 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3747 admin_queues->oq_pi = 3748 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 3749 3750 admin_queues->iq_element_array_bus_addr = 3751 ctrl_info->admin_queue_memory_base_dma_handle + 3752 (admin_queues->iq_element_array - 3753 ctrl_info->admin_queue_memory_base); 3754 admin_queues->oq_element_array_bus_addr = 3755 ctrl_info->admin_queue_memory_base_dma_handle + 3756 (admin_queues->oq_element_array - 3757 ctrl_info->admin_queue_memory_base); 3758 admin_queues->iq_ci_bus_addr = 3759 ctrl_info->admin_queue_memory_base_dma_handle + 3760 ((void *)admin_queues->iq_ci - 3761 ctrl_info->admin_queue_memory_base); 3762 admin_queues->oq_pi_bus_addr = 3763 ctrl_info->admin_queue_memory_base_dma_handle + 3764 ((void __iomem *)admin_queues->oq_pi - 3765 (void __iomem *)ctrl_info->admin_queue_memory_base); 3766 3767 return 0; 3768 } 3769 3770 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ 3771 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3772 3773 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3774 { 3775 struct pqi_device_registers __iomem *pqi_registers; 3776 struct pqi_admin_queues *admin_queues; 3777 unsigned long timeout; 3778 u8 status; 3779 u32 reg; 3780 3781 pqi_registers = ctrl_info->pqi_registers; 3782 admin_queues = &ctrl_info->admin_queues; 3783 3784 writeq((u64)admin_queues->iq_element_array_bus_addr, 3785 &pqi_registers->admin_iq_element_array_addr); 3786 writeq((u64)admin_queues->oq_element_array_bus_addr, 3787 &pqi_registers->admin_oq_element_array_addr); 3788 
writeq((u64)admin_queues->iq_ci_bus_addr, 3789 &pqi_registers->admin_iq_ci_addr); 3790 writeq((u64)admin_queues->oq_pi_bus_addr, 3791 &pqi_registers->admin_oq_pi_addr); 3792 3793 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3794 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 3795 (admin_queues->int_msg_num << 16); 3796 writel(reg, &pqi_registers->admin_iq_num_elements); 3797 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3798 &pqi_registers->function_and_status_code); 3799 3800 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 3801 while (1) { 3802 status = readb(&pqi_registers->function_and_status_code); 3803 if (status == PQI_STATUS_IDLE) 3804 break; 3805 if (time_after(jiffies, timeout)) 3806 return -ETIMEDOUT; 3807 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3808 } 3809 3810 /* 3811 * The offset registers are not initialized to the correct 3812 * offsets until *after* the create admin queue pair command 3813 * completes successfully. 3814 */ 3815 admin_queues->iq_pi = ctrl_info->iomem_base + 3816 PQI_DEVICE_REGISTERS_OFFSET + 3817 readq(&pqi_registers->admin_iq_pi_offset); 3818 admin_queues->oq_ci = ctrl_info->iomem_base + 3819 PQI_DEVICE_REGISTERS_OFFSET + 3820 readq(&pqi_registers->admin_oq_ci_offset); 3821 3822 return 0; 3823 } 3824 3825 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3826 struct pqi_general_admin_request *request) 3827 { 3828 struct pqi_admin_queues *admin_queues; 3829 void *next_element; 3830 pqi_index_t iq_pi; 3831 3832 admin_queues = &ctrl_info->admin_queues; 3833 iq_pi = admin_queues->iq_pi_copy; 3834 3835 next_element = admin_queues->iq_element_array + 3836 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3837 3838 memcpy(next_element, request, sizeof(*request)); 3839 3840 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3841 admin_queues->iq_pi_copy = iq_pi; 3842 3843 /* 3844 * This write notifies the controller that an IU is available to be 3845 * processed. 
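* Unlike the operational queues, no free-space check is needed here:
* admin requests are effectively serialized, each one being completed
* via pqi_poll_for_admin_response() before the next is submitted, so
* the small admin IQ cannot overflow.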
3846 */ 3847 writel(iq_pi, admin_queues->iq_pi); 3848 } 3849 3850 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3851 3852 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3853 struct pqi_general_admin_response *response) 3854 { 3855 struct pqi_admin_queues *admin_queues; 3856 pqi_index_t oq_pi; 3857 pqi_index_t oq_ci; 3858 unsigned long timeout; 3859 3860 admin_queues = &ctrl_info->admin_queues; 3861 oq_ci = admin_queues->oq_ci_copy; 3862 3863 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies; 3864 3865 while (1) { 3866 oq_pi = readl(admin_queues->oq_pi); 3867 if (oq_pi != oq_ci) 3868 break; 3869 if (time_after(jiffies, timeout)) { 3870 dev_err(&ctrl_info->pci_dev->dev, 3871 "timed out waiting for admin response\n"); 3872 return -ETIMEDOUT; 3873 } 3874 if (!sis_is_firmware_running(ctrl_info)) 3875 return -ENXIO; 3876 usleep_range(1000, 2000); 3877 } 3878 3879 memcpy(response, admin_queues->oq_element_array + 3880 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3881 3882 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3883 admin_queues->oq_ci_copy = oq_ci; 3884 writel(oq_ci, admin_queues->oq_ci); 3885 3886 return 0; 3887 } 3888 3889 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3890 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3891 struct pqi_io_request *io_request) 3892 { 3893 struct pqi_io_request *next; 3894 void *next_element; 3895 pqi_index_t iq_pi; 3896 pqi_index_t iq_ci; 3897 size_t iu_length; 3898 unsigned long flags; 3899 unsigned int num_elements_needed; 3900 unsigned int num_elements_to_end_of_queue; 3901 size_t copy_count; 3902 struct pqi_iu_header *request; 3903 3904 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3905 3906 if (io_request) { 3907 io_request->queue_group = queue_group; 3908 list_add_tail(&io_request->request_list_entry, 3909 &queue_group->request_list[path]); 3910 } 3911 3912 iq_pi = queue_group->iq_pi_copy[path]; 3913 3914 list_for_each_entry_safe(io_request, next, 3915 &queue_group->request_list[path], request_list_entry) { 3916 3917 request = io_request->iu; 3918 3919 iu_length = get_unaligned_le16(&request->iu_length) + 3920 PQI_REQUEST_HEADER_LENGTH; 3921 num_elements_needed = 3922 DIV_ROUND_UP(iu_length, 3923 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3924 3925 iq_ci = readl(queue_group->iq_ci[path]); 3926 3927 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3928 ctrl_info->num_elements_per_iq)) 3929 break; 3930 3931 put_unaligned_le16(queue_group->oq_id, 3932 &request->response_queue_id); 3933 3934 next_element = queue_group->iq_element_array[path] + 3935 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3936 3937 num_elements_to_end_of_queue = 3938 ctrl_info->num_elements_per_iq - iq_pi; 3939 3940 if (num_elements_needed <= num_elements_to_end_of_queue) { 3941 memcpy(next_element, request, iu_length); 3942 } else { 3943 copy_count = num_elements_to_end_of_queue * 3944 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3945 memcpy(next_element, request, copy_count); 3946 memcpy(queue_group->iq_element_array[path], 3947 (u8 *)request + copy_count, 3948 iu_length - copy_count); 3949 } 3950 3951 iq_pi = (iq_pi + num_elements_needed) % 3952 ctrl_info->num_elements_per_iq; 3953 3954 list_del(&io_request->request_list_entry); 3955 } 3956 3957 if (iq_pi != queue_group->iq_pi_copy[path]) { 3958 queue_group->iq_pi_copy[path] = iq_pi; 3959 /* 3960 * This write notifies the controller that one or more IUs are 3961 * available to be processed. 
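 *
 * Illustrative example (assumed numbers): with num_elements_per_iq = 8,
 * iq_pi = 6 and iq_ci = 2, four elements are in use and three are free
 * (one element per queue is kept unusable, matching the note in
 * pqi_calculate_queue_resources()).  An IU spanning three elements is
 * then copied with two memcpy() calls, two elements up to the end of
 * the array and one wrapped to the start, after which iq_pi advances to
 * (6 + 3) % 8 = 1 before being written below.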
3962 */ 3963 writel(iq_pi, queue_group->iq_pi[path]); 3964 } 3965 3966 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3967 } 3968 3969 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3970 3971 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3972 struct completion *wait) 3973 { 3974 int rc; 3975 3976 while (1) { 3977 if (wait_for_completion_io_timeout(wait, 3978 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) { 3979 rc = 0; 3980 break; 3981 } 3982 3983 pqi_check_ctrl_health(ctrl_info); 3984 if (pqi_ctrl_offline(ctrl_info)) { 3985 rc = -ENXIO; 3986 break; 3987 } 3988 } 3989 3990 return rc; 3991 } 3992 3993 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3994 void *context) 3995 { 3996 struct completion *waiting = context; 3997 3998 complete(waiting); 3999 } 4000 4001 static int pqi_process_raid_io_error_synchronous( 4002 struct pqi_raid_error_info *error_info) 4003 { 4004 int rc = -EIO; 4005 4006 switch (error_info->data_out_result) { 4007 case PQI_DATA_IN_OUT_GOOD: 4008 if (error_info->status == SAM_STAT_GOOD) 4009 rc = 0; 4010 break; 4011 case PQI_DATA_IN_OUT_UNDERFLOW: 4012 if (error_info->status == SAM_STAT_GOOD || 4013 error_info->status == SAM_STAT_CHECK_CONDITION) 4014 rc = 0; 4015 break; 4016 case PQI_DATA_IN_OUT_ABORTED: 4017 rc = PQI_CMD_STATUS_ABORTED; 4018 break; 4019 } 4020 4021 return rc; 4022 } 4023 4024 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4025 struct pqi_iu_header *request, unsigned int flags, 4026 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 4027 { 4028 int rc = 0; 4029 struct pqi_io_request *io_request; 4030 unsigned long start_jiffies; 4031 unsigned long msecs_blocked; 4032 size_t iu_length; 4033 DECLARE_COMPLETION_ONSTACK(wait); 4034 4035 /* 4036 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 4037 * are mutually exclusive. 
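 * (An interruptible caller below simply blocks in down_interruptible()
 * with no deadline, while a caller that passes a timeout uses
 * down_timeout() and then has the time it spent blocked subtracted
 * from its remaining budget.)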
4038 */ 4039 4040 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4041 if (down_interruptible(&ctrl_info->sync_request_sem)) 4042 return -ERESTARTSYS; 4043 } else { 4044 if (timeout_msecs == NO_TIMEOUT) { 4045 down(&ctrl_info->sync_request_sem); 4046 } else { 4047 start_jiffies = jiffies; 4048 if (down_timeout(&ctrl_info->sync_request_sem, 4049 msecs_to_jiffies(timeout_msecs))) 4050 return -ETIMEDOUT; 4051 msecs_blocked = 4052 jiffies_to_msecs(jiffies - start_jiffies); 4053 if (msecs_blocked >= timeout_msecs) { 4054 rc = -ETIMEDOUT; 4055 goto out; 4056 } 4057 timeout_msecs -= msecs_blocked; 4058 } 4059 } 4060 4061 pqi_ctrl_busy(ctrl_info); 4062 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 4063 if (timeout_msecs == 0) { 4064 pqi_ctrl_unbusy(ctrl_info); 4065 rc = -ETIMEDOUT; 4066 goto out; 4067 } 4068 4069 if (pqi_ctrl_offline(ctrl_info)) { 4070 pqi_ctrl_unbusy(ctrl_info); 4071 rc = -ENXIO; 4072 goto out; 4073 } 4074 4075 atomic_inc(&ctrl_info->sync_cmds_outstanding); 4076 4077 io_request = pqi_alloc_io_request(ctrl_info); 4078 4079 put_unaligned_le16(io_request->index, 4080 &(((struct pqi_raid_path_request *)request)->request_id)); 4081 4082 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4083 ((struct pqi_raid_path_request *)request)->error_index = 4084 ((struct pqi_raid_path_request *)request)->request_id; 4085 4086 iu_length = get_unaligned_le16(&request->iu_length) + 4087 PQI_REQUEST_HEADER_LENGTH; 4088 memcpy(io_request->iu, request, iu_length); 4089 4090 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4091 io_request->context = &wait; 4092 4093 pqi_start_io(ctrl_info, 4094 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4095 io_request); 4096 4097 pqi_ctrl_unbusy(ctrl_info); 4098 4099 if (timeout_msecs == NO_TIMEOUT) { 4100 pqi_wait_for_completion_io(ctrl_info, &wait); 4101 } else { 4102 if (!wait_for_completion_io_timeout(&wait, 4103 msecs_to_jiffies(timeout_msecs))) { 4104 dev_warn(&ctrl_info->pci_dev->dev, 4105 "command timed out\n"); 4106 rc = -ETIMEDOUT; 4107 } 4108 } 4109 4110 if (error_info) { 4111 if (io_request->error_info) 4112 memcpy(error_info, io_request->error_info, 4113 sizeof(*error_info)); 4114 else 4115 memset(error_info, 0, sizeof(*error_info)); 4116 } else if (rc == 0 && io_request->error_info) { 4117 rc = pqi_process_raid_io_error_synchronous( 4118 io_request->error_info); 4119 } 4120 4121 pqi_free_io_request(io_request); 4122 4123 atomic_dec(&ctrl_info->sync_cmds_outstanding); 4124 out: 4125 up(&ctrl_info->sync_request_sem); 4126 4127 return rc; 4128 } 4129 4130 static int pqi_validate_admin_response( 4131 struct pqi_general_admin_response *response, u8 expected_function_code) 4132 { 4133 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4134 return -EINVAL; 4135 4136 if (get_unaligned_le16(&response->header.iu_length) != 4137 PQI_GENERAL_ADMIN_IU_LENGTH) 4138 return -EINVAL; 4139 4140 if (response->function_code != expected_function_code) 4141 return -EINVAL; 4142 4143 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4144 return -EINVAL; 4145 4146 return 0; 4147 } 4148 4149 static int pqi_submit_admin_request_synchronous( 4150 struct pqi_ctrl_info *ctrl_info, 4151 struct pqi_general_admin_request *request, 4152 struct pqi_general_admin_response *response) 4153 { 4154 int rc; 4155 4156 pqi_submit_admin_request(ctrl_info, request); 4157 4158 rc = pqi_poll_for_admin_response(ctrl_info, response); 4159 4160 if (rc == 0) 4161 rc = pqi_validate_admin_response(response, 4162 
request->function_code); 4163 4164 return rc; 4165 } 4166 4167 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4168 { 4169 int rc; 4170 struct pqi_general_admin_request request; 4171 struct pqi_general_admin_response response; 4172 struct pqi_device_capability *capability; 4173 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4174 4175 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4176 if (!capability) 4177 return -ENOMEM; 4178 4179 memset(&request, 0, sizeof(request)); 4180 4181 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4182 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4183 &request.header.iu_length); 4184 request.function_code = 4185 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4186 put_unaligned_le32(sizeof(*capability), 4187 &request.data.report_device_capability.buffer_length); 4188 4189 rc = pqi_map_single(ctrl_info->pci_dev, 4190 &request.data.report_device_capability.sg_descriptor, 4191 capability, sizeof(*capability), 4192 DMA_FROM_DEVICE); 4193 if (rc) 4194 goto out; 4195 4196 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4197 &response); 4198 4199 pqi_pci_unmap(ctrl_info->pci_dev, 4200 &request.data.report_device_capability.sg_descriptor, 1, 4201 DMA_FROM_DEVICE); 4202 4203 if (rc) 4204 goto out; 4205 4206 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4207 rc = -EIO; 4208 goto out; 4209 } 4210 4211 ctrl_info->max_inbound_queues = 4212 get_unaligned_le16(&capability->max_inbound_queues); 4213 ctrl_info->max_elements_per_iq = 4214 get_unaligned_le16(&capability->max_elements_per_iq); 4215 ctrl_info->max_iq_element_length = 4216 get_unaligned_le16(&capability->max_iq_element_length) 4217 * 16; 4218 ctrl_info->max_outbound_queues = 4219 get_unaligned_le16(&capability->max_outbound_queues); 4220 ctrl_info->max_elements_per_oq = 4221 get_unaligned_le16(&capability->max_elements_per_oq); 4222 ctrl_info->max_oq_element_length = 4223 get_unaligned_le16(&capability->max_oq_element_length) 4224 * 16; 4225 4226 sop_iu_layer_descriptor = 4227 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4228 4229 ctrl_info->max_inbound_iu_length_per_firmware = 4230 get_unaligned_le16( 4231 &sop_iu_layer_descriptor->max_inbound_iu_length); 4232 ctrl_info->inbound_spanning_supported = 4233 sop_iu_layer_descriptor->inbound_spanning_supported; 4234 ctrl_info->outbound_spanning_supported = 4235 sop_iu_layer_descriptor->outbound_spanning_supported; 4236 4237 out: 4238 kfree(capability); 4239 4240 return rc; 4241 } 4242 4243 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4244 { 4245 if (ctrl_info->max_iq_element_length < 4246 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4247 dev_err(&ctrl_info->pci_dev->dev, 4248 "max. inbound queue element length of %d is less than the required length of %d\n", 4249 ctrl_info->max_iq_element_length, 4250 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4251 return -EINVAL; 4252 } 4253 4254 if (ctrl_info->max_oq_element_length < 4255 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4256 dev_err(&ctrl_info->pci_dev->dev, 4257 "max. outbound queue element length of %d is less than the required length of %d\n", 4258 ctrl_info->max_oq_element_length, 4259 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4260 return -EINVAL; 4261 } 4262 4263 if (ctrl_info->max_inbound_iu_length_per_firmware < 4264 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4265 dev_err(&ctrl_info->pci_dev->dev, 4266 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 4267 ctrl_info->max_inbound_iu_length_per_firmware, 4268 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4269 return -EINVAL; 4270 } 4271 4272 if (!ctrl_info->inbound_spanning_supported) { 4273 dev_err(&ctrl_info->pci_dev->dev, 4274 "the controller does not support inbound spanning\n"); 4275 return -EINVAL; 4276 } 4277 4278 if (ctrl_info->outbound_spanning_supported) { 4279 dev_err(&ctrl_info->pci_dev->dev, 4280 "the controller supports outbound spanning but this driver does not\n"); 4281 return -EINVAL; 4282 } 4283 4284 return 0; 4285 } 4286 4287 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4288 { 4289 int rc; 4290 struct pqi_event_queue *event_queue; 4291 struct pqi_general_admin_request request; 4292 struct pqi_general_admin_response response; 4293 4294 event_queue = &ctrl_info->event_queue; 4295 4296 /* 4297 * Create OQ (Outbound Queue - device to host queue) to dedicate 4298 * to events. 4299 */ 4300 memset(&request, 0, sizeof(request)); 4301 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4302 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4303 &request.header.iu_length); 4304 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4305 put_unaligned_le16(event_queue->oq_id, 4306 &request.data.create_operational_oq.queue_id); 4307 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4308 &request.data.create_operational_oq.element_array_addr); 4309 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4310 &request.data.create_operational_oq.pi_addr); 4311 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4312 &request.data.create_operational_oq.num_elements); 4313 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4314 &request.data.create_operational_oq.element_length); 4315 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4316 put_unaligned_le16(event_queue->int_msg_num, 4317 &request.data.create_operational_oq.int_msg_num); 4318 4319 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4320 &response); 4321 if (rc) 4322 return rc; 4323 4324 event_queue->oq_ci = ctrl_info->iomem_base + 4325 PQI_DEVICE_REGISTERS_OFFSET + 4326 get_unaligned_le64( 4327 &response.data.create_operational_oq.oq_ci_offset); 4328 4329 return 0; 4330 } 4331 4332 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4333 unsigned int group_number) 4334 { 4335 int rc; 4336 struct pqi_queue_group *queue_group; 4337 struct pqi_general_admin_request request; 4338 struct pqi_general_admin_response response; 4339 4340 queue_group = &ctrl_info->queue_groups[group_number]; 4341 4342 /* 4343 * Create IQ (Inbound Queue - host to device queue) for 4344 * RAID path. 
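 *
 * Note that the element_length fields in these create requests are
 * encoded in 16-byte units (hence the "/ 16" below), the inverse of
 * the "* 16" decode applied to max_iq_element_length and
 * max_oq_element_length in pqi_report_device_capability().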
4345 */ 4346 memset(&request, 0, sizeof(request)); 4347 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4348 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4349 &request.header.iu_length); 4350 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4351 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4352 &request.data.create_operational_iq.queue_id); 4353 put_unaligned_le64( 4354 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4355 &request.data.create_operational_iq.element_array_addr); 4356 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4357 &request.data.create_operational_iq.ci_addr); 4358 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4359 &request.data.create_operational_iq.num_elements); 4360 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4361 &request.data.create_operational_iq.element_length); 4362 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4363 4364 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4365 &response); 4366 if (rc) { 4367 dev_err(&ctrl_info->pci_dev->dev, 4368 "error creating inbound RAID queue\n"); 4369 return rc; 4370 } 4371 4372 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4373 PQI_DEVICE_REGISTERS_OFFSET + 4374 get_unaligned_le64( 4375 &response.data.create_operational_iq.iq_pi_offset); 4376 4377 /* 4378 * Create IQ (Inbound Queue - host to device queue) for 4379 * Advanced I/O (AIO) path. 4380 */ 4381 memset(&request, 0, sizeof(request)); 4382 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4383 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4384 &request.header.iu_length); 4385 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4386 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4387 &request.data.create_operational_iq.queue_id); 4388 put_unaligned_le64((u64)queue_group-> 4389 iq_element_array_bus_addr[AIO_PATH], 4390 &request.data.create_operational_iq.element_array_addr); 4391 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4392 &request.data.create_operational_iq.ci_addr); 4393 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4394 &request.data.create_operational_iq.num_elements); 4395 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4396 &request.data.create_operational_iq.element_length); 4397 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4398 4399 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4400 &response); 4401 if (rc) { 4402 dev_err(&ctrl_info->pci_dev->dev, 4403 "error creating inbound AIO queue\n"); 4404 return rc; 4405 } 4406 4407 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4408 PQI_DEVICE_REGISTERS_OFFSET + 4409 get_unaligned_le64( 4410 &response.data.create_operational_iq.iq_pi_offset); 4411 4412 /* 4413 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4414 * assumed to be for RAID path I/O unless we change the queue's 4415 * property. 
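 *
 * Once the CHANGE_IQ_PROPERTY request below tags the second IQ with
 * PQI_IQ_PROPERTY_IS_AIO_QUEUE, each queue group ends up with one RAID
 * IQ, one AIO IQ and a single OQ that both submission paths share for
 * completions.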
4416 */ 4417 memset(&request, 0, sizeof(request)); 4418 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4419 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4420 &request.header.iu_length); 4421 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4422 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4423 &request.data.change_operational_iq_properties.queue_id); 4424 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4425 &request.data.change_operational_iq_properties.vendor_specific); 4426 4427 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4428 &response); 4429 if (rc) { 4430 dev_err(&ctrl_info->pci_dev->dev, 4431 "error changing queue property\n"); 4432 return rc; 4433 } 4434 4435 /* 4436 * Create OQ (Outbound Queue - device to host queue). 4437 */ 4438 memset(&request, 0, sizeof(request)); 4439 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4440 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4441 &request.header.iu_length); 4442 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4443 put_unaligned_le16(queue_group->oq_id, 4444 &request.data.create_operational_oq.queue_id); 4445 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4446 &request.data.create_operational_oq.element_array_addr); 4447 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4448 &request.data.create_operational_oq.pi_addr); 4449 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4450 &request.data.create_operational_oq.num_elements); 4451 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4452 &request.data.create_operational_oq.element_length); 4453 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4454 put_unaligned_le16(queue_group->int_msg_num, 4455 &request.data.create_operational_oq.int_msg_num); 4456 4457 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4458 &response); 4459 if (rc) { 4460 dev_err(&ctrl_info->pci_dev->dev, 4461 "error creating outbound queue\n"); 4462 return rc; 4463 } 4464 4465 queue_group->oq_ci = ctrl_info->iomem_base + 4466 PQI_DEVICE_REGISTERS_OFFSET + 4467 get_unaligned_le64( 4468 &response.data.create_operational_oq.oq_ci_offset); 4469 4470 return 0; 4471 } 4472 4473 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4474 { 4475 int rc; 4476 unsigned int i; 4477 4478 rc = pqi_create_event_queue(ctrl_info); 4479 if (rc) { 4480 dev_err(&ctrl_info->pci_dev->dev, 4481 "error creating event queue\n"); 4482 return rc; 4483 } 4484 4485 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4486 rc = pqi_create_queue_group(ctrl_info, i); 4487 if (rc) { 4488 dev_err(&ctrl_info->pci_dev->dev, 4489 "error creating queue group number %u/%u\n", 4490 i, ctrl_info->num_queue_groups); 4491 return rc; 4492 } 4493 } 4494 4495 return 0; 4496 } 4497 4498 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4499 (offsetof(struct pqi_event_config, descriptors) + \ 4500 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4501 4502 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4503 bool enable_events) 4504 { 4505 int rc; 4506 unsigned int i; 4507 struct pqi_event_config *event_config; 4508 struct pqi_event_descriptor *event_descriptor; 4509 struct pqi_general_management_request request; 4510 4511 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4512 GFP_KERNEL); 4513 if (!event_config) 4514 return -ENOMEM; 4515 4516 memset(&request, 0, sizeof(request)); 4517 4518 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4519 
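/*
 * The IU length computed below covers everything after the common
 * request header up to and including a single embedded SG descriptor,
 * i.e. offsetof(..., sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH.
 */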
put_unaligned_le16(offsetof(struct pqi_general_management_request, 4520 data.report_event_configuration.sg_descriptors[1]) - 4521 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4522 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4523 &request.data.report_event_configuration.buffer_length); 4524 4525 rc = pqi_map_single(ctrl_info->pci_dev, 4526 request.data.report_event_configuration.sg_descriptors, 4527 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4528 DMA_FROM_DEVICE); 4529 if (rc) 4530 goto out; 4531 4532 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4533 0, NULL, NO_TIMEOUT); 4534 4535 pqi_pci_unmap(ctrl_info->pci_dev, 4536 request.data.report_event_configuration.sg_descriptors, 1, 4537 DMA_FROM_DEVICE); 4538 4539 if (rc) 4540 goto out; 4541 4542 for (i = 0; i < event_config->num_event_descriptors; i++) { 4543 event_descriptor = &event_config->descriptors[i]; 4544 if (enable_events && 4545 pqi_is_supported_event(event_descriptor->event_type)) 4546 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4547 &event_descriptor->oq_id); 4548 else 4549 put_unaligned_le16(0, &event_descriptor->oq_id); 4550 } 4551 4552 memset(&request, 0, sizeof(request)); 4553 4554 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4555 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4556 data.report_event_configuration.sg_descriptors[1]) - 4557 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4558 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4559 &request.data.report_event_configuration.buffer_length); 4560 4561 rc = pqi_map_single(ctrl_info->pci_dev, 4562 request.data.report_event_configuration.sg_descriptors, 4563 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4564 DMA_TO_DEVICE); 4565 if (rc) 4566 goto out; 4567 4568 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4569 NULL, NO_TIMEOUT); 4570 4571 pqi_pci_unmap(ctrl_info->pci_dev, 4572 request.data.report_event_configuration.sg_descriptors, 1, 4573 DMA_TO_DEVICE); 4574 4575 out: 4576 kfree(event_config); 4577 4578 return rc; 4579 } 4580 4581 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4582 { 4583 return pqi_configure_events(ctrl_info, true); 4584 } 4585 4586 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4587 { 4588 return pqi_configure_events(ctrl_info, false); 4589 } 4590 4591 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4592 { 4593 unsigned int i; 4594 struct device *dev; 4595 size_t sg_chain_buffer_length; 4596 struct pqi_io_request *io_request; 4597 4598 if (!ctrl_info->io_request_pool) 4599 return; 4600 4601 dev = &ctrl_info->pci_dev->dev; 4602 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4603 io_request = ctrl_info->io_request_pool; 4604 4605 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4606 kfree(io_request->iu); 4607 if (!io_request->sg_chain_buffer) 4608 break; 4609 dma_free_coherent(dev, sg_chain_buffer_length, 4610 io_request->sg_chain_buffer, 4611 io_request->sg_chain_buffer_dma_handle); 4612 io_request++; 4613 } 4614 4615 kfree(ctrl_info->io_request_pool); 4616 ctrl_info->io_request_pool = NULL; 4617 } 4618 4619 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4620 { 4621 4622 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 4623 ctrl_info->error_buffer_length, 4624 &ctrl_info->error_buffer_dma_handle, 4625 GFP_KERNEL); 4626 if (!ctrl_info->error_buffer) 4627 return -ENOMEM; 4628 
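/*
 * The error buffer allocated above is sized in
 * pqi_calculate_io_resources() as max_io_slots *
 * PQI_ERROR_BUFFER_ELEMENT_LENGTH, i.e. one fixed-size error-info
 * element for every possible outstanding request.
 */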
4629 return 0; 4630 } 4631 4632 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4633 { 4634 unsigned int i; 4635 void *sg_chain_buffer; 4636 size_t sg_chain_buffer_length; 4637 dma_addr_t sg_chain_buffer_dma_handle; 4638 struct device *dev; 4639 struct pqi_io_request *io_request; 4640 4641 ctrl_info->io_request_pool = 4642 kcalloc(ctrl_info->max_io_slots, 4643 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4644 4645 if (!ctrl_info->io_request_pool) { 4646 dev_err(&ctrl_info->pci_dev->dev, 4647 "failed to allocate I/O request pool\n"); 4648 goto error; 4649 } 4650 4651 dev = &ctrl_info->pci_dev->dev; 4652 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4653 io_request = ctrl_info->io_request_pool; 4654 4655 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4656 io_request->iu = 4657 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4658 4659 if (!io_request->iu) { 4660 dev_err(&ctrl_info->pci_dev->dev, 4661 "failed to allocate IU buffers\n"); 4662 goto error; 4663 } 4664 4665 sg_chain_buffer = dma_alloc_coherent(dev, 4666 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4667 GFP_KERNEL); 4668 4669 if (!sg_chain_buffer) { 4670 dev_err(&ctrl_info->pci_dev->dev, 4671 "failed to allocate PQI scatter-gather chain buffers\n"); 4672 goto error; 4673 } 4674 4675 io_request->index = i; 4676 io_request->sg_chain_buffer = sg_chain_buffer; 4677 io_request->sg_chain_buffer_dma_handle = 4678 sg_chain_buffer_dma_handle; 4679 io_request++; 4680 } 4681 4682 return 0; 4683 4684 error: 4685 pqi_free_all_io_requests(ctrl_info); 4686 4687 return -ENOMEM; 4688 } 4689 4690 /* 4691 * Calculate required resources that are sized based on max. outstanding 4692 * requests and max. transfer size. 4693 */ 4694 4695 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4696 { 4697 u32 max_transfer_size; 4698 u32 max_sg_entries; 4699 4700 ctrl_info->scsi_ml_can_queue = 4701 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4702 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4703 4704 ctrl_info->error_buffer_length = 4705 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4706 4707 if (reset_devices) 4708 max_transfer_size = min(ctrl_info->max_transfer_size, 4709 PQI_MAX_TRANSFER_SIZE_KDUMP); 4710 else 4711 max_transfer_size = min(ctrl_info->max_transfer_size, 4712 PQI_MAX_TRANSFER_SIZE); 4713 4714 max_sg_entries = max_transfer_size / PAGE_SIZE; 4715 4716 /* +1 to cover when the buffer is not page-aligned. 
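 * (For example, with an assumed 4 KiB PAGE_SIZE, a 1 MiB transfer
 * spans 256 pages but can touch 257 of them when the buffer starts
 * mid-page, hence the extra scatter-gather descriptor.)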
*/ 4717 max_sg_entries++; 4718 4719 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4720 4721 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4722 4723 ctrl_info->sg_chain_buffer_length = 4724 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4725 PQI_EXTRA_SGL_MEMORY; 4726 ctrl_info->sg_tablesize = max_sg_entries; 4727 ctrl_info->max_sectors = max_transfer_size / 512; 4728 } 4729 4730 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4731 { 4732 int num_queue_groups; 4733 u16 num_elements_per_iq; 4734 u16 num_elements_per_oq; 4735 4736 if (reset_devices) { 4737 num_queue_groups = 1; 4738 } else { 4739 int num_cpus; 4740 int max_queue_groups; 4741 4742 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4743 ctrl_info->max_outbound_queues - 1); 4744 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4745 4746 num_cpus = num_online_cpus(); 4747 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4748 num_queue_groups = min(num_queue_groups, max_queue_groups); 4749 } 4750 4751 ctrl_info->num_queue_groups = num_queue_groups; 4752 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4753 4754 /* 4755 * Make sure that the max. inbound IU length is an even multiple 4756 * of our inbound element length. 4757 */ 4758 ctrl_info->max_inbound_iu_length = 4759 (ctrl_info->max_inbound_iu_length_per_firmware / 4760 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4761 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4762 4763 num_elements_per_iq = 4764 (ctrl_info->max_inbound_iu_length / 4765 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4766 4767 /* Add one because one element in each queue is unusable. */ 4768 num_elements_per_iq++; 4769 4770 num_elements_per_iq = min(num_elements_per_iq, 4771 ctrl_info->max_elements_per_iq); 4772 4773 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4774 num_elements_per_oq = min(num_elements_per_oq, 4775 ctrl_info->max_elements_per_oq); 4776 4777 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4778 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4779 4780 ctrl_info->max_sg_per_iu = 4781 ((ctrl_info->max_inbound_iu_length - 4782 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4783 sizeof(struct pqi_sg_descriptor)) + 4784 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4785 } 4786 4787 static inline void pqi_set_sg_descriptor( 4788 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4789 { 4790 u64 address = (u64)sg_dma_address(sg); 4791 unsigned int length = sg_dma_len(sg); 4792 4793 put_unaligned_le64(address, &sg_descriptor->address); 4794 put_unaligned_le32(length, &sg_descriptor->length); 4795 put_unaligned_le32(0, &sg_descriptor->flags); 4796 } 4797 4798 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4799 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4800 struct pqi_io_request *io_request) 4801 { 4802 int i; 4803 u16 iu_length; 4804 int sg_count; 4805 bool chained; 4806 unsigned int num_sg_in_iu; 4807 unsigned int max_sg_per_iu; 4808 struct scatterlist *sg; 4809 struct pqi_sg_descriptor *sg_descriptor; 4810 4811 sg_count = scsi_dma_map(scmd); 4812 if (sg_count < 0) 4813 return sg_count; 4814 4815 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4816 PQI_REQUEST_HEADER_LENGTH; 4817 4818 if (sg_count == 0) 4819 goto out; 4820 4821 sg = scsi_sglist(scmd); 4822 sg_descriptor = request->sg_descriptors; 4823 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4824 chained = false; 4825 num_sg_in_iu = 0; 4826 i = 0; 4827 4828 while (1) { 4829 
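/*
 * Chaining sketch: the first max_sg_per_iu mapped segments get
 * descriptors embedded directly in the IU (the local max_sg_per_iu is
 * already ctrl_info->max_sg_per_iu minus one, reserving a slot for the
 * chain pointer).  If more segments remain, the reserved slot is
 * written as a CISS_SG_CHAIN descriptor pointing at
 * io_request->sg_chain_buffer and the rest are written there;
 * CISS_SG_LAST marks the final descriptor in either case.
 */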
pqi_set_sg_descriptor(sg_descriptor, sg); 4830 if (!chained) 4831 num_sg_in_iu++; 4832 i++; 4833 if (i == sg_count) 4834 break; 4835 sg_descriptor++; 4836 if (i == max_sg_per_iu) { 4837 put_unaligned_le64( 4838 (u64)io_request->sg_chain_buffer_dma_handle, 4839 &sg_descriptor->address); 4840 put_unaligned_le32((sg_count - num_sg_in_iu) 4841 * sizeof(*sg_descriptor), 4842 &sg_descriptor->length); 4843 put_unaligned_le32(CISS_SG_CHAIN, 4844 &sg_descriptor->flags); 4845 chained = true; 4846 num_sg_in_iu++; 4847 sg_descriptor = io_request->sg_chain_buffer; 4848 } 4849 sg = sg_next(sg); 4850 } 4851 4852 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4853 request->partial = chained; 4854 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4855 4856 out: 4857 put_unaligned_le16(iu_length, &request->header.iu_length); 4858 4859 return 0; 4860 } 4861 4862 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4863 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4864 struct pqi_io_request *io_request) 4865 { 4866 int i; 4867 u16 iu_length; 4868 int sg_count; 4869 bool chained; 4870 unsigned int num_sg_in_iu; 4871 unsigned int max_sg_per_iu; 4872 struct scatterlist *sg; 4873 struct pqi_sg_descriptor *sg_descriptor; 4874 4875 sg_count = scsi_dma_map(scmd); 4876 if (sg_count < 0) 4877 return sg_count; 4878 4879 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4880 PQI_REQUEST_HEADER_LENGTH; 4881 num_sg_in_iu = 0; 4882 4883 if (sg_count == 0) 4884 goto out; 4885 4886 sg = scsi_sglist(scmd); 4887 sg_descriptor = request->sg_descriptors; 4888 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4889 chained = false; 4890 i = 0; 4891 4892 while (1) { 4893 pqi_set_sg_descriptor(sg_descriptor, sg); 4894 if (!chained) 4895 num_sg_in_iu++; 4896 i++; 4897 if (i == sg_count) 4898 break; 4899 sg_descriptor++; 4900 if (i == max_sg_per_iu) { 4901 put_unaligned_le64( 4902 (u64)io_request->sg_chain_buffer_dma_handle, 4903 &sg_descriptor->address); 4904 put_unaligned_le32((sg_count - num_sg_in_iu) 4905 * sizeof(*sg_descriptor), 4906 &sg_descriptor->length); 4907 put_unaligned_le32(CISS_SG_CHAIN, 4908 &sg_descriptor->flags); 4909 chained = true; 4910 num_sg_in_iu++; 4911 sg_descriptor = io_request->sg_chain_buffer; 4912 } 4913 sg = sg_next(sg); 4914 } 4915 4916 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4917 request->partial = chained; 4918 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4919 4920 out: 4921 put_unaligned_le16(iu_length, &request->header.iu_length); 4922 request->num_sg_descriptors = num_sg_in_iu; 4923 4924 return 0; 4925 } 4926 4927 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4928 void *context) 4929 { 4930 struct scsi_cmnd *scmd; 4931 4932 scmd = io_request->scmd; 4933 pqi_free_io_request(io_request); 4934 scsi_dma_unmap(scmd); 4935 pqi_scsi_done(scmd); 4936 } 4937 4938 static int pqi_raid_submit_scsi_cmd_with_io_request( 4939 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4940 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4941 struct pqi_queue_group *queue_group) 4942 { 4943 int rc; 4944 size_t cdb_length; 4945 struct pqi_raid_path_request *request; 4946 4947 io_request->io_complete_callback = pqi_raid_io_complete; 4948 io_request->scmd = scmd; 4949 4950 request = io_request->iu; 4951 memset(request, 0, 4952 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4953 4954 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4955 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4956 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4957 put_unaligned_le16(io_request->index, &request->request_id); 4958 request->error_index = request->request_id; 4959 memcpy(request->lun_number, device->scsi3addr, 4960 sizeof(request->lun_number)); 4961 4962 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4963 memcpy(request->cdb, scmd->cmnd, cdb_length); 4964 4965 switch (cdb_length) { 4966 case 6: 4967 case 10: 4968 case 12: 4969 case 16: 4970 /* No bytes in the Additional CDB bytes field */ 4971 request->additional_cdb_bytes_usage = 4972 SOP_ADDITIONAL_CDB_BYTES_0; 4973 break; 4974 case 20: 4975 /* 4 bytes in the Additional cdb field */ 4976 request->additional_cdb_bytes_usage = 4977 SOP_ADDITIONAL_CDB_BYTES_4; 4978 break; 4979 case 24: 4980 /* 8 bytes in the Additional cdb field */ 4981 request->additional_cdb_bytes_usage = 4982 SOP_ADDITIONAL_CDB_BYTES_8; 4983 break; 4984 case 28: 4985 /* 12 bytes in the Additional cdb field */ 4986 request->additional_cdb_bytes_usage = 4987 SOP_ADDITIONAL_CDB_BYTES_12; 4988 break; 4989 case 32: 4990 default: 4991 /* 16 bytes in the Additional cdb field */ 4992 request->additional_cdb_bytes_usage = 4993 SOP_ADDITIONAL_CDB_BYTES_16; 4994 break; 4995 } 4996 4997 switch (scmd->sc_data_direction) { 4998 case DMA_TO_DEVICE: 4999 request->data_direction = SOP_READ_FLAG; 5000 break; 5001 case DMA_FROM_DEVICE: 5002 request->data_direction = SOP_WRITE_FLAG; 5003 break; 5004 case DMA_NONE: 5005 request->data_direction = SOP_NO_DIRECTION_FLAG; 5006 break; 5007 case DMA_BIDIRECTIONAL: 5008 request->data_direction = SOP_BIDIRECTIONAL; 5009 break; 5010 default: 5011 dev_err(&ctrl_info->pci_dev->dev, 5012 "unknown data direction: %d\n", 5013 scmd->sc_data_direction); 5014 break; 5015 } 5016 5017 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5018 if (rc) { 5019 pqi_free_io_request(io_request); 5020 return SCSI_MLQUEUE_HOST_BUSY; 5021 } 5022 5023 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5024 5025 return 0; 5026 } 5027 5028 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5029 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5030 struct pqi_queue_group *queue_group) 5031 { 5032 struct pqi_io_request *io_request; 5033 5034 io_request = pqi_alloc_io_request(ctrl_info); 5035 5036 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5037 device, scmd, queue_group); 5038 } 5039 5040 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 5041 { 5042 if (!pqi_ctrl_blocked(ctrl_info)) 5043 schedule_work(&ctrl_info->raid_bypass_retry_work); 5044 } 5045 5046 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5047 { 5048 struct scsi_cmnd *scmd; 5049 struct pqi_scsi_dev *device; 5050 struct pqi_ctrl_info *ctrl_info; 5051 5052 if (!io_request->raid_bypass) 5053 return false; 5054 5055 scmd = io_request->scmd; 5056 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5057 return false; 5058 if (host_byte(scmd->result) == DID_NO_CONNECT) 5059 return false; 5060 5061 device = scmd->device->hostdata; 5062 if (pqi_device_offline(device)) 5063 return false; 5064 5065 ctrl_info = shost_to_hba(scmd->device->host); 5066 if (pqi_ctrl_offline(ctrl_info)) 5067 return false; 5068 5069 return true; 5070 } 5071 5072 static inline void pqi_add_to_raid_bypass_retry_list( 5073 struct pqi_ctrl_info *ctrl_info, 5074 struct pqi_io_request *io_request, bool at_head) 5075 { 5076 unsigned long flags; 5077 5078 
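/*
 * A freshly failed bypass request is queued at the tail (see
 * pqi_queue_raid_bypass_retry()); a request whose retry submission
 * could not be started is put back at the head so it stays first in
 * line for the next retry pass.
 */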
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5079 if (at_head) 5080 list_add(&io_request->request_list_entry, 5081 &ctrl_info->raid_bypass_retry_list); 5082 else 5083 list_add_tail(&io_request->request_list_entry, 5084 &ctrl_info->raid_bypass_retry_list); 5085 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5086 } 5087 5088 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 5089 void *context) 5090 { 5091 struct scsi_cmnd *scmd; 5092 5093 scmd = io_request->scmd; 5094 pqi_free_io_request(io_request); 5095 pqi_scsi_done(scmd); 5096 } 5097 5098 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 5099 { 5100 struct scsi_cmnd *scmd; 5101 struct pqi_ctrl_info *ctrl_info; 5102 5103 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 5104 scmd = io_request->scmd; 5105 scmd->result = 0; 5106 ctrl_info = shost_to_hba(scmd->device->host); 5107 5108 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 5109 pqi_schedule_bypass_retry(ctrl_info); 5110 } 5111 5112 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 5113 { 5114 struct scsi_cmnd *scmd; 5115 struct pqi_scsi_dev *device; 5116 struct pqi_ctrl_info *ctrl_info; 5117 struct pqi_queue_group *queue_group; 5118 5119 scmd = io_request->scmd; 5120 device = scmd->device->hostdata; 5121 if (pqi_device_in_reset(device)) { 5122 pqi_free_io_request(io_request); 5123 set_host_byte(scmd, DID_RESET); 5124 pqi_scsi_done(scmd); 5125 return 0; 5126 } 5127 5128 ctrl_info = shost_to_hba(scmd->device->host); 5129 queue_group = io_request->queue_group; 5130 5131 pqi_reinit_io_request(io_request); 5132 5133 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5134 device, scmd, queue_group); 5135 } 5136 5137 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 5138 struct pqi_ctrl_info *ctrl_info) 5139 { 5140 unsigned long flags; 5141 struct pqi_io_request *io_request; 5142 5143 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5144 io_request = list_first_entry_or_null( 5145 &ctrl_info->raid_bypass_retry_list, 5146 struct pqi_io_request, request_list_entry); 5147 if (io_request) 5148 list_del(&io_request->request_list_entry); 5149 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5150 5151 return io_request; 5152 } 5153 5154 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 5155 { 5156 int rc; 5157 struct pqi_io_request *io_request; 5158 5159 pqi_ctrl_busy(ctrl_info); 5160 5161 while (1) { 5162 if (pqi_ctrl_blocked(ctrl_info)) 5163 break; 5164 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 5165 if (!io_request) 5166 break; 5167 rc = pqi_retry_raid_bypass(io_request); 5168 if (rc) { 5169 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 5170 true); 5171 pqi_schedule_bypass_retry(ctrl_info); 5172 break; 5173 } 5174 } 5175 5176 pqi_ctrl_unbusy(ctrl_info); 5177 } 5178 5179 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 5180 { 5181 struct pqi_ctrl_info *ctrl_info; 5182 5183 ctrl_info = container_of(work, struct pqi_ctrl_info, 5184 raid_bypass_retry_work); 5185 pqi_retry_raid_bypass_requests(ctrl_info); 5186 } 5187 5188 static void pqi_clear_all_queued_raid_bypass_retries( 5189 struct pqi_ctrl_info *ctrl_info) 5190 { 5191 unsigned long flags; 5192 5193 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5194 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 5195 
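/*
 * Re-initializing the list head under the lock discards every queued
 * bypass retry in one pass; the associated commands are not completed
 * here.
 */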
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5196 } 5197 5198 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5199 void *context) 5200 { 5201 struct scsi_cmnd *scmd; 5202 5203 scmd = io_request->scmd; 5204 scsi_dma_unmap(scmd); 5205 if (io_request->status == -EAGAIN) 5206 set_host_byte(scmd, DID_IMM_RETRY); 5207 else if (pqi_raid_bypass_retry_needed(io_request)) { 5208 pqi_queue_raid_bypass_retry(io_request); 5209 return; 5210 } 5211 pqi_free_io_request(io_request); 5212 pqi_scsi_done(scmd); 5213 } 5214 5215 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5216 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5217 struct pqi_queue_group *queue_group) 5218 { 5219 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5220 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 5221 } 5222 5223 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5224 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5225 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5226 struct pqi_encryption_info *encryption_info, bool raid_bypass) 5227 { 5228 int rc; 5229 struct pqi_io_request *io_request; 5230 struct pqi_aio_path_request *request; 5231 5232 io_request = pqi_alloc_io_request(ctrl_info); 5233 io_request->io_complete_callback = pqi_aio_io_complete; 5234 io_request->scmd = scmd; 5235 io_request->raid_bypass = raid_bypass; 5236 5237 request = io_request->iu; 5238 memset(request, 0, 5239 offsetof(struct pqi_raid_path_request, sg_descriptors)); 5240 5241 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5242 put_unaligned_le32(aio_handle, &request->nexus_id); 5243 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5244 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5245 put_unaligned_le16(io_request->index, &request->request_id); 5246 request->error_index = request->request_id; 5247 if (cdb_length > sizeof(request->cdb)) 5248 cdb_length = sizeof(request->cdb); 5249 request->cdb_length = cdb_length; 5250 memcpy(request->cdb, cdb, cdb_length); 5251 5252 switch (scmd->sc_data_direction) { 5253 case DMA_TO_DEVICE: 5254 request->data_direction = SOP_READ_FLAG; 5255 break; 5256 case DMA_FROM_DEVICE: 5257 request->data_direction = SOP_WRITE_FLAG; 5258 break; 5259 case DMA_NONE: 5260 request->data_direction = SOP_NO_DIRECTION_FLAG; 5261 break; 5262 case DMA_BIDIRECTIONAL: 5263 request->data_direction = SOP_BIDIRECTIONAL; 5264 break; 5265 default: 5266 dev_err(&ctrl_info->pci_dev->dev, 5267 "unknown data direction: %d\n", 5268 scmd->sc_data_direction); 5269 break; 5270 } 5271 5272 if (encryption_info) { 5273 request->encryption_enable = true; 5274 put_unaligned_le16(encryption_info->data_encryption_key_index, 5275 &request->data_encryption_key_index); 5276 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5277 &request->encrypt_tweak_lower); 5278 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5279 &request->encrypt_tweak_upper); 5280 } 5281 5282 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5283 if (rc) { 5284 pqi_free_io_request(io_request); 5285 return SCSI_MLQUEUE_HOST_BUSY; 5286 } 5287 5288 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5289 5290 return 0; 5291 } 5292 5293 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5294 struct scsi_cmnd *scmd) 5295 { 5296 u16 hw_queue; 5297 5298 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 5299 if (hw_queue > ctrl_info->max_hw_queue_index) 5300 hw_queue = 
0; 5301 5302 return hw_queue; 5303 } 5304 5305 /* 5306 * This function gets called just before we hand the completed SCSI request 5307 * back to the SML. 5308 */ 5309 5310 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5311 { 5312 struct pqi_scsi_dev *device; 5313 5314 if (!scmd->device) { 5315 set_host_byte(scmd, DID_NO_CONNECT); 5316 return; 5317 } 5318 5319 device = scmd->device->hostdata; 5320 if (!device) { 5321 set_host_byte(scmd, DID_NO_CONNECT); 5322 return; 5323 } 5324 5325 atomic_dec(&device->scsi_cmds_outstanding); 5326 } 5327 5328 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 5329 struct scsi_cmnd *scmd) 5330 { 5331 int rc; 5332 struct pqi_ctrl_info *ctrl_info; 5333 struct pqi_scsi_dev *device; 5334 u16 hw_queue; 5335 struct pqi_queue_group *queue_group; 5336 bool raid_bypassed; 5337 5338 device = scmd->device->hostdata; 5339 ctrl_info = shost_to_hba(shost); 5340 5341 if (!device) { 5342 set_host_byte(scmd, DID_NO_CONNECT); 5343 pqi_scsi_done(scmd); 5344 return 0; 5345 } 5346 5347 atomic_inc(&device->scsi_cmds_outstanding); 5348 5349 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info, 5350 device)) { 5351 set_host_byte(scmd, DID_NO_CONNECT); 5352 pqi_scsi_done(scmd); 5353 return 0; 5354 } 5355 5356 pqi_ctrl_busy(ctrl_info); 5357 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || 5358 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) { 5359 rc = SCSI_MLQUEUE_HOST_BUSY; 5360 goto out; 5361 } 5362 5363 /* 5364 * This is necessary because the SML doesn't zero out this field during 5365 * error recovery. 5366 */ 5367 scmd->result = 0; 5368 5369 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 5370 queue_group = &ctrl_info->queue_groups[hw_queue]; 5371 5372 if (pqi_is_logical_device(device)) { 5373 raid_bypassed = false; 5374 if (device->raid_bypass_enabled && 5375 !blk_rq_is_passthrough(scmd->request)) { 5376 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 5377 scmd, queue_group); 5378 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 5379 raid_bypassed = true; 5380 } 5381 if (!raid_bypassed) 5382 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5383 queue_group); 5384 } else { 5385 if (device->aio_enabled) 5386 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 5387 queue_group); 5388 else 5389 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5390 queue_group); 5391 } 5392 5393 out: 5394 pqi_ctrl_unbusy(ctrl_info); 5395 if (rc) 5396 atomic_dec(&device->scsi_cmds_outstanding); 5397 5398 return rc; 5399 } 5400 5401 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5402 struct pqi_queue_group *queue_group) 5403 { 5404 unsigned int path; 5405 unsigned long flags; 5406 bool list_is_empty; 5407 5408 for (path = 0; path < 2; path++) { 5409 while (1) { 5410 spin_lock_irqsave( 5411 &queue_group->submit_lock[path], flags); 5412 list_is_empty = 5413 list_empty(&queue_group->request_list[path]); 5414 spin_unlock_irqrestore( 5415 &queue_group->submit_lock[path], flags); 5416 if (list_is_empty) 5417 break; 5418 pqi_check_ctrl_health(ctrl_info); 5419 if (pqi_ctrl_offline(ctrl_info)) 5420 return -ENXIO; 5421 usleep_range(1000, 2000); 5422 } 5423 } 5424 5425 return 0; 5426 } 5427 5428 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5429 { 5430 int rc; 5431 unsigned int i; 5432 unsigned int path; 5433 struct pqi_queue_group *queue_group; 5434 pqi_index_t iq_pi; 5435 pqi_index_t iq_ci; 5436 5437 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5438 queue_group = 
&ctrl_info->queue_groups[i]; 5439 5440 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5441 if (rc) 5442 return rc; 5443 5444 for (path = 0; path < 2; path++) { 5445 iq_pi = queue_group->iq_pi_copy[path]; 5446 5447 while (1) { 5448 iq_ci = readl(queue_group->iq_ci[path]); 5449 if (iq_ci == iq_pi) 5450 break; 5451 pqi_check_ctrl_health(ctrl_info); 5452 if (pqi_ctrl_offline(ctrl_info)) 5453 return -ENXIO; 5454 usleep_range(1000, 2000); 5455 } 5456 } 5457 } 5458 5459 return 0; 5460 } 5461 5462 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5463 struct pqi_scsi_dev *device) 5464 { 5465 unsigned int i; 5466 unsigned int path; 5467 struct pqi_queue_group *queue_group; 5468 unsigned long flags; 5469 struct pqi_io_request *io_request; 5470 struct pqi_io_request *next; 5471 struct scsi_cmnd *scmd; 5472 struct pqi_scsi_dev *scsi_device; 5473 5474 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5475 queue_group = &ctrl_info->queue_groups[i]; 5476 5477 for (path = 0; path < 2; path++) { 5478 spin_lock_irqsave( 5479 &queue_group->submit_lock[path], flags); 5480 5481 list_for_each_entry_safe(io_request, next, 5482 &queue_group->request_list[path], 5483 request_list_entry) { 5484 scmd = io_request->scmd; 5485 if (!scmd) 5486 continue; 5487 5488 scsi_device = scmd->device->hostdata; 5489 if (scsi_device != device) 5490 continue; 5491 5492 list_del(&io_request->request_list_entry); 5493 set_host_byte(scmd, DID_RESET); 5494 pqi_scsi_done(scmd); 5495 } 5496 5497 spin_unlock_irqrestore( 5498 &queue_group->submit_lock[path], flags); 5499 } 5500 } 5501 } 5502 5503 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) 5504 { 5505 unsigned int i; 5506 unsigned int path; 5507 struct pqi_queue_group *queue_group; 5508 unsigned long flags; 5509 struct pqi_io_request *io_request; 5510 struct pqi_io_request *next; 5511 struct scsi_cmnd *scmd; 5512 5513 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5514 queue_group = &ctrl_info->queue_groups[i]; 5515 5516 for (path = 0; path < 2; path++) { 5517 spin_lock_irqsave(&queue_group->submit_lock[path], 5518 flags); 5519 5520 list_for_each_entry_safe(io_request, next, 5521 &queue_group->request_list[path], 5522 request_list_entry) { 5523 5524 scmd = io_request->scmd; 5525 if (!scmd) 5526 continue; 5527 5528 list_del(&io_request->request_list_entry); 5529 set_host_byte(scmd, DID_RESET); 5530 pqi_scsi_done(scmd); 5531 } 5532 5533 spin_unlock_irqrestore( 5534 &queue_group->submit_lock[path], flags); 5535 } 5536 } 5537 } 5538 5539 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5540 struct pqi_scsi_dev *device, unsigned long timeout_secs) 5541 { 5542 unsigned long timeout; 5543 5544 timeout = (timeout_secs * PQI_HZ) + jiffies; 5545 5546 while (atomic_read(&device->scsi_cmds_outstanding)) { 5547 pqi_check_ctrl_health(ctrl_info); 5548 if (pqi_ctrl_offline(ctrl_info)) 5549 return -ENXIO; 5550 if (timeout_secs != NO_TIMEOUT) { 5551 if (time_after(jiffies, timeout)) { 5552 dev_err(&ctrl_info->pci_dev->dev, 5553 "timed out waiting for pending IO\n"); 5554 return -ETIMEDOUT; 5555 } 5556 } 5557 usleep_range(1000, 2000); 5558 } 5559 5560 return 0; 5561 } 5562 5563 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5564 unsigned long timeout_secs) 5565 { 5566 bool io_pending; 5567 unsigned long flags; 5568 unsigned long timeout; 5569 struct pqi_scsi_dev *device; 5570 5571 timeout = (timeout_secs * PQI_HZ) + jiffies; 5572 while (1) { 5573 io_pending = false; 5574 
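/*
 * Note the split of responsibilities: pqi_fail_io_queued_for_device()
 * and pqi_fail_io_queued_for_all_devices() above only fail requests
 * still parked on the per-path submit lists, while this loop (and
 * pqi_device_wait_for_pending_io()) waits for every command still
 * counted in scsi_cmds_outstanding to be completed back to the SCSI
 * midlayer.
 */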
5575 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5576 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5577 scsi_device_list_entry) { 5578 if (atomic_read(&device->scsi_cmds_outstanding)) { 5579 io_pending = true; 5580 break; 5581 } 5582 } 5583 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5584 flags); 5585 5586 if (!io_pending) 5587 break; 5588 5589 pqi_check_ctrl_health(ctrl_info); 5590 if (pqi_ctrl_offline(ctrl_info)) 5591 return -ENXIO; 5592 5593 if (timeout_secs != NO_TIMEOUT) { 5594 if (time_after(jiffies, timeout)) { 5595 dev_err(&ctrl_info->pci_dev->dev, 5596 "timed out waiting for pending IO\n"); 5597 return -ETIMEDOUT; 5598 } 5599 } 5600 usleep_range(1000, 2000); 5601 } 5602 5603 return 0; 5604 } 5605 5606 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info) 5607 { 5608 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) { 5609 pqi_check_ctrl_health(ctrl_info); 5610 if (pqi_ctrl_offline(ctrl_info)) 5611 return -ENXIO; 5612 usleep_range(1000, 2000); 5613 } 5614 5615 return 0; 5616 } 5617 5618 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5619 void *context) 5620 { 5621 struct completion *waiting = context; 5622 5623 complete(waiting); 5624 } 5625 5626 #define PQI_LUN_RESET_TIMEOUT_SECS 30 5627 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 5628 5629 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5630 struct pqi_scsi_dev *device, struct completion *wait) 5631 { 5632 int rc; 5633 5634 while (1) { 5635 if (wait_for_completion_io_timeout(wait, 5636 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) { 5637 rc = 0; 5638 break; 5639 } 5640 5641 pqi_check_ctrl_health(ctrl_info); 5642 if (pqi_ctrl_offline(ctrl_info)) { 5643 rc = -ENXIO; 5644 break; 5645 } 5646 } 5647 5648 return rc; 5649 } 5650 5651 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5652 struct pqi_scsi_dev *device) 5653 { 5654 int rc; 5655 struct pqi_io_request *io_request; 5656 DECLARE_COMPLETION_ONSTACK(wait); 5657 struct pqi_task_management_request *request; 5658 5659 io_request = pqi_alloc_io_request(ctrl_info); 5660 io_request->io_complete_callback = pqi_lun_reset_complete; 5661 io_request->context = &wait; 5662 5663 request = io_request->iu; 5664 memset(request, 0, sizeof(*request)); 5665 5666 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 5667 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5668 &request->header.iu_length); 5669 put_unaligned_le16(io_request->index, &request->request_id); 5670 memcpy(request->lun_number, device->scsi3addr, 5671 sizeof(request->lun_number)); 5672 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5673 if (ctrl_info->tmf_iu_timeout_supported) 5674 put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS, 5675 &request->timeout); 5676 5677 pqi_start_io(ctrl_info, 5678 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5679 io_request); 5680 5681 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5682 if (rc == 0) 5683 rc = io_request->status; 5684 5685 pqi_free_io_request(io_request); 5686 5687 return rc; 5688 } 5689 5690 /* Performs a reset at the LUN level. 
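 * _pqi_device_reset() issues the LUN reset TMF up to
 * 1 + PQI_LUN_RESET_RETRIES times, sleeping
 * PQI_LUN_RESET_RETRY_INTERVAL_MSECS between attempts, then waits for
 * outstanding I/O: bounded by PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS if
 * the reset ultimately failed, unbounded (NO_TIMEOUT) if it succeeded.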
*/ 5691 5692 #define PQI_LUN_RESET_RETRIES 3 5693 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 5694 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120 5695 5696 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5697 struct pqi_scsi_dev *device) 5698 { 5699 int rc; 5700 unsigned int retries; 5701 unsigned long timeout_secs; 5702 5703 for (retries = 0;;) { 5704 rc = pqi_lun_reset(ctrl_info, device); 5705 if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES) 5706 break; 5707 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 5708 } 5709 5710 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT; 5711 5712 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); 5713 5714 return rc == 0 ? SUCCESS : FAILED; 5715 } 5716 5717 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5718 struct pqi_scsi_dev *device) 5719 { 5720 int rc; 5721 5722 mutex_lock(&ctrl_info->lun_reset_mutex); 5723 5724 pqi_ctrl_block_requests(ctrl_info); 5725 pqi_ctrl_wait_until_quiesced(ctrl_info); 5726 pqi_fail_io_queued_for_device(ctrl_info, device); 5727 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5728 pqi_device_reset_start(device); 5729 pqi_ctrl_unblock_requests(ctrl_info); 5730 5731 if (rc) 5732 rc = FAILED; 5733 else 5734 rc = _pqi_device_reset(ctrl_info, device); 5735 5736 pqi_device_reset_done(device); 5737 5738 mutex_unlock(&ctrl_info->lun_reset_mutex); 5739 5740 return rc; 5741 } 5742 5743 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5744 { 5745 int rc; 5746 struct Scsi_Host *shost; 5747 struct pqi_ctrl_info *ctrl_info; 5748 struct pqi_scsi_dev *device; 5749 5750 shost = scmd->device->host; 5751 ctrl_info = shost_to_hba(shost); 5752 device = scmd->device->hostdata; 5753 5754 dev_err(&ctrl_info->pci_dev->dev, 5755 "resetting scsi %d:%d:%d:%d\n", 5756 shost->host_no, device->bus, device->target, device->lun); 5757 5758 pqi_check_ctrl_health(ctrl_info); 5759 if (pqi_ctrl_offline(ctrl_info) || 5760 pqi_device_reset_blocked(ctrl_info)) { 5761 rc = FAILED; 5762 goto out; 5763 } 5764 5765 pqi_wait_until_ofa_finished(ctrl_info); 5766 5767 atomic_inc(&ctrl_info->sync_cmds_outstanding); 5768 rc = pqi_device_reset(ctrl_info, device); 5769 atomic_dec(&ctrl_info->sync_cmds_outstanding); 5770 5771 out: 5772 dev_err(&ctrl_info->pci_dev->dev, 5773 "reset of scsi %d:%d:%d:%d: %s\n", 5774 shost->host_no, device->bus, device->target, device->lun, 5775 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5776 5777 return rc; 5778 } 5779 5780 static int pqi_slave_alloc(struct scsi_device *sdev) 5781 { 5782 struct pqi_scsi_dev *device; 5783 unsigned long flags; 5784 struct pqi_ctrl_info *ctrl_info; 5785 struct scsi_target *starget; 5786 struct sas_rphy *rphy; 5787 5788 ctrl_info = shost_to_hba(sdev->host); 5789 5790 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5791 5792 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5793 starget = scsi_target(sdev); 5794 rphy = target_to_rphy(starget); 5795 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5796 if (device) { 5797 device->target = sdev_id(sdev); 5798 device->lun = sdev->lun; 5799 device->target_lun_valid = true; 5800 } 5801 } else { 5802 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5803 sdev_id(sdev), sdev->lun); 5804 } 5805 5806 if (device) { 5807 sdev->hostdata = device; 5808 device->sdev = sdev; 5809 if (device->queue_depth) { 5810 device->advertised_queue_depth = device->queue_depth; 5811 scsi_change_queue_depth(sdev, 5812 device->advertised_queue_depth); 5813 } 5814 if (pqi_is_logical_device(device)) 5815 pqi_disable_write_same(sdev); 5816 else 5817 sdev->allow_restart = 1; 5818 } 5819 5820 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5821 5822 return 0; 5823 } 5824 5825 static int pqi_map_queues(struct Scsi_Host *shost) 5826 { 5827 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5828 5829 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 5830 ctrl_info->pci_dev, 0); 5831 } 5832 5833 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5834 void __user *arg) 5835 { 5836 struct pci_dev *pci_dev; 5837 u32 subsystem_vendor; 5838 u32 subsystem_device; 5839 cciss_pci_info_struct pciinfo; 5840 5841 if (!arg) 5842 return -EINVAL; 5843 5844 pci_dev = ctrl_info->pci_dev; 5845 5846 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5847 pciinfo.bus = pci_dev->bus->number; 5848 pciinfo.dev_fn = pci_dev->devfn; 5849 subsystem_vendor = pci_dev->subsystem_vendor; 5850 subsystem_device = pci_dev->subsystem_device; 5851 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5852 subsystem_vendor; 5853 5854 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5855 return -EFAULT; 5856 5857 return 0; 5858 } 5859 5860 static int pqi_getdrivver_ioctl(void __user *arg) 5861 { 5862 u32 version; 5863 5864 if (!arg) 5865 return -EINVAL; 5866 5867 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5868 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5869 5870 if (copy_to_user(arg, &version, sizeof(version))) 5871 return -EFAULT; 5872 5873 return 0; 5874 } 5875 5876 struct ciss_error_info { 5877 u8 scsi_status; 5878 int command_status; 5879 size_t sense_data_length; 5880 }; 5881 5882 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5883 struct ciss_error_info *ciss_error_info) 5884 { 5885 int ciss_cmd_status; 5886 size_t sense_data_length; 5887 5888 switch (pqi_error_info->data_out_result) { 5889 case PQI_DATA_IN_OUT_GOOD: 5890 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5891 break; 5892 case PQI_DATA_IN_OUT_UNDERFLOW: 5893 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5894 break; 5895 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5896 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5897 break; 5898 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5899 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5900 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5901 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5902 case PQI_DATA_IN_OUT_ERROR: 5903 
ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5904 break; 5905 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5906 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 5907 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5908 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5909 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5910 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5911 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5912 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5913 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5914 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5915 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5916 break; 5917 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5918 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5919 break; 5920 case PQI_DATA_IN_OUT_ABORTED: 5921 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5922 break; 5923 case PQI_DATA_IN_OUT_TIMEOUT: 5924 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5925 break; 5926 default: 5927 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5928 break; 5929 } 5930 5931 sense_data_length = 5932 get_unaligned_le16(&pqi_error_info->sense_data_length); 5933 if (sense_data_length == 0) 5934 sense_data_length = 5935 get_unaligned_le16(&pqi_error_info->response_data_length); 5936 if (sense_data_length) 5937 if (sense_data_length > sizeof(pqi_error_info->data)) 5938 sense_data_length = sizeof(pqi_error_info->data); 5939 5940 ciss_error_info->scsi_status = pqi_error_info->status; 5941 ciss_error_info->command_status = ciss_cmd_status; 5942 ciss_error_info->sense_data_length = sense_data_length; 5943 } 5944 5945 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5946 { 5947 int rc; 5948 char *kernel_buffer = NULL; 5949 u16 iu_length; 5950 size_t sense_data_length; 5951 IOCTL_Command_struct iocommand; 5952 struct pqi_raid_path_request request; 5953 struct pqi_raid_error_info pqi_error_info; 5954 struct ciss_error_info ciss_error_info; 5955 5956 if (pqi_ctrl_offline(ctrl_info)) 5957 return -ENXIO; 5958 if (!arg) 5959 return -EINVAL; 5960 if (!capable(CAP_SYS_RAWIO)) 5961 return -EPERM; 5962 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5963 return -EFAULT; 5964 if (iocommand.buf_size < 1 && 5965 iocommand.Request.Type.Direction != XFER_NONE) 5966 return -EINVAL; 5967 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5968 return -EINVAL; 5969 if (iocommand.Request.Type.Type != TYPE_CMD) 5970 return -EINVAL; 5971 5972 switch (iocommand.Request.Type.Direction) { 5973 case XFER_NONE: 5974 case XFER_WRITE: 5975 case XFER_READ: 5976 case XFER_READ | XFER_WRITE: 5977 break; 5978 default: 5979 return -EINVAL; 5980 } 5981 5982 if (iocommand.buf_size > 0) { 5983 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5984 if (!kernel_buffer) 5985 return -ENOMEM; 5986 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5987 if (copy_from_user(kernel_buffer, iocommand.buf, 5988 iocommand.buf_size)) { 5989 rc = -EFAULT; 5990 goto out; 5991 } 5992 } else { 5993 memset(kernel_buffer, 0, iocommand.buf_size); 5994 } 5995 } 5996 5997 memset(&request, 0, sizeof(request)); 5998 5999 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6000 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 6001 PQI_REQUEST_HEADER_LENGTH; 6002 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6003 sizeof(request.lun_number)); 6004 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6005 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6006 6007 switch 
(iocommand.Request.Type.Direction) { 6008 case XFER_NONE: 6009 request.data_direction = SOP_NO_DIRECTION_FLAG; 6010 break; 6011 case XFER_WRITE: 6012 request.data_direction = SOP_WRITE_FLAG; 6013 break; 6014 case XFER_READ: 6015 request.data_direction = SOP_READ_FLAG; 6016 break; 6017 case XFER_READ | XFER_WRITE: 6018 request.data_direction = SOP_BIDIRECTIONAL; 6019 break; 6020 } 6021 6022 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6023 6024 if (iocommand.buf_size > 0) { 6025 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6026 6027 rc = pqi_map_single(ctrl_info->pci_dev, 6028 &request.sg_descriptors[0], kernel_buffer, 6029 iocommand.buf_size, DMA_BIDIRECTIONAL); 6030 if (rc) 6031 goto out; 6032 6033 iu_length += sizeof(request.sg_descriptors[0]); 6034 } 6035 6036 put_unaligned_le16(iu_length, &request.header.iu_length); 6037 6038 if (ctrl_info->raid_iu_timeout_supported) 6039 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6040 6041 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6042 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 6043 6044 if (iocommand.buf_size > 0) 6045 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6046 DMA_BIDIRECTIONAL); 6047 6048 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6049 6050 if (rc == 0) { 6051 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6052 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6053 iocommand.error_info.CommandStatus = 6054 ciss_error_info.command_status; 6055 sense_data_length = ciss_error_info.sense_data_length; 6056 if (sense_data_length) { 6057 if (sense_data_length > 6058 sizeof(iocommand.error_info.SenseInfo)) 6059 sense_data_length = 6060 sizeof(iocommand.error_info.SenseInfo); 6061 memcpy(iocommand.error_info.SenseInfo, 6062 pqi_error_info.data, sense_data_length); 6063 iocommand.error_info.SenseLen = sense_data_length; 6064 } 6065 } 6066 6067 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6068 rc = -EFAULT; 6069 goto out; 6070 } 6071 6072 if (rc == 0 && iocommand.buf_size > 0 && 6073 (iocommand.Request.Type.Direction & XFER_READ)) { 6074 if (copy_to_user(iocommand.buf, kernel_buffer, 6075 iocommand.buf_size)) { 6076 rc = -EFAULT; 6077 } 6078 } 6079 6080 out: 6081 kfree(kernel_buffer); 6082 6083 return rc; 6084 } 6085 6086 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6087 void __user *arg) 6088 { 6089 int rc; 6090 struct pqi_ctrl_info *ctrl_info; 6091 6092 ctrl_info = shost_to_hba(sdev->host); 6093 6094 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) 6095 return -EBUSY; 6096 6097 switch (cmd) { 6098 case CCISS_DEREGDISK: 6099 case CCISS_REGNEWDISK: 6100 case CCISS_REGNEWD: 6101 rc = pqi_scan_scsi_devices(ctrl_info); 6102 break; 6103 case CCISS_GETPCIINFO: 6104 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6105 break; 6106 case CCISS_GETDRIVVER: 6107 rc = pqi_getdrivver_ioctl(arg); 6108 break; 6109 case CCISS_PASSTHRU: 6110 rc = pqi_passthru_ioctl(ctrl_info, arg); 6111 break; 6112 default: 6113 rc = -EINVAL; 6114 break; 6115 } 6116 6117 return rc; 6118 } 6119 6120 static ssize_t pqi_firmware_version_show(struct device *dev, 6121 struct device_attribute *attr, char *buffer) 6122 { 6123 struct Scsi_Host *shost; 6124 struct pqi_ctrl_info *ctrl_info; 6125 6126 shost = class_to_shost(dev); 6127 ctrl_info = shost_to_hba(shost); 6128 6129 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6130 } 6131 6132 static ssize_t 
pqi_driver_version_show(struct device *dev, 6133 struct device_attribute *attr, char *buffer) 6134 { 6135 return snprintf(buffer, PAGE_SIZE, "%s\n", 6136 DRIVER_VERSION BUILD_TIMESTAMP); 6137 } 6138 6139 static ssize_t pqi_serial_number_show(struct device *dev, 6140 struct device_attribute *attr, char *buffer) 6141 { 6142 struct Scsi_Host *shost; 6143 struct pqi_ctrl_info *ctrl_info; 6144 6145 shost = class_to_shost(dev); 6146 ctrl_info = shost_to_hba(shost); 6147 6148 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6149 } 6150 6151 static ssize_t pqi_model_show(struct device *dev, 6152 struct device_attribute *attr, char *buffer) 6153 { 6154 struct Scsi_Host *shost; 6155 struct pqi_ctrl_info *ctrl_info; 6156 6157 shost = class_to_shost(dev); 6158 ctrl_info = shost_to_hba(shost); 6159 6160 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6161 } 6162 6163 static ssize_t pqi_vendor_show(struct device *dev, 6164 struct device_attribute *attr, char *buffer) 6165 { 6166 struct Scsi_Host *shost; 6167 struct pqi_ctrl_info *ctrl_info; 6168 6169 shost = class_to_shost(dev); 6170 ctrl_info = shost_to_hba(shost); 6171 6172 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6173 } 6174 6175 static ssize_t pqi_host_rescan_store(struct device *dev, 6176 struct device_attribute *attr, const char *buffer, size_t count) 6177 { 6178 struct Scsi_Host *shost = class_to_shost(dev); 6179 6180 pqi_scan_start(shost); 6181 6182 return count; 6183 } 6184 6185 static ssize_t pqi_lockup_action_show(struct device *dev, 6186 struct device_attribute *attr, char *buffer) 6187 { 6188 int count = 0; 6189 unsigned int i; 6190 6191 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6192 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6193 count += scnprintf(buffer + count, PAGE_SIZE - count, 6194 "[%s] ", pqi_lockup_actions[i].name); 6195 else 6196 count += scnprintf(buffer + count, PAGE_SIZE - count, 6197 "%s ", pqi_lockup_actions[i].name); 6198 } 6199 6200 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); 6201 6202 return count; 6203 } 6204 6205 static ssize_t pqi_lockup_action_store(struct device *dev, 6206 struct device_attribute *attr, const char *buffer, size_t count) 6207 { 6208 unsigned int i; 6209 char *action_name; 6210 char action_name_buffer[32]; 6211 6212 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6213 action_name = strstrip(action_name_buffer); 6214 6215 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6216 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 6217 pqi_lockup_action = pqi_lockup_actions[i].action; 6218 return count; 6219 } 6220 } 6221 6222 return -EINVAL; 6223 } 6224 6225 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); 6226 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 6227 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 6228 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 6229 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 6230 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 6231 static DEVICE_ATTR(lockup_action, 0644, 6232 pqi_lockup_action_show, pqi_lockup_action_store); 6233 6234 static struct device_attribute *pqi_shost_attrs[] = { 6235 &dev_attr_driver_version, 6236 &dev_attr_firmware_version, 6237 &dev_attr_model, 6238 &dev_attr_serial_number, 6239 &dev_attr_vendor, 6240 &dev_attr_rescan, 6241 &dev_attr_lockup_action, 6242 NULL 6243 }; 6244 6245 static ssize_t 
pqi_unique_id_show(struct device *dev, 6246 struct device_attribute *attr, char *buffer) 6247 { 6248 struct pqi_ctrl_info *ctrl_info; 6249 struct scsi_device *sdev; 6250 struct pqi_scsi_dev *device; 6251 unsigned long flags; 6252 u8 unique_id[16]; 6253 6254 sdev = to_scsi_device(dev); 6255 ctrl_info = shost_to_hba(sdev->host); 6256 6257 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6258 6259 device = sdev->hostdata; 6260 if (!device) { 6261 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6262 flags); 6263 return -ENODEV; 6264 } 6265 6266 if (device->is_physical_device) { 6267 memset(unique_id, 0, 8); 6268 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); 6269 } else { 6270 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 6271 } 6272 6273 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6274 6275 return snprintf(buffer, PAGE_SIZE, 6276 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", 6277 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 6278 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 6279 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 6280 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 6281 } 6282 6283 static ssize_t pqi_lunid_show(struct device *dev, 6284 struct device_attribute *attr, char *buffer) 6285 { 6286 struct pqi_ctrl_info *ctrl_info; 6287 struct scsi_device *sdev; 6288 struct pqi_scsi_dev *device; 6289 unsigned long flags; 6290 u8 lunid[8]; 6291 6292 sdev = to_scsi_device(dev); 6293 ctrl_info = shost_to_hba(sdev->host); 6294 6295 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6296 6297 device = sdev->hostdata; 6298 if (!device) { 6299 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6300 flags); 6301 return -ENODEV; 6302 } 6303 6304 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 6305 6306 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6307 6308 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 6309 } 6310 6311 #define MAX_PATHS 8 6312 6313 static ssize_t pqi_path_info_show(struct device *dev, 6314 struct device_attribute *attr, char *buf) 6315 { 6316 struct pqi_ctrl_info *ctrl_info; 6317 struct scsi_device *sdev; 6318 struct pqi_scsi_dev *device; 6319 unsigned long flags; 6320 int i; 6321 int output_len = 0; 6322 u8 box; 6323 u8 bay; 6324 u8 path_map_index; 6325 char *active; 6326 u8 phys_connector[2]; 6327 6328 sdev = to_scsi_device(dev); 6329 ctrl_info = shost_to_hba(sdev->host); 6330 6331 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6332 6333 device = sdev->hostdata; 6334 if (!device) { 6335 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6336 flags); 6337 return -ENODEV; 6338 } 6339 6340 bay = device->bay; 6341 for (i = 0; i < MAX_PATHS; i++) { 6342 path_map_index = 1 << i; 6343 if (i == device->active_path_index) 6344 active = "Active"; 6345 else if (device->path_map & path_map_index) 6346 active = "Inactive"; 6347 else 6348 continue; 6349 6350 output_len += scnprintf(buf + output_len, 6351 PAGE_SIZE - output_len, 6352 "[%d:%d:%d:%d] %20.20s ", 6353 ctrl_info->scsi_host->host_no, 6354 device->bus, device->target, 6355 device->lun, 6356 scsi_device_type(device->devtype)); 6357 6358 if (device->devtype == TYPE_RAID || 6359 pqi_is_logical_device(device)) 6360 goto end_buffer; 6361 6362 memcpy(&phys_connector, &device->phys_connector[i], 6363 sizeof(phys_connector)); 6364 if (phys_connector[0] < '0') 6365 phys_connector[0] = '0'; 6366 if (phys_connector[1] < '0') 6367 
			phys_connector[1] = '0';

		output_len += scnprintf(buf + output_len,
			PAGE_SIZE - output_len,
			"PORT: %.2s ", phys_connector);

		box = device->box[i];
		if (box != 0 && box != 0xFF)
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"BOX: %hhu ", box);

		if ((device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC) &&
			pqi_expose_device(device))
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"BAY: %hhu ", bay);

end_buffer:
		output_len += scnprintf(buf + output_len,
			PAGE_SIZE - output_len,
			"%s\n", active);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return output_len;
}

static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device || pqi_is_logical_device(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}

static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}

static ssize_t pqi_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	char *raid_level;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	if (pqi_is_logical_device(device))
		raid_level = pqi_raid_level_to_string(device->raid_level);
	else
		raid_level = "N/A";

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}

static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
	pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);

static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	&dev_attr_raid_level,
	NULL
};

static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.map_queues = pqi_map_queues,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};

static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"add SAS host failed for controller %u\n",
6550 ctrl_info->ctrl_id); 6551 goto remove_host; 6552 } 6553 6554 ctrl_info->scsi_host = shost; 6555 6556 return 0; 6557 6558 remove_host: 6559 scsi_remove_host(shost); 6560 free_host: 6561 scsi_host_put(shost); 6562 6563 return rc; 6564 } 6565 6566 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 6567 { 6568 struct Scsi_Host *shost; 6569 6570 pqi_delete_sas_host(ctrl_info); 6571 6572 shost = ctrl_info->scsi_host; 6573 if (!shost) 6574 return; 6575 6576 scsi_remove_host(shost); 6577 scsi_host_put(shost); 6578 } 6579 6580 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 6581 { 6582 int rc = 0; 6583 struct pqi_device_registers __iomem *pqi_registers; 6584 unsigned long timeout; 6585 unsigned int timeout_msecs; 6586 union pqi_reset_register reset_reg; 6587 6588 pqi_registers = ctrl_info->pqi_registers; 6589 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 6590 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 6591 6592 while (1) { 6593 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 6594 reset_reg.all_bits = readl(&pqi_registers->device_reset); 6595 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 6596 break; 6597 pqi_check_ctrl_health(ctrl_info); 6598 if (pqi_ctrl_offline(ctrl_info)) { 6599 rc = -ENXIO; 6600 break; 6601 } 6602 if (time_after(jiffies, timeout)) { 6603 rc = -ETIMEDOUT; 6604 break; 6605 } 6606 } 6607 6608 return rc; 6609 } 6610 6611 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 6612 { 6613 int rc; 6614 union pqi_reset_register reset_reg; 6615 6616 if (ctrl_info->pqi_reset_quiesce_supported) { 6617 rc = sis_pqi_reset_quiesce(ctrl_info); 6618 if (rc) { 6619 dev_err(&ctrl_info->pci_dev->dev, 6620 "PQI reset failed during quiesce with error %d\n", 6621 rc); 6622 return rc; 6623 } 6624 } 6625 6626 reset_reg.all_bits = 0; 6627 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 6628 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 6629 6630 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 6631 6632 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 6633 if (rc) 6634 dev_err(&ctrl_info->pci_dev->dev, 6635 "PQI reset failed with error %d\n", rc); 6636 6637 return rc; 6638 } 6639 6640 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 6641 { 6642 int rc; 6643 struct bmic_sense_subsystem_info *sense_info; 6644 6645 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); 6646 if (!sense_info) 6647 return -ENOMEM; 6648 6649 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 6650 if (rc) 6651 goto out; 6652 6653 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 6654 sizeof(sense_info->ctrl_serial_number)); 6655 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 6656 6657 out: 6658 kfree(sense_info); 6659 6660 return rc; 6661 } 6662 6663 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 6664 { 6665 int rc; 6666 struct bmic_identify_controller *identify; 6667 6668 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 6669 if (!identify) 6670 return -ENOMEM; 6671 6672 rc = pqi_identify_controller(ctrl_info, identify); 6673 if (rc) 6674 goto out; 6675 6676 memcpy(ctrl_info->firmware_version, identify->firmware_version, 6677 sizeof(identify->firmware_version)); 6678 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 6679 snprintf(ctrl_info->firmware_version + 6680 strlen(ctrl_info->firmware_version), 6681 sizeof(ctrl_info->firmware_version), 6682 "-%u", 
get_unaligned_le16(&identify->firmware_build_number)); 6683 6684 memcpy(ctrl_info->model, identify->product_id, 6685 sizeof(identify->product_id)); 6686 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 6687 6688 memcpy(ctrl_info->vendor, identify->vendor_id, 6689 sizeof(identify->vendor_id)); 6690 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 6691 6692 out: 6693 kfree(identify); 6694 6695 return rc; 6696 } 6697 6698 struct pqi_config_table_section_info { 6699 struct pqi_ctrl_info *ctrl_info; 6700 void *section; 6701 u32 section_offset; 6702 void __iomem *section_iomem_addr; 6703 }; 6704 6705 static inline bool pqi_is_firmware_feature_supported( 6706 struct pqi_config_table_firmware_features *firmware_features, 6707 unsigned int bit_position) 6708 { 6709 unsigned int byte_index; 6710 6711 byte_index = bit_position / BITS_PER_BYTE; 6712 6713 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 6714 return false; 6715 6716 return firmware_features->features_supported[byte_index] & 6717 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 6718 } 6719 6720 static inline bool pqi_is_firmware_feature_enabled( 6721 struct pqi_config_table_firmware_features *firmware_features, 6722 void __iomem *firmware_features_iomem_addr, 6723 unsigned int bit_position) 6724 { 6725 unsigned int byte_index; 6726 u8 __iomem *features_enabled_iomem_addr; 6727 6728 byte_index = (bit_position / BITS_PER_BYTE) + 6729 (le16_to_cpu(firmware_features->num_elements) * 2); 6730 6731 features_enabled_iomem_addr = firmware_features_iomem_addr + 6732 offsetof(struct pqi_config_table_firmware_features, 6733 features_supported) + byte_index; 6734 6735 return *((__force u8 *)features_enabled_iomem_addr) & 6736 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 6737 } 6738 6739 static inline void pqi_request_firmware_feature( 6740 struct pqi_config_table_firmware_features *firmware_features, 6741 unsigned int bit_position) 6742 { 6743 unsigned int byte_index; 6744 6745 byte_index = (bit_position / BITS_PER_BYTE) + 6746 le16_to_cpu(firmware_features->num_elements); 6747 6748 firmware_features->features_supported[byte_index] |= 6749 (1 << (bit_position % BITS_PER_BYTE)); 6750 } 6751 6752 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 6753 u16 first_section, u16 last_section) 6754 { 6755 struct pqi_vendor_general_request request; 6756 6757 memset(&request, 0, sizeof(request)); 6758 6759 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 6760 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 6761 &request.header.iu_length); 6762 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 6763 &request.function_code); 6764 put_unaligned_le16(first_section, 6765 &request.data.config_table_update.first_section); 6766 put_unaligned_le16(last_section, 6767 &request.data.config_table_update.last_section); 6768 6769 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6770 0, NULL, NO_TIMEOUT); 6771 } 6772 6773 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 6774 struct pqi_config_table_firmware_features *firmware_features, 6775 void __iomem *firmware_features_iomem_addr) 6776 { 6777 void *features_requested; 6778 void __iomem *features_requested_iomem_addr; 6779 6780 features_requested = firmware_features->features_supported + 6781 le16_to_cpu(firmware_features->num_elements); 6782 6783 features_requested_iomem_addr = firmware_features_iomem_addr + 6784 (features_requested - (void *)firmware_features); 6785 6786 
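	/*
	 * The firmware features section carries three byte arrays of
	 * num_elements bytes each, laid out back to back: the features the
	 * firmware supports, the features the host is requesting, and the
	 * features the firmware has enabled.  features_requested therefore
	 * starts num_elements bytes past features_supported[], which is the
	 * offset mirrored into the iomem address computed above; the enabled
	 * array read by pqi_is_firmware_feature_enabled() follows another
	 * num_elements bytes after that.
	 */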
memcpy_toio(features_requested_iomem_addr, features_requested, 6787 le16_to_cpu(firmware_features->num_elements)); 6788 6789 return pqi_config_table_update(ctrl_info, 6790 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 6791 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 6792 } 6793 6794 struct pqi_firmware_feature { 6795 char *feature_name; 6796 unsigned int feature_bit; 6797 bool supported; 6798 bool enabled; 6799 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 6800 struct pqi_firmware_feature *firmware_feature); 6801 }; 6802 6803 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 6804 struct pqi_firmware_feature *firmware_feature) 6805 { 6806 if (!firmware_feature->supported) { 6807 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 6808 firmware_feature->feature_name); 6809 return; 6810 } 6811 6812 if (firmware_feature->enabled) { 6813 dev_info(&ctrl_info->pci_dev->dev, 6814 "%s enabled\n", firmware_feature->feature_name); 6815 return; 6816 } 6817 6818 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 6819 firmware_feature->feature_name); 6820 } 6821 6822 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 6823 struct pqi_firmware_feature *firmware_feature) 6824 { 6825 switch (firmware_feature->feature_bit) { 6826 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 6827 ctrl_info->soft_reset_handshake_supported = 6828 firmware_feature->enabled; 6829 break; 6830 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 6831 ctrl_info->raid_iu_timeout_supported = 6832 firmware_feature->enabled; 6833 break; 6834 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 6835 ctrl_info->tmf_iu_timeout_supported = 6836 firmware_feature->enabled; 6837 break; 6838 } 6839 6840 pqi_firmware_feature_status(ctrl_info, firmware_feature); 6841 } 6842 6843 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 6844 struct pqi_firmware_feature *firmware_feature) 6845 { 6846 if (firmware_feature->feature_status) 6847 firmware_feature->feature_status(ctrl_info, firmware_feature); 6848 } 6849 6850 static DEFINE_MUTEX(pqi_firmware_features_mutex); 6851 6852 static struct pqi_firmware_feature pqi_firmware_features[] = { 6853 { 6854 .feature_name = "Online Firmware Activation", 6855 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 6856 .feature_status = pqi_firmware_feature_status, 6857 }, 6858 { 6859 .feature_name = "Serial Management Protocol", 6860 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 6861 .feature_status = pqi_firmware_feature_status, 6862 }, 6863 { 6864 .feature_name = "New Soft Reset Handshake", 6865 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 6866 .feature_status = pqi_ctrl_update_feature_flags, 6867 }, 6868 { 6869 .feature_name = "RAID IU Timeout", 6870 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 6871 .feature_status = pqi_ctrl_update_feature_flags, 6872 }, 6873 { 6874 .feature_name = "TMF IU Timeout", 6875 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 6876 .feature_status = pqi_ctrl_update_feature_flags, 6877 }, 6878 }; 6879 6880 static void pqi_process_firmware_features( 6881 struct pqi_config_table_section_info *section_info) 6882 { 6883 int rc; 6884 struct pqi_ctrl_info *ctrl_info; 6885 struct pqi_config_table_firmware_features *firmware_features; 6886 void __iomem *firmware_features_iomem_addr; 6887 unsigned int i; 6888 unsigned int num_features_supported; 6889 6890 ctrl_info = section_info->ctrl_info; 6891 firmware_features = section_info->section; 6892 firmware_features_iomem_addr = 
section_info->section_iomem_addr; 6893 6894 for (i = 0, num_features_supported = 0; 6895 i < ARRAY_SIZE(pqi_firmware_features); i++) { 6896 if (pqi_is_firmware_feature_supported(firmware_features, 6897 pqi_firmware_features[i].feature_bit)) { 6898 pqi_firmware_features[i].supported = true; 6899 num_features_supported++; 6900 } else { 6901 pqi_firmware_feature_update(ctrl_info, 6902 &pqi_firmware_features[i]); 6903 } 6904 } 6905 6906 if (num_features_supported == 0) 6907 return; 6908 6909 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6910 if (!pqi_firmware_features[i].supported) 6911 continue; 6912 pqi_request_firmware_feature(firmware_features, 6913 pqi_firmware_features[i].feature_bit); 6914 } 6915 6916 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 6917 firmware_features_iomem_addr); 6918 if (rc) { 6919 dev_err(&ctrl_info->pci_dev->dev, 6920 "failed to enable firmware features in PQI configuration table\n"); 6921 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6922 if (!pqi_firmware_features[i].supported) 6923 continue; 6924 pqi_firmware_feature_update(ctrl_info, 6925 &pqi_firmware_features[i]); 6926 } 6927 return; 6928 } 6929 6930 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6931 if (!pqi_firmware_features[i].supported) 6932 continue; 6933 if (pqi_is_firmware_feature_enabled(firmware_features, 6934 firmware_features_iomem_addr, 6935 pqi_firmware_features[i].feature_bit)) { 6936 pqi_firmware_features[i].enabled = true; 6937 } 6938 pqi_firmware_feature_update(ctrl_info, 6939 &pqi_firmware_features[i]); 6940 } 6941 } 6942 6943 static void pqi_init_firmware_features(void) 6944 { 6945 unsigned int i; 6946 6947 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6948 pqi_firmware_features[i].supported = false; 6949 pqi_firmware_features[i].enabled = false; 6950 } 6951 } 6952 6953 static void pqi_process_firmware_features_section( 6954 struct pqi_config_table_section_info *section_info) 6955 { 6956 mutex_lock(&pqi_firmware_features_mutex); 6957 pqi_init_firmware_features(); 6958 pqi_process_firmware_features(section_info); 6959 mutex_unlock(&pqi_firmware_features_mutex); 6960 } 6961 6962 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 6963 { 6964 u32 table_length; 6965 u32 section_offset; 6966 void __iomem *table_iomem_addr; 6967 struct pqi_config_table *config_table; 6968 struct pqi_config_table_section_header *section; 6969 struct pqi_config_table_section_info section_info; 6970 6971 table_length = ctrl_info->config_table_length; 6972 if (table_length == 0) 6973 return 0; 6974 6975 config_table = kmalloc(table_length, GFP_KERNEL); 6976 if (!config_table) { 6977 dev_err(&ctrl_info->pci_dev->dev, 6978 "failed to allocate memory for PQI configuration table\n"); 6979 return -ENOMEM; 6980 } 6981 6982 /* 6983 * Copy the config table contents from I/O memory space into the 6984 * temporary buffer. 
 */
	table_iomem_addr = ctrl_info->iomem_base +
		ctrl_info->config_table_offset;
	memcpy_fromio(config_table, table_iomem_addr, table_length);

	section_info.ctrl_info = ctrl_info;
	section_offset =
		get_unaligned_le32(&config_table->first_section_offset);

	while (section_offset) {
		section = (void *)config_table + section_offset;

		section_info.section = section;
		section_info.section_offset = section_offset;
		section_info.section_iomem_addr =
			table_iomem_addr + section_offset;

		switch (get_unaligned_le16(&section->section_id)) {
		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
			pqi_process_firmware_features_section(&section_info);
			break;
		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
			if (pqi_disable_heartbeat)
				dev_warn(&ctrl_info->pci_dev->dev,
				"heartbeat disabled by module parameter\n");
			else
				ctrl_info->heartbeat_counter =
					table_iomem_addr +
					section_offset +
					offsetof(
					struct pqi_config_table_heartbeat,
						heartbeat_counter);
			break;
		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
			ctrl_info->soft_reset_status =
				table_iomem_addr +
				section_offset +
				offsetof(struct pqi_config_table_soft_reset,
					soft_reset_status);
			break;
		}

		section_offset =
			get_unaligned_le16(&section->next_section_offset);
	}

	kfree(config_table);

	return 0;
}

/* Switches the controller from PQI mode back into SIS mode. */

static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
	rc = pqi_reset(ctrl_info);
	if (rc)
		return rc;
	rc = sis_reenable_sis_mode(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"re-enabling SIS mode failed with error %d\n", rc);
		return rc;
	}
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);

	return 0;
}

/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */

static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
		return 0;

	if (sis_is_kernel_up(ctrl_info)) {
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		return 0;
	}

	return pqi_revert_to_sis_mode(ctrl_info);
}

#define PQI_POST_RESET_DELAY_B4_MSGU_READY	5000

static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (reset_devices) {
		sis_soft_reset(ctrl_info);
		msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	} else {
		rc = pqi_force_sis_mode(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
7104 */ 7105 rc = sis_get_ctrl_properties(ctrl_info); 7106 if (rc) { 7107 dev_err(&ctrl_info->pci_dev->dev, 7108 "error obtaining controller properties\n"); 7109 return rc; 7110 } 7111 7112 rc = sis_get_pqi_capabilities(ctrl_info); 7113 if (rc) { 7114 dev_err(&ctrl_info->pci_dev->dev, 7115 "error obtaining controller capabilities\n"); 7116 return rc; 7117 } 7118 7119 if (reset_devices) { 7120 if (ctrl_info->max_outstanding_requests > 7121 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 7122 ctrl_info->max_outstanding_requests = 7123 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 7124 } else { 7125 if (ctrl_info->max_outstanding_requests > 7126 PQI_MAX_OUTSTANDING_REQUESTS) 7127 ctrl_info->max_outstanding_requests = 7128 PQI_MAX_OUTSTANDING_REQUESTS; 7129 } 7130 7131 pqi_calculate_io_resources(ctrl_info); 7132 7133 rc = pqi_alloc_error_buffer(ctrl_info); 7134 if (rc) { 7135 dev_err(&ctrl_info->pci_dev->dev, 7136 "failed to allocate PQI error buffer\n"); 7137 return rc; 7138 } 7139 7140 /* 7141 * If the function we are about to call succeeds, the 7142 * controller will transition from legacy SIS mode 7143 * into PQI mode. 7144 */ 7145 rc = sis_init_base_struct_addr(ctrl_info); 7146 if (rc) { 7147 dev_err(&ctrl_info->pci_dev->dev, 7148 "error initializing PQI mode\n"); 7149 return rc; 7150 } 7151 7152 /* Wait for the controller to complete the SIS -> PQI transition. */ 7153 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 7154 if (rc) { 7155 dev_err(&ctrl_info->pci_dev->dev, 7156 "transition to PQI mode failed\n"); 7157 return rc; 7158 } 7159 7160 /* From here on, we are running in PQI mode. */ 7161 ctrl_info->pqi_mode_enabled = true; 7162 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7163 7164 rc = pqi_alloc_admin_queues(ctrl_info); 7165 if (rc) { 7166 dev_err(&ctrl_info->pci_dev->dev, 7167 "failed to allocate admin queues\n"); 7168 return rc; 7169 } 7170 7171 rc = pqi_create_admin_queues(ctrl_info); 7172 if (rc) { 7173 dev_err(&ctrl_info->pci_dev->dev, 7174 "error creating admin queues\n"); 7175 return rc; 7176 } 7177 7178 rc = pqi_report_device_capability(ctrl_info); 7179 if (rc) { 7180 dev_err(&ctrl_info->pci_dev->dev, 7181 "obtaining device capability failed\n"); 7182 return rc; 7183 } 7184 7185 rc = pqi_validate_device_capability(ctrl_info); 7186 if (rc) 7187 return rc; 7188 7189 pqi_calculate_queue_resources(ctrl_info); 7190 7191 rc = pqi_enable_msix_interrupts(ctrl_info); 7192 if (rc) 7193 return rc; 7194 7195 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 7196 ctrl_info->max_msix_vectors = 7197 ctrl_info->num_msix_vectors_enabled; 7198 pqi_calculate_queue_resources(ctrl_info); 7199 } 7200 7201 rc = pqi_alloc_io_resources(ctrl_info); 7202 if (rc) 7203 return rc; 7204 7205 rc = pqi_alloc_operational_queues(ctrl_info); 7206 if (rc) { 7207 dev_err(&ctrl_info->pci_dev->dev, 7208 "failed to allocate operational queues\n"); 7209 return rc; 7210 } 7211 7212 pqi_init_operational_queues(ctrl_info); 7213 7214 rc = pqi_request_irqs(ctrl_info); 7215 if (rc) 7216 return rc; 7217 7218 rc = pqi_create_queues(ctrl_info); 7219 if (rc) 7220 return rc; 7221 7222 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7223 7224 ctrl_info->controller_online = true; 7225 7226 rc = pqi_process_config_table(ctrl_info); 7227 if (rc) 7228 return rc; 7229 7230 pqi_start_heartbeat_timer(ctrl_info); 7231 7232 rc = pqi_enable_events(ctrl_info); 7233 if (rc) { 7234 dev_err(&ctrl_info->pci_dev->dev, 7235 "error enabling events\n"); 7236 return rc; 7237 } 7238 7239 /* Register with the SCSI subsystem. 
*/ 7240 rc = pqi_register_scsi(ctrl_info); 7241 if (rc) 7242 return rc; 7243 7244 rc = pqi_get_ctrl_product_details(ctrl_info); 7245 if (rc) { 7246 dev_err(&ctrl_info->pci_dev->dev, 7247 "error obtaining product details\n"); 7248 return rc; 7249 } 7250 7251 rc = pqi_get_ctrl_serial_number(ctrl_info); 7252 if (rc) { 7253 dev_err(&ctrl_info->pci_dev->dev, 7254 "error obtaining ctrl serial number\n"); 7255 return rc; 7256 } 7257 7258 rc = pqi_set_diag_rescan(ctrl_info); 7259 if (rc) { 7260 dev_err(&ctrl_info->pci_dev->dev, 7261 "error enabling multi-lun rescan\n"); 7262 return rc; 7263 } 7264 7265 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7266 if (rc) { 7267 dev_err(&ctrl_info->pci_dev->dev, 7268 "error updating host wellness\n"); 7269 return rc; 7270 } 7271 7272 pqi_schedule_update_time_worker(ctrl_info); 7273 7274 pqi_scan_scsi_devices(ctrl_info); 7275 7276 return 0; 7277 } 7278 7279 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 7280 { 7281 unsigned int i; 7282 struct pqi_admin_queues *admin_queues; 7283 struct pqi_event_queue *event_queue; 7284 7285 admin_queues = &ctrl_info->admin_queues; 7286 admin_queues->iq_pi_copy = 0; 7287 admin_queues->oq_ci_copy = 0; 7288 writel(0, admin_queues->oq_pi); 7289 7290 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 7291 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 7292 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 7293 ctrl_info->queue_groups[i].oq_ci_copy = 0; 7294 7295 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 7296 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 7297 writel(0, ctrl_info->queue_groups[i].oq_pi); 7298 } 7299 7300 event_queue = &ctrl_info->event_queue; 7301 writel(0, event_queue->oq_pi); 7302 event_queue->oq_ci_copy = 0; 7303 } 7304 7305 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 7306 { 7307 int rc; 7308 7309 rc = pqi_force_sis_mode(ctrl_info); 7310 if (rc) 7311 return rc; 7312 7313 /* 7314 * Wait until the controller is ready to start accepting SIS 7315 * commands. 7316 */ 7317 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 7318 if (rc) 7319 return rc; 7320 7321 /* 7322 * Get the controller properties. This allows us to determine 7323 * whether or not it supports PQI mode. 7324 */ 7325 rc = sis_get_ctrl_properties(ctrl_info); 7326 if (rc) { 7327 dev_err(&ctrl_info->pci_dev->dev, 7328 "error obtaining controller properties\n"); 7329 return rc; 7330 } 7331 7332 rc = sis_get_pqi_capabilities(ctrl_info); 7333 if (rc) { 7334 dev_err(&ctrl_info->pci_dev->dev, 7335 "error obtaining controller capabilities\n"); 7336 return rc; 7337 } 7338 7339 /* 7340 * If the function we are about to call succeeds, the 7341 * controller will transition from legacy SIS mode 7342 * into PQI mode. 7343 */ 7344 rc = sis_init_base_struct_addr(ctrl_info); 7345 if (rc) { 7346 dev_err(&ctrl_info->pci_dev->dev, 7347 "error initializing PQI mode\n"); 7348 return rc; 7349 } 7350 7351 /* Wait for the controller to complete the SIS -> PQI transition. */ 7352 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 7353 if (rc) { 7354 dev_err(&ctrl_info->pci_dev->dev, 7355 "transition to PQI mode failed\n"); 7356 return rc; 7357 } 7358 7359 /* From here on, we are running in PQI mode. 
*/ 7360 ctrl_info->pqi_mode_enabled = true; 7361 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7362 7363 pqi_reinit_queues(ctrl_info); 7364 7365 rc = pqi_create_admin_queues(ctrl_info); 7366 if (rc) { 7367 dev_err(&ctrl_info->pci_dev->dev, 7368 "error creating admin queues\n"); 7369 return rc; 7370 } 7371 7372 rc = pqi_create_queues(ctrl_info); 7373 if (rc) 7374 return rc; 7375 7376 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7377 7378 ctrl_info->controller_online = true; 7379 pqi_ctrl_unblock_requests(ctrl_info); 7380 7381 rc = pqi_process_config_table(ctrl_info); 7382 if (rc) 7383 return rc; 7384 7385 pqi_start_heartbeat_timer(ctrl_info); 7386 7387 rc = pqi_enable_events(ctrl_info); 7388 if (rc) { 7389 dev_err(&ctrl_info->pci_dev->dev, 7390 "error enabling events\n"); 7391 return rc; 7392 } 7393 7394 rc = pqi_get_ctrl_product_details(ctrl_info); 7395 if (rc) { 7396 dev_err(&ctrl_info->pci_dev->dev, 7397 "error obtaining product details\n"); 7398 return rc; 7399 } 7400 7401 rc = pqi_set_diag_rescan(ctrl_info); 7402 if (rc) { 7403 dev_err(&ctrl_info->pci_dev->dev, 7404 "error enabling multi-lun rescan\n"); 7405 return rc; 7406 } 7407 7408 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7409 if (rc) { 7410 dev_err(&ctrl_info->pci_dev->dev, 7411 "error updating host wellness\n"); 7412 return rc; 7413 } 7414 7415 pqi_schedule_update_time_worker(ctrl_info); 7416 7417 pqi_scan_scsi_devices(ctrl_info); 7418 7419 return 0; 7420 } 7421 7422 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 7423 u16 timeout) 7424 { 7425 int rc; 7426 7427 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 7428 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 7429 7430 return pcibios_err_to_errno(rc); 7431 } 7432 7433 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 7434 { 7435 int rc; 7436 u64 mask; 7437 7438 rc = pci_enable_device(ctrl_info->pci_dev); 7439 if (rc) { 7440 dev_err(&ctrl_info->pci_dev->dev, 7441 "failed to enable PCI device\n"); 7442 return rc; 7443 } 7444 7445 if (sizeof(dma_addr_t) > 4) 7446 mask = DMA_BIT_MASK(64); 7447 else 7448 mask = DMA_BIT_MASK(32); 7449 7450 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 7451 if (rc) { 7452 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 7453 goto disable_device; 7454 } 7455 7456 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 7457 if (rc) { 7458 dev_err(&ctrl_info->pci_dev->dev, 7459 "failed to obtain PCI resources\n"); 7460 goto disable_device; 7461 } 7462 7463 ctrl_info->iomem_base = ioremap(pci_resource_start( 7464 ctrl_info->pci_dev, 0), 7465 sizeof(struct pqi_ctrl_registers)); 7466 if (!ctrl_info->iomem_base) { 7467 dev_err(&ctrl_info->pci_dev->dev, 7468 "failed to map memory for controller registers\n"); 7469 rc = -ENOMEM; 7470 goto release_regions; 7471 } 7472 7473 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 7474 7475 /* Increase the PCIe completion timeout. */ 7476 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 7477 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 7478 if (rc) { 7479 dev_err(&ctrl_info->pci_dev->dev, 7480 "failed to set PCIe completion timeout\n"); 7481 goto release_regions; 7482 } 7483 7484 /* Enable bus mastering. 
*/ 7485 pci_set_master(ctrl_info->pci_dev); 7486 7487 ctrl_info->registers = ctrl_info->iomem_base; 7488 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 7489 7490 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 7491 7492 return 0; 7493 7494 release_regions: 7495 pci_release_regions(ctrl_info->pci_dev); 7496 disable_device: 7497 pci_disable_device(ctrl_info->pci_dev); 7498 7499 return rc; 7500 } 7501 7502 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 7503 { 7504 iounmap(ctrl_info->iomem_base); 7505 pci_release_regions(ctrl_info->pci_dev); 7506 if (pci_is_enabled(ctrl_info->pci_dev)) 7507 pci_disable_device(ctrl_info->pci_dev); 7508 pci_set_drvdata(ctrl_info->pci_dev, NULL); 7509 } 7510 7511 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 7512 { 7513 struct pqi_ctrl_info *ctrl_info; 7514 7515 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 7516 GFP_KERNEL, numa_node); 7517 if (!ctrl_info) 7518 return NULL; 7519 7520 mutex_init(&ctrl_info->scan_mutex); 7521 mutex_init(&ctrl_info->lun_reset_mutex); 7522 mutex_init(&ctrl_info->ofa_mutex); 7523 7524 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 7525 spin_lock_init(&ctrl_info->scsi_device_list_lock); 7526 7527 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 7528 atomic_set(&ctrl_info->num_interrupts, 0); 7529 atomic_set(&ctrl_info->sync_cmds_outstanding, 0); 7530 7531 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 7532 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 7533 7534 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 7535 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 7536 7537 sema_init(&ctrl_info->sync_request_sem, 7538 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 7539 init_waitqueue_head(&ctrl_info->block_requests_wait); 7540 7541 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 7542 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 7543 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 7544 pqi_raid_bypass_retry_worker); 7545 7546 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 7547 ctrl_info->irq_mode = IRQ_MODE_NONE; 7548 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 7549 7550 return ctrl_info; 7551 } 7552 7553 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 7554 { 7555 kfree(ctrl_info); 7556 } 7557 7558 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 7559 { 7560 pqi_free_irqs(ctrl_info); 7561 pqi_disable_msix_interrupts(ctrl_info); 7562 } 7563 7564 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 7565 { 7566 pqi_stop_heartbeat_timer(ctrl_info); 7567 pqi_free_interrupts(ctrl_info); 7568 if (ctrl_info->queue_memory_base) 7569 dma_free_coherent(&ctrl_info->pci_dev->dev, 7570 ctrl_info->queue_memory_length, 7571 ctrl_info->queue_memory_base, 7572 ctrl_info->queue_memory_base_dma_handle); 7573 if (ctrl_info->admin_queue_memory_base) 7574 dma_free_coherent(&ctrl_info->pci_dev->dev, 7575 ctrl_info->admin_queue_memory_length, 7576 ctrl_info->admin_queue_memory_base, 7577 ctrl_info->admin_queue_memory_base_dma_handle); 7578 pqi_free_all_io_requests(ctrl_info); 7579 if (ctrl_info->error_buffer) 7580 dma_free_coherent(&ctrl_info->pci_dev->dev, 7581 ctrl_info->error_buffer_length, 7582 ctrl_info->error_buffer, 7583 ctrl_info->error_buffer_dma_handle); 7584 if (ctrl_info->iomem_base) 7585 pqi_cleanup_pci_init(ctrl_info); 7586 pqi_free_ctrl_info(ctrl_info); 7587 } 7588 7589 static void pqi_remove_ctrl(struct 
pqi_ctrl_info *ctrl_info) 7590 { 7591 pqi_cancel_rescan_worker(ctrl_info); 7592 pqi_cancel_update_time_worker(ctrl_info); 7593 pqi_remove_all_scsi_devices(ctrl_info); 7594 pqi_unregister_scsi(ctrl_info); 7595 if (ctrl_info->pqi_mode_enabled) 7596 pqi_revert_to_sis_mode(ctrl_info); 7597 pqi_free_ctrl_resources(ctrl_info); 7598 } 7599 7600 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 7601 { 7602 pqi_cancel_update_time_worker(ctrl_info); 7603 pqi_cancel_rescan_worker(ctrl_info); 7604 pqi_wait_until_lun_reset_finished(ctrl_info); 7605 pqi_wait_until_scan_finished(ctrl_info); 7606 pqi_ctrl_ofa_start(ctrl_info); 7607 pqi_ctrl_block_requests(ctrl_info); 7608 pqi_ctrl_wait_until_quiesced(ctrl_info); 7609 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); 7610 pqi_fail_io_queued_for_all_devices(ctrl_info); 7611 pqi_wait_until_inbound_queues_empty(ctrl_info); 7612 pqi_stop_heartbeat_timer(ctrl_info); 7613 ctrl_info->pqi_mode_enabled = false; 7614 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7615 } 7616 7617 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 7618 { 7619 pqi_ofa_free_host_buffer(ctrl_info); 7620 ctrl_info->pqi_mode_enabled = true; 7621 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7622 ctrl_info->controller_online = true; 7623 pqi_ctrl_unblock_requests(ctrl_info); 7624 pqi_start_heartbeat_timer(ctrl_info); 7625 pqi_schedule_update_time_worker(ctrl_info); 7626 pqi_clear_soft_reset_status(ctrl_info, 7627 PQI_SOFT_RESET_ABORT); 7628 pqi_scan_scsi_devices(ctrl_info); 7629 } 7630 7631 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, 7632 u32 total_size, u32 chunk_size) 7633 { 7634 u32 sg_count; 7635 u32 size; 7636 int i; 7637 struct pqi_sg_descriptor *mem_descriptor = NULL; 7638 struct device *dev; 7639 struct pqi_ofa_memory *ofap; 7640 7641 dev = &ctrl_info->pci_dev->dev; 7642 7643 sg_count = (total_size + chunk_size - 1); 7644 sg_count /= chunk_size; 7645 7646 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7647 7648 if (sg_count*chunk_size < total_size) 7649 goto out; 7650 7651 ctrl_info->pqi_ofa_chunk_virt_addr = 7652 kcalloc(sg_count, sizeof(void *), GFP_KERNEL); 7653 if (!ctrl_info->pqi_ofa_chunk_virt_addr) 7654 goto out; 7655 7656 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) { 7657 dma_addr_t dma_handle; 7658 7659 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7660 dma_alloc_coherent(dev, chunk_size, &dma_handle, 7661 GFP_KERNEL); 7662 7663 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7664 break; 7665 7666 mem_descriptor = &ofap->sg_descriptor[i]; 7667 put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address); 7668 put_unaligned_le32 (chunk_size, &mem_descriptor->length); 7669 } 7670 7671 if (!size || size < total_size) 7672 goto out_free_chunks; 7673 7674 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 7675 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); 7676 put_unaligned_le32(size, &ofap->bytes_allocated); 7677 7678 return 0; 7679 7680 out_free_chunks: 7681 while (--i >= 0) { 7682 mem_descriptor = &ofap->sg_descriptor[i]; 7683 dma_free_coherent(dev, chunk_size, 7684 ctrl_info->pqi_ofa_chunk_virt_addr[i], 7685 get_unaligned_le64(&mem_descriptor->address)); 7686 } 7687 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 7688 7689 out: 7690 put_unaligned_le32 (0, &ofap->bytes_allocated); 7691 return -ENOMEM; 7692 } 7693 7694 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) 7695 { 7696 u32 total_size; 7697 u32 min_chunk_size; 7698 u32 chunk_sz; 7699 7700 total_size = 
	total_size = le32_to_cpu(
		ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
	min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;

	for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
		if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
			return 0;

	return -ENOMEM;
}

static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested)
{
	struct pqi_ofa_memory *pqi_ofa_memory;
	struct device *dev;

	dev = &ctrl_info->pci_dev->dev;
	pqi_ofa_memory = dma_alloc_coherent(dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
		&ctrl_info->pqi_ofa_mem_dma_handle,
		GFP_KERNEL);

	if (!pqi_ofa_memory)
		return;

	put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
	memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
		sizeof(pqi_ofa_memory->signature));
	pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);

	ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev, "Failed to allocate host buffer of size = %u",
			bytes_requested);
	}

	return;
}

static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	struct pqi_sg_descriptor *mem_descriptor;
	struct pqi_ofa_memory *ofap;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (!ofap)
		return;

	if (!ofap->bytes_allocated)
		goto out;

	mem_descriptor = ofap->sg_descriptor;

	for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
		i++) {
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(&ctrl_info->pci_dev->dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}

static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_vendor_general_request request;
	size_t size;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	if (ofap) {
		size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(size,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);
}

static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	return pqi_ctrl_init_resume(ctrl_info);
}

static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
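		/* panic() does not return; the break below is never reached. */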
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}

static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};

static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}

static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}

static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microsemi Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}

static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node, cp_node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		cp_node = cpu_to_node(0);
		if (cp_node == NUMA_NO_NODE)
			cp_node = 0;
		set_dev_node(&pci_dev->dev, cp_node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
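		/*
		 * Nothing has been attached to the pci_dev yet at this point,
		 * so a plain -ENOMEM return (below) is enough; failures after
		 * this allocation go through the error label and
		 * pqi_remove_ctrl() instead.
		 */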
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}

static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	ctrl_info->in_shutdown = true;

	pqi_remove_ctrl(ctrl_info);
}

static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
	}
}

static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_disable_events(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_event_worker(ctrl_info);

	pqi_ctrl_shutdown_start(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending I/O failed\n");
		return;
	}

	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);

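	/*
	 * Device resets are blocked and any in-flight LUN reset has been
	 * waited out above, presumably so that the cache flush issued next
	 * is not raced by a reset.
	 */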
	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_ctrl_block_requests(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending sync cmds failed\n");
		return;
	}

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}

static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}

static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_disable_events(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_stop_heartbeat_timer(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	return pqi_ctrl_init_resume(ctrl_info);
}

/* Define the PCI IDs for the controllers that we support.
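 * Entries are distinguished by subsystem vendor/device ID; the table ends
 * with a PCI_ANY_ID/PCI_ANY_ID catch-all entry, which is the case the
 * disable_device_id_wildcards handling in pqi_pci_probe() guards against.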
*/ 8128 static const struct pci_device_id pqi_pci_id_table[] = { 8129 { 8130 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8131 0x105b, 0x1211) 8132 }, 8133 { 8134 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8135 0x105b, 0x1321) 8136 }, 8137 { 8138 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8139 0x152d, 0x8a22) 8140 }, 8141 { 8142 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8143 0x152d, 0x8a23) 8144 }, 8145 { 8146 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8147 0x152d, 0x8a24) 8148 }, 8149 { 8150 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8151 0x152d, 0x8a36) 8152 }, 8153 { 8154 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8155 0x152d, 0x8a37) 8156 }, 8157 { 8158 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8159 0x193d, 0x1104) 8160 }, 8161 { 8162 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8163 0x193d, 0x1105) 8164 }, 8165 { 8166 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8167 0x193d, 0x1106) 8168 }, 8169 { 8170 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8171 0x193d, 0x1107) 8172 }, 8173 { 8174 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8175 0x193d, 0x8460) 8176 }, 8177 { 8178 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8179 0x193d, 0x8461) 8180 }, 8181 { 8182 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8183 0x193d, 0xc460) 8184 }, 8185 { 8186 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8187 0x193d, 0xc461) 8188 }, 8189 { 8190 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8191 0x193d, 0xf460) 8192 }, 8193 { 8194 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8195 0x193d, 0xf461) 8196 }, 8197 { 8198 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8199 0x1bd4, 0x0045) 8200 }, 8201 { 8202 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8203 0x1bd4, 0x0046) 8204 }, 8205 { 8206 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8207 0x1bd4, 0x0047) 8208 }, 8209 { 8210 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8211 0x1bd4, 0x0048) 8212 }, 8213 { 8214 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8215 0x1bd4, 0x004a) 8216 }, 8217 { 8218 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8219 0x1bd4, 0x004b) 8220 }, 8221 { 8222 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8223 0x1bd4, 0x004c) 8224 }, 8225 { 8226 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8227 0x1bd4, 0x004f) 8228 }, 8229 { 8230 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8231 0x19e5, 0xd227) 8232 }, 8233 { 8234 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8235 0x19e5, 0xd228) 8236 }, 8237 { 8238 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8239 0x19e5, 0xd229) 8240 }, 8241 { 8242 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8243 0x19e5, 0xd22a) 8244 }, 8245 { 8246 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8247 0x19e5, 0xd22b) 8248 }, 8249 { 8250 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8251 0x19e5, 0xd22c) 8252 }, 8253 { 8254 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8255 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 8256 }, 8257 { 8258 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8259 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 8260 }, 8261 { 8262 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8263 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 8264 }, 8265 { 8266 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8267 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 8268 }, 8269 { 8270 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8271 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 8272 }, 8273 { 8274 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8275 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 8276 }, 8277 { 8278 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8279 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 8280 }, 8281 { 8282 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8283 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 8284 }, 8285 { 8286 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8287 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 8288 }, 8289 { 8290 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8291 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 8292 }, 8293 { 8294 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8295 PCI_VENDOR_ID_ADAPTEC2, 0x0808) 8296 }, 8297 { 8298 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8299 PCI_VENDOR_ID_ADAPTEC2, 0x0809) 8300 }, 8301 { 8302 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8303 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 8304 }, 8305 { 8306 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8307 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 8308 }, 8309 { 8310 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8311 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 8312 }, 8313 { 8314 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8315 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 8316 }, 8317 { 8318 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8319 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 8320 }, 8321 { 8322 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8323 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 8324 }, 8325 { 8326 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8327 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 8328 }, 8329 { 8330 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8331 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 8332 }, 8333 { 8334 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8335 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 8336 }, 8337 { 8338 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8339 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 8340 }, 8341 { 8342 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8343 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 8344 }, 8345 { 8346 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8347 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 8348 }, 8349 { 8350 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8351 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 8352 }, 8353 { 8354 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8355 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 8356 }, 8357 { 8358 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8359 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 8360 }, 8361 { 8362 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8363 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 8364 }, 8365 { 8366 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8367 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 8368 }, 8369 { 8370 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8371 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 8372 }, 8373 { 8374 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8375 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 8376 }, 8377 { 8378 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8379 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 8380 }, 8381 { 8382 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8383 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 8384 }, 8385 { 8386 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8387 PCI_VENDOR_ID_ADVANTECH, 0x8312) 8388 }, 8389 { 8390 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8391 PCI_VENDOR_ID_DELL, 0x1fe0) 8392 }, 8393 { 8394 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8395 PCI_VENDOR_ID_HP, 0x0600) 8396 }, 8397 { 8398 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8399 PCI_VENDOR_ID_HP, 0x0601) 8400 }, 8401 { 8402 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8403 PCI_VENDOR_ID_HP, 0x0602) 8404 }, 8405 { 8406 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8407 PCI_VENDOR_ID_HP, 0x0603) 8408 }, 8409 { 8410 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8411 PCI_VENDOR_ID_HP, 0x0609) 8412 }, 8413 { 8414 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8415 PCI_VENDOR_ID_HP, 0x0650) 8416 }, 8417 { 8418 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8419 
PCI_VENDOR_ID_HP, 0x0651) 8420 }, 8421 { 8422 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8423 PCI_VENDOR_ID_HP, 0x0652) 8424 }, 8425 { 8426 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8427 PCI_VENDOR_ID_HP, 0x0653) 8428 }, 8429 { 8430 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8431 PCI_VENDOR_ID_HP, 0x0654) 8432 }, 8433 { 8434 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8435 PCI_VENDOR_ID_HP, 0x0655) 8436 }, 8437 { 8438 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8439 PCI_VENDOR_ID_HP, 0x0700) 8440 }, 8441 { 8442 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8443 PCI_VENDOR_ID_HP, 0x0701) 8444 }, 8445 { 8446 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8447 PCI_VENDOR_ID_HP, 0x1001) 8448 }, 8449 { 8450 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8451 PCI_VENDOR_ID_HP, 0x1100) 8452 }, 8453 { 8454 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8455 PCI_VENDOR_ID_HP, 0x1101) 8456 }, 8457 { 8458 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8459 0x1d8d, 0x0800) 8460 }, 8461 { 8462 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8463 0x1d8d, 0x0908) 8464 }, 8465 { 8466 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8467 0x1d8d, 0x0806) 8468 }, 8469 { 8470 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8471 0x1d8d, 0x0916) 8472 }, 8473 { 8474 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8475 PCI_VENDOR_ID_GIGABYTE, 0x1000) 8476 }, 8477 { 8478 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8479 PCI_ANY_ID, PCI_ANY_ID) 8480 }, 8481 { 0 } 8482 }; 8483 8484 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 8485 8486 static struct pci_driver pqi_pci_driver = { 8487 .name = DRIVER_NAME_SHORT, 8488 .id_table = pqi_pci_id_table, 8489 .probe = pqi_pci_probe, 8490 .remove = pqi_pci_remove, 8491 .shutdown = pqi_shutdown, 8492 #if defined(CONFIG_PM) 8493 .suspend = pqi_suspend, 8494 .resume = pqi_resume, 8495 #endif 8496 }; 8497 8498 static int __init pqi_init(void) 8499 { 8500 int rc; 8501 8502 pr_info(DRIVER_NAME "\n"); 8503 8504 pqi_sas_transport_template = 8505 sas_attach_transport(&pqi_sas_transport_functions); 8506 if (!pqi_sas_transport_template) 8507 return -ENODEV; 8508 8509 pqi_process_module_params(); 8510 8511 rc = pci_register_driver(&pqi_pci_driver); 8512 if (rc) 8513 sas_release_transport(pqi_sas_transport_template); 8514 8515 return rc; 8516 } 8517 8518 static void __exit pqi_cleanup(void) 8519 { 8520 pci_unregister_driver(&pqi_pci_driver); 8521 sas_release_transport(pqi_sas_transport_template); 8522 } 8523 8524 module_init(pqi_init); 8525 module_exit(pqi_cleanup); 8526 8527 static void __attribute__((unused)) verify_structures(void) 8528 { 8529 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8530 sis_host_to_ctrl_doorbell) != 0x20); 8531 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8532 sis_interrupt_mask) != 0x34); 8533 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8534 sis_ctrl_to_host_doorbell) != 0x9c); 8535 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8536 sis_ctrl_to_host_doorbell_clear) != 0xa0); 8537 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8538 sis_driver_scratch) != 0xb0); 8539 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8540 sis_firmware_status) != 0xbc); 8541 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8542 sis_mailbox) != 0x1000); 8543 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8544 pqi_registers) != 0x4000); 8545 8546 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 8547 iu_type) != 0x0); 8548 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 8549 iu_length) != 0x2); 8550 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 8551 
response_queue_id) != 0x4); 8552 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 8553 work_area) != 0x6); 8554 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 8555 8556 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8557 status) != 0x0); 8558 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8559 service_response) != 0x1); 8560 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8561 data_present) != 0x2); 8562 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8563 reserved) != 0x3); 8564 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8565 residual_count) != 0x4); 8566 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8567 data_length) != 0x8); 8568 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8569 reserved1) != 0xa); 8570 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8571 data) != 0xc); 8572 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 8573 8574 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8575 data_in_result) != 0x0); 8576 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8577 data_out_result) != 0x1); 8578 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8579 reserved) != 0x2); 8580 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8581 status) != 0x5); 8582 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8583 status_qualifier) != 0x6); 8584 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8585 sense_data_length) != 0x8); 8586 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8587 response_data_length) != 0xa); 8588 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8589 data_in_transferred) != 0xc); 8590 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8591 data_out_transferred) != 0x10); 8592 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8593 data) != 0x14); 8594 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 8595 8596 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8597 signature) != 0x0); 8598 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8599 function_and_status_code) != 0x8); 8600 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8601 max_admin_iq_elements) != 0x10); 8602 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8603 max_admin_oq_elements) != 0x11); 8604 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8605 admin_iq_element_length) != 0x12); 8606 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8607 admin_oq_element_length) != 0x13); 8608 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8609 max_reset_timeout) != 0x14); 8610 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8611 legacy_intx_status) != 0x18); 8612 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8613 legacy_intx_mask_set) != 0x1c); 8614 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8615 legacy_intx_mask_clear) != 0x20); 8616 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8617 device_status) != 0x40); 8618 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8619 admin_iq_pi_offset) != 0x48); 8620 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8621 admin_oq_ci_offset) != 0x50); 8622 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8623 admin_iq_element_array_addr) != 0x58); 8624 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8625 admin_oq_element_array_addr) != 0x60); 8626 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8627 admin_iq_ci_addr) != 0x68); 8628 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8629 admin_oq_pi_addr) != 0x70); 8630 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8631 admin_iq_num_elements) != 0x78); 8632 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8633 admin_oq_num_elements) != 0x79); 
8634 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8635 admin_queue_int_msg_num) != 0x7a); 8636 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8637 device_error) != 0x80); 8638 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8639 error_details) != 0x88); 8640 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8641 device_reset) != 0x90); 8642 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8643 power_action) != 0x94); 8644 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 8645 8646 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8647 header.iu_type) != 0); 8648 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8649 header.iu_length) != 2); 8650 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8651 header.work_area) != 6); 8652 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8653 request_id) != 8); 8654 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8655 function_code) != 10); 8656 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8657 data.report_device_capability.buffer_length) != 44); 8658 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8659 data.report_device_capability.sg_descriptor) != 48); 8660 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8661 data.create_operational_iq.queue_id) != 12); 8662 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8663 data.create_operational_iq.element_array_addr) != 16); 8664 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8665 data.create_operational_iq.ci_addr) != 24); 8666 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8667 data.create_operational_iq.num_elements) != 32); 8668 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8669 data.create_operational_iq.element_length) != 34); 8670 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8671 data.create_operational_iq.queue_protocol) != 36); 8672 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8673 data.create_operational_oq.queue_id) != 12); 8674 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8675 data.create_operational_oq.element_array_addr) != 16); 8676 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8677 data.create_operational_oq.pi_addr) != 24); 8678 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8679 data.create_operational_oq.num_elements) != 32); 8680 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8681 data.create_operational_oq.element_length) != 34); 8682 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8683 data.create_operational_oq.queue_protocol) != 36); 8684 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8685 data.create_operational_oq.int_msg_num) != 40); 8686 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8687 data.create_operational_oq.coalescing_count) != 42); 8688 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8689 data.create_operational_oq.min_coalescing_time) != 44); 8690 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8691 data.create_operational_oq.max_coalescing_time) != 48); 8692 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8693 data.delete_operational_queue.queue_id) != 12); 8694 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 8695 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8696 data.create_operational_iq) != 64 - 11); 8697 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8698 data.create_operational_oq) != 64 - 11); 8699 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8700 data.delete_operational_queue) 
!= 64 - 11); 8701 8702 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8703 header.iu_type) != 0); 8704 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8705 header.iu_length) != 2); 8706 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8707 header.work_area) != 6); 8708 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8709 request_id) != 8); 8710 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8711 function_code) != 10); 8712 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8713 status) != 11); 8714 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8715 data.create_operational_iq.status_descriptor) != 12); 8716 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8717 data.create_operational_iq.iq_pi_offset) != 16); 8718 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8719 data.create_operational_oq.status_descriptor) != 12); 8720 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8721 data.create_operational_oq.oq_ci_offset) != 16); 8722 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 8723 8724 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8725 header.iu_type) != 0); 8726 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8727 header.iu_length) != 2); 8728 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8729 header.response_queue_id) != 4); 8730 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8731 header.work_area) != 6); 8732 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8733 request_id) != 8); 8734 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8735 nexus_id) != 10); 8736 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8737 buffer_length) != 12); 8738 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8739 lun_number) != 16); 8740 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8741 protocol_specific) != 24); 8742 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8743 error_index) != 27); 8744 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8745 cdb) != 32); 8746 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8747 timeout) != 60); 8748 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8749 sg_descriptors) != 64); 8750 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 8751 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 8752 8753 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8754 header.iu_type) != 0); 8755 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8756 header.iu_length) != 2); 8757 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8758 header.response_queue_id) != 4); 8759 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8760 header.work_area) != 6); 8761 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8762 request_id) != 8); 8763 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8764 nexus_id) != 12); 8765 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8766 buffer_length) != 16); 8767 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8768 data_encryption_key_index) != 22); 8769 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8770 encrypt_tweak_lower) != 24); 8771 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8772 encrypt_tweak_upper) != 28); 8773 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8774 cdb) != 32); 8775 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8776 error_index) != 48); 8777 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8778 num_sg_descriptors) != 50); 8779 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8780 cdb_length) != 51); 8781 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8782 
lun_number) != 52); 8783 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8784 sg_descriptors) != 64); 8785 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 8786 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 8787 8788 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8789 header.iu_type) != 0); 8790 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8791 header.iu_length) != 2); 8792 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8793 request_id) != 8); 8794 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8795 error_index) != 10); 8796 8797 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8798 header.iu_type) != 0); 8799 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8800 header.iu_length) != 2); 8801 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8802 header.response_queue_id) != 4); 8803 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8804 request_id) != 8); 8805 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8806 data.report_event_configuration.buffer_length) != 12); 8807 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8808 data.report_event_configuration.sg_descriptors) != 16); 8809 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8810 data.set_event_configuration.global_event_oq_id) != 10); 8811 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8812 data.set_event_configuration.buffer_length) != 12); 8813 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8814 data.set_event_configuration.sg_descriptors) != 16); 8815 8816 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 8817 max_inbound_iu_length) != 6); 8818 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 8819 max_outbound_iu_length) != 14); 8820 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 8821 8822 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8823 data_length) != 0); 8824 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8825 iq_arbitration_priority_support_bitmask) != 8); 8826 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8827 maximum_aw_a) != 9); 8828 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8829 maximum_aw_b) != 10); 8830 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8831 maximum_aw_c) != 11); 8832 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8833 max_inbound_queues) != 16); 8834 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8835 max_elements_per_iq) != 18); 8836 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8837 max_iq_element_length) != 24); 8838 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8839 min_iq_element_length) != 26); 8840 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8841 max_outbound_queues) != 30); 8842 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8843 max_elements_per_oq) != 32); 8844 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8845 intr_coalescing_time_granularity) != 34); 8846 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8847 max_oq_element_length) != 36); 8848 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8849 min_oq_element_length) != 38); 8850 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8851 iu_layer_descriptors) != 64); 8852 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 8853 8854 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 8855 event_type) != 0); 8856 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 8857 oq_id) != 2); 8858 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 8859 8860 BUILD_BUG_ON(offsetof(struct pqi_event_config, 8861 num_event_descriptors) != 2); 
8862 BUILD_BUG_ON(offsetof(struct pqi_event_config, 8863 descriptors) != 4); 8864 8865 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 8866 ARRAY_SIZE(pqi_supported_event_types)); 8867 8868 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8869 header.iu_type) != 0); 8870 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8871 header.iu_length) != 2); 8872 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8873 event_type) != 8); 8874 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8875 event_id) != 10); 8876 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8877 additional_event_id) != 12); 8878 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8879 data) != 16); 8880 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 8881 8882 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8883 header.iu_type) != 0); 8884 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8885 header.iu_length) != 2); 8886 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8887 event_type) != 8); 8888 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8889 event_id) != 10); 8890 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8891 additional_event_id) != 12); 8892 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 8893 8894 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8895 header.iu_type) != 0); 8896 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8897 header.iu_length) != 2); 8898 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8899 request_id) != 8); 8900 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8901 nexus_id) != 10); 8902 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8903 timeout) != 14); 8904 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8905 lun_number) != 16); 8906 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8907 protocol_specific) != 24); 8908 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8909 outbound_queue_id_to_manage) != 26); 8910 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8911 request_id_to_manage) != 28); 8912 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8913 task_management_function) != 30); 8914 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 8915 8916 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8917 header.iu_type) != 0); 8918 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8919 header.iu_length) != 2); 8920 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8921 request_id) != 8); 8922 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8923 nexus_id) != 10); 8924 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8925 additional_response_info) != 12); 8926 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8927 response_code) != 15); 8928 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 8929 8930 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8931 configured_logical_drive_count) != 0); 8932 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8933 configuration_signature) != 1); 8934 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8935 firmware_version) != 5); 8936 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8937 extended_logical_unit_count) != 154); 8938 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8939 firmware_build_number) != 190); 8940 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8941 controller_mode) != 292); 8942 8943 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 
8944 phys_bay_in_box) != 115); 8945 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 8946 device_type) != 120); 8947 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 8948 redundant_path_present_map) != 1736); 8949 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 8950 active_path_number) != 1738); 8951 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 8952 alternate_paths_phys_connector) != 1739); 8953 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 8954 alternate_paths_phys_box_on_port) != 1755); 8955 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 8956 current_queue_depth_limit) != 1796); 8957 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 8958 8959 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 8960 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 8961 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 8962 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 8963 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 8964 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 8965 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 8966 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 8967 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 8968 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 8969 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 8970 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 8971 8972 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 8973 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 8974 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 8975 } 8976