// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.16-012"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		16
#define DRIVER_REVISION		12

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

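/*
 * Lookup table for the "lockup_action" module parameter defined further
 * below; it maps each supported string ("none", "reboot", "panic") to the
 * corresponding enum pqi_lockup_action value.  A typical invocation that
 * selects the reboot behavior at load time (example only) would be:
 *
 *	modprobe smartpqi lockup_action=reboot
 */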
static struct { 96 enum pqi_lockup_action action; 97 char *name; 98 } pqi_lockup_actions[] = { 99 { 100 .action = NONE, 101 .name = "none", 102 }, 103 { 104 .action = REBOOT, 105 .name = "reboot", 106 }, 107 { 108 .action = PANIC, 109 .name = "panic", 110 }, 111 }; 112 113 static unsigned int pqi_supported_event_types[] = { 114 PQI_EVENT_TYPE_HOTPLUG, 115 PQI_EVENT_TYPE_HARDWARE, 116 PQI_EVENT_TYPE_PHYSICAL_DEVICE, 117 PQI_EVENT_TYPE_LOGICAL_DEVICE, 118 PQI_EVENT_TYPE_OFA, 119 PQI_EVENT_TYPE_AIO_STATE_CHANGE, 120 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, 121 }; 122 123 static int pqi_disable_device_id_wildcards; 124 module_param_named(disable_device_id_wildcards, 125 pqi_disable_device_id_wildcards, int, 0644); 126 MODULE_PARM_DESC(disable_device_id_wildcards, 127 "Disable device ID wildcards."); 128 129 static int pqi_disable_heartbeat; 130 module_param_named(disable_heartbeat, 131 pqi_disable_heartbeat, int, 0644); 132 MODULE_PARM_DESC(disable_heartbeat, 133 "Disable heartbeat."); 134 135 static int pqi_disable_ctrl_shutdown; 136 module_param_named(disable_ctrl_shutdown, 137 pqi_disable_ctrl_shutdown, int, 0644); 138 MODULE_PARM_DESC(disable_ctrl_shutdown, 139 "Disable controller shutdown when controller locked up."); 140 141 static char *pqi_lockup_action_param; 142 module_param_named(lockup_action, 143 pqi_lockup_action_param, charp, 0644); 144 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" 145 "\t\tSupported: none, reboot, panic\n" 146 "\t\tDefault: none"); 147 148 static int pqi_expose_ld_first; 149 module_param_named(expose_ld_first, 150 pqi_expose_ld_first, int, 0644); 151 MODULE_PARM_DESC(expose_ld_first, 152 "Expose logical drives before physical drives."); 153 154 static int pqi_hide_vsep; 155 module_param_named(hide_vsep, 156 pqi_hide_vsep, int, 0644); 157 MODULE_PARM_DESC(hide_vsep, 158 "Hide the virtual SEP for direct attached drives."); 159 160 static char *raid_levels[] = { 161 "RAID-0", 162 "RAID-4", 163 "RAID-1(1+0)", 164 "RAID-5", 165 "RAID-5+1", 166 "RAID-ADG", 167 "RAID-1(ADM)", 168 }; 169 170 static char *pqi_raid_level_to_string(u8 raid_level) 171 { 172 if (raid_level < ARRAY_SIZE(raid_levels)) 173 return raid_levels[raid_level]; 174 175 return "RAID UNKNOWN"; 176 } 177 178 #define SA_RAID_0 0 179 #define SA_RAID_4 1 180 #define SA_RAID_1 2 /* also used for RAID 10 */ 181 #define SA_RAID_5 3 /* also used for RAID 50 */ 182 #define SA_RAID_51 4 183 #define SA_RAID_6 5 /* also used for RAID 60 */ 184 #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ 185 #define SA_RAID_MAX SA_RAID_ADM 186 #define SA_RAID_UNKNOWN 0xff 187 188 static inline void pqi_scsi_done(struct scsi_cmnd *scmd) 189 { 190 pqi_prep_for_scsi_done(scmd); 191 scmd->scsi_done(scmd); 192 } 193 194 static inline void pqi_disable_write_same(struct scsi_device *sdev) 195 { 196 sdev->no_write_same = 1; 197 } 198 199 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) 200 { 201 return memcmp(scsi3addr1, scsi3addr2, 8) == 0; 202 } 203 204 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) 205 { 206 return !device->is_physical_device; 207 } 208 209 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) 210 { 211 return scsi3addr[2] != 0; 212 } 213 214 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 215 { 216 return !ctrl_info->controller_online; 217 } 218 219 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) 220 { 221 if (ctrl_info->controller_online) 222 if (!sis_is_firmware_running(ctrl_info)) 223 
pqi_take_ctrl_offline(ctrl_info); 224 } 225 226 static inline bool pqi_is_hba_lunid(u8 *scsi3addr) 227 { 228 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); 229 } 230 231 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode( 232 struct pqi_ctrl_info *ctrl_info) 233 { 234 return sis_read_driver_scratch(ctrl_info); 235 } 236 237 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, 238 enum pqi_ctrl_mode mode) 239 { 240 sis_write_driver_scratch(ctrl_info, mode); 241 } 242 243 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) 244 { 245 ctrl_info->block_device_reset = true; 246 } 247 248 static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info) 249 { 250 return ctrl_info->block_device_reset; 251 } 252 253 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 254 { 255 return ctrl_info->block_requests; 256 } 257 258 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) 259 { 260 ctrl_info->block_requests = true; 261 scsi_block_requests(ctrl_info->scsi_host); 262 } 263 264 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) 265 { 266 ctrl_info->block_requests = false; 267 wake_up_all(&ctrl_info->block_requests_wait); 268 pqi_retry_raid_bypass_requests(ctrl_info); 269 scsi_unblock_requests(ctrl_info->scsi_host); 270 } 271 272 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, 273 unsigned long timeout_msecs) 274 { 275 unsigned long remaining_msecs; 276 277 if (!pqi_ctrl_blocked(ctrl_info)) 278 return timeout_msecs; 279 280 atomic_inc(&ctrl_info->num_blocked_threads); 281 282 if (timeout_msecs == NO_TIMEOUT) { 283 wait_event(ctrl_info->block_requests_wait, 284 !pqi_ctrl_blocked(ctrl_info)); 285 remaining_msecs = timeout_msecs; 286 } else { 287 unsigned long remaining_jiffies; 288 289 remaining_jiffies = 290 wait_event_timeout(ctrl_info->block_requests_wait, 291 !pqi_ctrl_blocked(ctrl_info), 292 msecs_to_jiffies(timeout_msecs)); 293 remaining_msecs = jiffies_to_msecs(remaining_jiffies); 294 } 295 296 atomic_dec(&ctrl_info->num_blocked_threads); 297 298 return remaining_msecs; 299 } 300 301 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) 302 { 303 while (atomic_read(&ctrl_info->num_busy_threads) > 304 atomic_read(&ctrl_info->num_blocked_threads)) 305 usleep_range(1000, 2000); 306 } 307 308 static inline bool pqi_device_offline(struct pqi_scsi_dev *device) 309 { 310 return device->device_offline; 311 } 312 313 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device) 314 { 315 device->in_reset = true; 316 } 317 318 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device) 319 { 320 device->in_reset = false; 321 } 322 323 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device) 324 { 325 return device->in_reset; 326 } 327 328 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) 329 { 330 ctrl_info->in_ofa = true; 331 } 332 333 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) 334 { 335 ctrl_info->in_ofa = false; 336 } 337 338 static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info) 339 { 340 return ctrl_info->in_ofa; 341 } 342 343 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) 344 { 345 device->in_remove = true; 346 } 347 348 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) 349 { 350 return device->in_remove; 351 } 352 353 static inline void pqi_ctrl_shutdown_start(struct 
pqi_ctrl_info *ctrl_info) 354 { 355 ctrl_info->in_shutdown = true; 356 } 357 358 static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info) 359 { 360 return ctrl_info->in_shutdown; 361 } 362 363 static inline void pqi_schedule_rescan_worker_with_delay( 364 struct pqi_ctrl_info *ctrl_info, unsigned long delay) 365 { 366 if (pqi_ctrl_offline(ctrl_info)) 367 return; 368 if (pqi_ctrl_in_ofa(ctrl_info)) 369 return; 370 371 schedule_delayed_work(&ctrl_info->rescan_work, delay); 372 } 373 374 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) 375 { 376 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 377 } 378 379 #define PQI_RESCAN_WORK_DELAY (10 * PQI_HZ) 380 381 static inline void pqi_schedule_rescan_worker_delayed( 382 struct pqi_ctrl_info *ctrl_info) 383 { 384 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); 385 } 386 387 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) 388 { 389 cancel_delayed_work_sync(&ctrl_info->rescan_work); 390 } 391 392 static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info) 393 { 394 cancel_work_sync(&ctrl_info->event_work); 395 } 396 397 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) 398 { 399 if (!ctrl_info->heartbeat_counter) 400 return 0; 401 402 return readl(ctrl_info->heartbeat_counter); 403 } 404 405 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 406 { 407 if (!ctrl_info->soft_reset_status) 408 return 0; 409 410 return readb(ctrl_info->soft_reset_status); 411 } 412 413 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info, 414 u8 clear) 415 { 416 u8 status; 417 418 if (!ctrl_info->soft_reset_status) 419 return; 420 421 status = pqi_read_soft_reset_status(ctrl_info); 422 status &= ~clear; 423 writeb(status, ctrl_info->soft_reset_status); 424 } 425 426 static int pqi_map_single(struct pci_dev *pci_dev, 427 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 428 size_t buffer_length, enum dma_data_direction data_direction) 429 { 430 dma_addr_t bus_address; 431 432 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE) 433 return 0; 434 435 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, 436 data_direction); 437 if (dma_mapping_error(&pci_dev->dev, bus_address)) 438 return -ENOMEM; 439 440 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); 441 put_unaligned_le32(buffer_length, &sg_descriptor->length); 442 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 443 444 return 0; 445 } 446 447 static void pqi_pci_unmap(struct pci_dev *pci_dev, 448 struct pqi_sg_descriptor *descriptors, int num_descriptors, 449 enum dma_data_direction data_direction) 450 { 451 int i; 452 453 if (data_direction == DMA_NONE) 454 return; 455 456 for (i = 0; i < num_descriptors; i++) 457 dma_unmap_single(&pci_dev->dev, 458 (dma_addr_t)get_unaligned_le64(&descriptors[i].address), 459 get_unaligned_le32(&descriptors[i].length), 460 data_direction); 461 } 462 463 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, 464 struct pqi_raid_path_request *request, u8 cmd, 465 u8 *scsi3addr, void *buffer, size_t buffer_length, 466 u16 vpd_page, enum dma_data_direction *dir) 467 { 468 u8 *cdb; 469 size_t cdb_length = buffer_length; 470 471 memset(request, 0, sizeof(*request)); 472 473 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 474 put_unaligned_le16(offsetof(struct pqi_raid_path_request, 475 sg_descriptors[1]) - 
		PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		/* cmd is an opcode byte, so print it as hex, not as a character. */
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int
pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, 603 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, 604 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 605 { 606 int rc; 607 struct pqi_raid_path_request request; 608 enum dma_data_direction dir; 609 610 rc = pqi_build_raid_path_request(ctrl_info, &request, 611 cmd, scsi3addr, buffer, 612 buffer_length, vpd_page, &dir); 613 if (rc) 614 return rc; 615 616 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 617 error_info, timeout_msecs); 618 619 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 620 621 return rc; 622 } 623 624 /* helper functions for pqi_send_scsi_raid_request */ 625 626 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, 627 u8 cmd, void *buffer, size_t buffer_length) 628 { 629 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 630 buffer, buffer_length, 0, NULL, NO_TIMEOUT); 631 } 632 633 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, 634 u8 cmd, void *buffer, size_t buffer_length, 635 struct pqi_raid_error_info *error_info) 636 { 637 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 638 buffer, buffer_length, 0, error_info, NO_TIMEOUT); 639 } 640 641 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 642 struct bmic_identify_controller *buffer) 643 { 644 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, 645 buffer, sizeof(*buffer)); 646 } 647 648 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, 649 struct bmic_sense_subsystem_info *sense_info) 650 { 651 return pqi_send_ctrl_raid_request(ctrl_info, 652 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, 653 sizeof(*sense_info)); 654 } 655 656 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 657 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 658 { 659 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, 660 buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT); 661 } 662 663 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 664 struct pqi_scsi_dev *device, 665 struct bmic_identify_physical_device *buffer, size_t buffer_length) 666 { 667 int rc; 668 enum dma_data_direction dir; 669 u16 bmic_device_index; 670 struct pqi_raid_path_request request; 671 672 rc = pqi_build_raid_path_request(ctrl_info, &request, 673 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 674 buffer_length, 0, &dir); 675 if (rc) 676 return rc; 677 678 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 679 request.cdb[2] = (u8)bmic_device_index; 680 request.cdb[9] = (u8)(bmic_device_index >> 8); 681 682 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 683 0, NULL, NO_TIMEOUT); 684 685 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 686 687 return rc; 688 } 689 690 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 691 enum bmic_flush_cache_shutdown_event shutdown_event) 692 { 693 int rc; 694 struct bmic_flush_cache *flush_cache; 695 696 /* 697 * Don't bother trying to flush the cache if the controller is 698 * locked up. 
699 */ 700 if (pqi_ctrl_offline(ctrl_info)) 701 return -ENXIO; 702 703 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 704 if (!flush_cache) 705 return -ENOMEM; 706 707 flush_cache->shutdown_event = shutdown_event; 708 709 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, 710 sizeof(*flush_cache)); 711 712 kfree(flush_cache); 713 714 return rc; 715 } 716 717 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, 718 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, 719 struct pqi_raid_error_info *error_info) 720 { 721 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, 722 buffer, buffer_length, error_info); 723 } 724 725 #define PQI_FETCH_PTRAID_DATA (1 << 31) 726 727 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) 728 { 729 int rc; 730 struct bmic_diag_options *diag; 731 732 diag = kzalloc(sizeof(*diag), GFP_KERNEL); 733 if (!diag) 734 return -ENOMEM; 735 736 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, 737 diag, sizeof(*diag)); 738 if (rc) 739 goto out; 740 741 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); 742 743 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, 744 sizeof(*diag)); 745 746 out: 747 kfree(diag); 748 749 return rc; 750 } 751 752 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 753 void *buffer, size_t buffer_length) 754 { 755 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, 756 buffer, buffer_length); 757 } 758 759 #pragma pack(1) 760 761 struct bmic_host_wellness_driver_version { 762 u8 start_tag[4]; 763 u8 driver_version_tag[2]; 764 __le16 driver_version_length; 765 char driver_version[32]; 766 u8 dont_write_tag[2]; 767 u8 end_tag[2]; 768 }; 769 770 #pragma pack() 771 772 static int pqi_write_driver_version_to_host_wellness( 773 struct pqi_ctrl_info *ctrl_info) 774 { 775 int rc; 776 struct bmic_host_wellness_driver_version *buffer; 777 size_t buffer_length; 778 779 buffer_length = sizeof(*buffer); 780 781 buffer = kmalloc(buffer_length, GFP_KERNEL); 782 if (!buffer) 783 return -ENOMEM; 784 785 buffer->start_tag[0] = '<'; 786 buffer->start_tag[1] = 'H'; 787 buffer->start_tag[2] = 'W'; 788 buffer->start_tag[3] = '>'; 789 buffer->driver_version_tag[0] = 'D'; 790 buffer->driver_version_tag[1] = 'V'; 791 put_unaligned_le16(sizeof(buffer->driver_version), 792 &buffer->driver_version_length); 793 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, 794 sizeof(buffer->driver_version) - 1); 795 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; 796 buffer->dont_write_tag[0] = 'D'; 797 buffer->dont_write_tag[1] = 'W'; 798 buffer->end_tag[0] = 'Z'; 799 buffer->end_tag[1] = 'Z'; 800 801 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 802 803 kfree(buffer); 804 805 return rc; 806 } 807 808 #pragma pack(1) 809 810 struct bmic_host_wellness_time { 811 u8 start_tag[4]; 812 u8 time_tag[2]; 813 __le16 time_length; 814 u8 time[8]; 815 u8 dont_write_tag[2]; 816 u8 end_tag[2]; 817 }; 818 819 #pragma pack() 820 821 static int pqi_write_current_time_to_host_wellness( 822 struct pqi_ctrl_info *ctrl_info) 823 { 824 int rc; 825 struct bmic_host_wellness_time *buffer; 826 size_t buffer_length; 827 time64_t local_time; 828 unsigned int year; 829 struct tm tm; 830 831 buffer_length = sizeof(*buffer); 832 833 buffer = kmalloc(buffer_length, GFP_KERNEL); 834 if (!buffer) 835 return -ENOMEM; 836 837 buffer->start_tag[0] = '<'; 838 buffer->start_tag[1] = 'H'; 839 buffer->start_tag[2] 
		= 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

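/*
 * The two helpers below are thin wrappers around
 * pqi_report_phys_logical_luns() for the CISS "report physical LUNs" and
 * "report logical LUNs" commands.  On success they hand back a kmalloc'ed
 * LUN list (sized by the grow-and-retry loop above) that the caller is
 * expected to free.
 */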
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, 976 void **buffer) 977 { 978 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, 979 buffer); 980 } 981 982 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, 983 void **buffer) 984 { 985 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 986 } 987 988 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 989 struct report_phys_lun_extended **physdev_list, 990 struct report_log_lun_extended **logdev_list) 991 { 992 int rc; 993 size_t logdev_list_length; 994 size_t logdev_data_length; 995 struct report_log_lun_extended *internal_logdev_list; 996 struct report_log_lun_extended *logdev_data; 997 struct report_lun_header report_lun_header; 998 999 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 1000 if (rc) 1001 dev_err(&ctrl_info->pci_dev->dev, 1002 "report physical LUNs failed\n"); 1003 1004 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 1005 if (rc) 1006 dev_err(&ctrl_info->pci_dev->dev, 1007 "report logical LUNs failed\n"); 1008 1009 /* 1010 * Tack the controller itself onto the end of the logical device list. 1011 */ 1012 1013 logdev_data = *logdev_list; 1014 1015 if (logdev_data) { 1016 logdev_list_length = 1017 get_unaligned_be32(&logdev_data->header.list_length); 1018 } else { 1019 memset(&report_lun_header, 0, sizeof(report_lun_header)); 1020 logdev_data = 1021 (struct report_log_lun_extended *)&report_lun_header; 1022 logdev_list_length = 0; 1023 } 1024 1025 logdev_data_length = sizeof(struct report_lun_header) + 1026 logdev_list_length; 1027 1028 internal_logdev_list = kmalloc(logdev_data_length + 1029 sizeof(struct report_log_lun_extended), GFP_KERNEL); 1030 if (!internal_logdev_list) { 1031 kfree(*logdev_list); 1032 *logdev_list = NULL; 1033 return -ENOMEM; 1034 } 1035 1036 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 1037 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 1038 sizeof(struct report_log_lun_extended_entry)); 1039 put_unaligned_be32(logdev_list_length + 1040 sizeof(struct report_log_lun_extended_entry), 1041 &internal_logdev_list->header.list_length); 1042 1043 kfree(*logdev_list); 1044 *logdev_list = internal_logdev_list; 1045 1046 return 0; 1047 } 1048 1049 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 1050 int bus, int target, int lun) 1051 { 1052 device->bus = bus; 1053 device->target = target; 1054 device->lun = lun; 1055 } 1056 1057 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 1058 { 1059 u8 *scsi3addr; 1060 u32 lunid; 1061 int bus; 1062 int target; 1063 int lun; 1064 1065 scsi3addr = device->scsi3addr; 1066 lunid = get_unaligned_le32(scsi3addr); 1067 1068 if (pqi_is_hba_lunid(scsi3addr)) { 1069 /* The specified device is the controller. */ 1070 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 1071 device->target_lun_valid = true; 1072 return; 1073 } 1074 1075 if (pqi_is_logical_device(device)) { 1076 if (device->is_external_raid_device) { 1077 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 1078 target = (lunid >> 16) & 0x3fff; 1079 lun = lunid & 0xff; 1080 } else { 1081 bus = PQI_RAID_VOLUME_BUS; 1082 target = 0; 1083 lun = lunid & 0x3fff; 1084 } 1085 pqi_set_bus_target_lun(device, bus, target, lun); 1086 device->target_lun_valid = true; 1087 return; 1088 } 1089 1090 /* 1091 * Defer target and LUN assignment for non-controller physical devices 1092 * because the SAS transport layer will make these assignments later. 
1093 */ 1094 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 1095 } 1096 1097 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 1098 struct pqi_scsi_dev *device) 1099 { 1100 int rc; 1101 u8 raid_level; 1102 u8 *buffer; 1103 1104 raid_level = SA_RAID_UNKNOWN; 1105 1106 buffer = kmalloc(64, GFP_KERNEL); 1107 if (buffer) { 1108 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1109 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 1110 if (rc == 0) { 1111 raid_level = buffer[8]; 1112 if (raid_level > SA_RAID_MAX) 1113 raid_level = SA_RAID_UNKNOWN; 1114 } 1115 kfree(buffer); 1116 } 1117 1118 device->raid_level = raid_level; 1119 } 1120 1121 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1122 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1123 { 1124 char *err_msg; 1125 u32 raid_map_size; 1126 u32 r5or6_blocks_per_row; 1127 1128 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1129 1130 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1131 err_msg = "RAID map too small"; 1132 goto bad_raid_map; 1133 } 1134 1135 if (device->raid_level == SA_RAID_1) { 1136 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1137 err_msg = "invalid RAID-1 map"; 1138 goto bad_raid_map; 1139 } 1140 } else if (device->raid_level == SA_RAID_ADM) { 1141 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1142 err_msg = "invalid RAID-1(ADM) map"; 1143 goto bad_raid_map; 1144 } 1145 } else if ((device->raid_level == SA_RAID_5 || 1146 device->raid_level == SA_RAID_6) && 1147 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1148 /* RAID 50/60 */ 1149 r5or6_blocks_per_row = 1150 get_unaligned_le16(&raid_map->strip_size) * 1151 get_unaligned_le16(&raid_map->data_disks_per_row); 1152 if (r5or6_blocks_per_row == 0) { 1153 err_msg = "invalid RAID-5 or RAID-6 map"; 1154 goto bad_raid_map; 1155 } 1156 } 1157 1158 return 0; 1159 1160 bad_raid_map: 1161 dev_warn(&ctrl_info->pci_dev->dev, 1162 "logical device %08x%08x %s\n", 1163 *((u32 *)&device->scsi3addr), 1164 *((u32 *)&device->scsi3addr[4]), err_msg); 1165 1166 return -EINVAL; 1167 } 1168 1169 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1170 struct pqi_scsi_dev *device) 1171 { 1172 int rc; 1173 u32 raid_map_size; 1174 struct raid_map *raid_map; 1175 1176 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1177 if (!raid_map) 1178 return -ENOMEM; 1179 1180 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1181 device->scsi3addr, raid_map, sizeof(*raid_map), 1182 0, NULL, NO_TIMEOUT); 1183 1184 if (rc) 1185 goto error; 1186 1187 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1188 1189 if (raid_map_size > sizeof(*raid_map)) { 1190 1191 kfree(raid_map); 1192 1193 raid_map = kmalloc(raid_map_size, GFP_KERNEL); 1194 if (!raid_map) 1195 return -ENOMEM; 1196 1197 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1198 device->scsi3addr, raid_map, raid_map_size, 1199 0, NULL, NO_TIMEOUT); 1200 if (rc) 1201 goto error; 1202 1203 if (get_unaligned_le32(&raid_map->structure_size) 1204 != raid_map_size) { 1205 dev_warn(&ctrl_info->pci_dev->dev, 1206 "Requested %d bytes, received %d bytes", 1207 raid_map_size, 1208 get_unaligned_le32(&raid_map->structure_size)); 1209 goto error; 1210 } 1211 } 1212 1213 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 1214 if (rc) 1215 goto error; 1216 1217 device->raid_map = raid_map; 1218 1219 return 0; 1220 1221 error: 1222 kfree(raid_map); 1223 1224 return rc; 1225 } 1226 1227 static 
void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1228 struct pqi_scsi_dev *device) 1229 { 1230 int rc; 1231 u8 *buffer; 1232 u8 bypass_status; 1233 1234 buffer = kmalloc(64, GFP_KERNEL); 1235 if (!buffer) 1236 return; 1237 1238 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1239 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1240 if (rc) 1241 goto out; 1242 1243 #define RAID_BYPASS_STATUS 4 1244 #define RAID_BYPASS_CONFIGURED 0x1 1245 #define RAID_BYPASS_ENABLED 0x2 1246 1247 bypass_status = buffer[RAID_BYPASS_STATUS]; 1248 device->raid_bypass_configured = 1249 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1250 if (device->raid_bypass_configured && 1251 (bypass_status & RAID_BYPASS_ENABLED) && 1252 pqi_get_raid_map(ctrl_info, device) == 0) 1253 device->raid_bypass_enabled = true; 1254 1255 out: 1256 kfree(buffer); 1257 } 1258 1259 /* 1260 * Use vendor-specific VPD to determine online/offline status of a volume. 1261 */ 1262 1263 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1264 struct pqi_scsi_dev *device) 1265 { 1266 int rc; 1267 size_t page_length; 1268 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1269 bool volume_offline = true; 1270 u32 volume_flags; 1271 struct ciss_vpd_logical_volume_status *vpd; 1272 1273 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1274 if (!vpd) 1275 goto no_buffer; 1276 1277 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1278 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1279 if (rc) 1280 goto out; 1281 1282 if (vpd->page_code != CISS_VPD_LV_STATUS) 1283 goto out; 1284 1285 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1286 volume_status) + vpd->page_length; 1287 if (page_length < sizeof(*vpd)) 1288 goto out; 1289 1290 volume_status = vpd->volume_status; 1291 volume_flags = get_unaligned_be32(&vpd->flags); 1292 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1293 1294 out: 1295 kfree(vpd); 1296 no_buffer: 1297 device->volume_status = volume_status; 1298 device->volume_offline = volume_offline; 1299 } 1300 1301 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, 1302 struct pqi_scsi_dev *device, 1303 struct bmic_identify_physical_device *id_phys) 1304 { 1305 int rc; 1306 1307 memset(id_phys, 0, sizeof(*id_phys)); 1308 1309 rc = pqi_identify_physical_device(ctrl_info, device, 1310 id_phys, sizeof(*id_phys)); 1311 if (rc) { 1312 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1313 return rc; 1314 } 1315 1316 scsi_sanitize_inquiry_string(&id_phys->model[0], 8); 1317 scsi_sanitize_inquiry_string(&id_phys->model[8], 16); 1318 1319 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); 1320 memcpy(device->model, &id_phys->model[8], sizeof(device->model)); 1321 1322 device->box_index = id_phys->box_index; 1323 device->phys_box_on_bus = id_phys->phys_box_on_bus; 1324 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; 1325 device->queue_depth = 1326 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1327 device->active_path_index = id_phys->active_path_number; 1328 device->path_map = id_phys->redundant_path_present_map; 1329 memcpy(&device->box, 1330 &id_phys->alternate_paths_phys_box_on_port, 1331 sizeof(device->box)); 1332 memcpy(&device->phys_connector, 1333 &id_phys->alternate_paths_phys_connector, 1334 sizeof(device->phys_connector)); 1335 device->bay = id_phys->phys_bay_in_box; 1336 1337 return 0; 1338 } 1339 1340 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, 1341 struct 
pqi_scsi_dev *device) 1342 { 1343 int rc; 1344 u8 *buffer; 1345 1346 buffer = kmalloc(64, GFP_KERNEL); 1347 if (!buffer) 1348 return -ENOMEM; 1349 1350 /* Send an inquiry to the device to see what it is. */ 1351 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); 1352 if (rc) 1353 goto out; 1354 1355 scsi_sanitize_inquiry_string(&buffer[8], 8); 1356 scsi_sanitize_inquiry_string(&buffer[16], 16); 1357 1358 device->devtype = buffer[0] & 0x1f; 1359 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1360 memcpy(device->model, &buffer[16], sizeof(device->model)); 1361 1362 if (device->devtype == TYPE_DISK) { 1363 if (device->is_external_raid_device) { 1364 device->raid_level = SA_RAID_UNKNOWN; 1365 device->volume_status = CISS_LV_OK; 1366 device->volume_offline = false; 1367 } else { 1368 pqi_get_raid_level(ctrl_info, device); 1369 pqi_get_raid_bypass_status(ctrl_info, device); 1370 pqi_get_volume_status(ctrl_info, device); 1371 } 1372 } 1373 1374 out: 1375 kfree(buffer); 1376 1377 return rc; 1378 } 1379 1380 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1381 struct pqi_scsi_dev *device, 1382 struct bmic_identify_physical_device *id_phys) 1383 { 1384 int rc; 1385 1386 if (device->is_expander_smp_device) 1387 return 0; 1388 1389 if (pqi_is_logical_device(device)) 1390 rc = pqi_get_logical_device_info(ctrl_info, device); 1391 else 1392 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); 1393 1394 return rc; 1395 } 1396 1397 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1398 struct pqi_scsi_dev *device) 1399 { 1400 char *status; 1401 static const char unknown_state_str[] = 1402 "Volume is in an unknown state (%u)"; 1403 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1404 1405 switch (device->volume_status) { 1406 case CISS_LV_OK: 1407 status = "Volume online"; 1408 break; 1409 case CISS_LV_FAILED: 1410 status = "Volume failed"; 1411 break; 1412 case CISS_LV_NOT_CONFIGURED: 1413 status = "Volume not configured"; 1414 break; 1415 case CISS_LV_DEGRADED: 1416 status = "Volume degraded"; 1417 break; 1418 case CISS_LV_READY_FOR_RECOVERY: 1419 status = "Volume ready for recovery operation"; 1420 break; 1421 case CISS_LV_UNDERGOING_RECOVERY: 1422 status = "Volume undergoing recovery"; 1423 break; 1424 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1425 status = "Wrong physical drive was replaced"; 1426 break; 1427 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1428 status = "A physical drive not properly connected"; 1429 break; 1430 case CISS_LV_HARDWARE_OVERHEATING: 1431 status = "Hardware is overheating"; 1432 break; 1433 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1434 status = "Hardware has overheated"; 1435 break; 1436 case CISS_LV_UNDERGOING_EXPANSION: 1437 status = "Volume undergoing expansion"; 1438 break; 1439 case CISS_LV_NOT_AVAILABLE: 1440 status = "Volume waiting for transforming volume"; 1441 break; 1442 case CISS_LV_QUEUED_FOR_EXPANSION: 1443 status = "Volume queued for expansion"; 1444 break; 1445 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1446 status = "Volume disabled due to SCSI ID conflict"; 1447 break; 1448 case CISS_LV_EJECTED: 1449 status = "Volume has been ejected"; 1450 break; 1451 case CISS_LV_UNDERGOING_ERASE: 1452 status = "Volume undergoing background erase"; 1453 break; 1454 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1455 status = "Volume ready for predictive spare rebuild"; 1456 break; 1457 case CISS_LV_UNDERGOING_RPI: 1458 status = "Volume undergoing rapid parity initialization"; 1459 break; 
1460 case CISS_LV_PENDING_RPI: 1461 status = "Volume queued for rapid parity initialization"; 1462 break; 1463 case CISS_LV_ENCRYPTED_NO_KEY: 1464 status = "Encrypted volume inaccessible - key not present"; 1465 break; 1466 case CISS_LV_UNDERGOING_ENCRYPTION: 1467 status = "Volume undergoing encryption process"; 1468 break; 1469 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1470 status = "Volume undergoing encryption re-keying process"; 1471 break; 1472 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1473 status = "Volume encrypted but encryption is disabled"; 1474 break; 1475 case CISS_LV_PENDING_ENCRYPTION: 1476 status = "Volume pending migration to encrypted state"; 1477 break; 1478 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1479 status = "Volume pending encryption rekeying"; 1480 break; 1481 case CISS_LV_NOT_SUPPORTED: 1482 status = "Volume not supported on this controller"; 1483 break; 1484 case CISS_LV_STATUS_UNAVAILABLE: 1485 status = "Volume status not available"; 1486 break; 1487 default: 1488 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1489 unknown_state_str, device->volume_status); 1490 status = unknown_state_buffer; 1491 break; 1492 } 1493 1494 dev_info(&ctrl_info->pci_dev->dev, 1495 "scsi %d:%d:%d:%d %s\n", 1496 ctrl_info->scsi_host->host_no, 1497 device->bus, device->target, device->lun, status); 1498 } 1499 1500 static void pqi_rescan_worker(struct work_struct *work) 1501 { 1502 struct pqi_ctrl_info *ctrl_info; 1503 1504 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1505 rescan_work); 1506 1507 pqi_scan_scsi_devices(ctrl_info); 1508 } 1509 1510 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1511 struct pqi_scsi_dev *device) 1512 { 1513 int rc; 1514 1515 if (pqi_is_logical_device(device)) 1516 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1517 device->target, device->lun); 1518 else 1519 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1520 1521 return rc; 1522 } 1523 1524 #define PQI_PENDING_IO_TIMEOUT_SECS 20 1525 1526 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, 1527 struct pqi_scsi_dev *device) 1528 { 1529 int rc; 1530 1531 pqi_device_remove_start(device); 1532 1533 rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS); 1534 if (rc) 1535 dev_err(&ctrl_info->pci_dev->dev, 1536 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", 1537 ctrl_info->scsi_host->host_no, device->bus, 1538 device->target, device->lun, 1539 atomic_read(&device->scsi_cmds_outstanding)); 1540 1541 if (pqi_is_logical_device(device)) 1542 scsi_remove_device(device->sdev); 1543 else 1544 pqi_remove_sas_device(device); 1545 } 1546 1547 /* Assumes the SCSI device list lock is held. 
*/ 1548 1549 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1550 int bus, int target, int lun) 1551 { 1552 struct pqi_scsi_dev *device; 1553 1554 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 1555 if (device->bus == bus && device->target == target && device->lun == lun) 1556 return device; 1557 1558 return NULL; 1559 } 1560 1561 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, 1562 struct pqi_scsi_dev *dev2) 1563 { 1564 if (dev1->is_physical_device != dev2->is_physical_device) 1565 return false; 1566 1567 if (dev1->is_physical_device) 1568 return dev1->wwid == dev2->wwid; 1569 1570 return memcmp(dev1->volume_id, dev2->volume_id, 1571 sizeof(dev1->volume_id)) == 0; 1572 } 1573 1574 enum pqi_find_result { 1575 DEVICE_NOT_FOUND, 1576 DEVICE_CHANGED, 1577 DEVICE_SAME, 1578 }; 1579 1580 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1581 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) 1582 { 1583 struct pqi_scsi_dev *device; 1584 1585 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 1586 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { 1587 *matching_device = device; 1588 if (pqi_device_equal(device_to_find, device)) { 1589 if (device_to_find->volume_offline) 1590 return DEVICE_CHANGED; 1591 return DEVICE_SAME; 1592 } 1593 return DEVICE_CHANGED; 1594 } 1595 } 1596 1597 return DEVICE_NOT_FOUND; 1598 } 1599 1600 static inline const char *pqi_device_type(struct pqi_scsi_dev *device) 1601 { 1602 if (device->is_expander_smp_device) 1603 return "Enclosure SMP "; 1604 1605 return scsi_device_type(device->devtype); 1606 } 1607 1608 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1609 1610 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1611 char *action, struct pqi_scsi_dev *device) 1612 { 1613 ssize_t count; 1614 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1615 1616 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 1617 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 1618 1619 if (device->target_lun_valid) 1620 count += scnprintf(buffer + count, 1621 PQI_DEV_INFO_BUFFER_LENGTH - count, 1622 "%d:%d", 1623 device->target, 1624 device->lun); 1625 else 1626 count += scnprintf(buffer + count, 1627 PQI_DEV_INFO_BUFFER_LENGTH - count, 1628 "-:-"); 1629 1630 if (pqi_is_logical_device(device)) 1631 count += scnprintf(buffer + count, 1632 PQI_DEV_INFO_BUFFER_LENGTH - count, 1633 " %08x%08x", 1634 *((u32 *)&device->scsi3addr), 1635 *((u32 *)&device->scsi3addr[4])); 1636 else 1637 count += scnprintf(buffer + count, 1638 PQI_DEV_INFO_BUFFER_LENGTH - count, 1639 " %016llx", device->sas_address); 1640 1641 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 1642 " %s %.8s %.16s ", 1643 pqi_device_type(device), 1644 device->vendor, 1645 device->model); 1646 1647 if (pqi_is_logical_device(device)) { 1648 if (device->devtype == TYPE_DISK) 1649 count += scnprintf(buffer + count, 1650 PQI_DEV_INFO_BUFFER_LENGTH - count, 1651 "SSDSmartPathCap%c En%c %-12s", 1652 device->raid_bypass_configured ? '+' : '-', 1653 device->raid_bypass_enabled ? '+' : '-', 1654 pqi_raid_level_to_string(device->raid_level)); 1655 } else { 1656 count += scnprintf(buffer + count, 1657 PQI_DEV_INFO_BUFFER_LENGTH - count, 1658 "AIO%c", device->aio_enabled ? 
'+' : '-'); 1659 if (device->devtype == TYPE_DISK || 1660 device->devtype == TYPE_ZBC) 1661 count += scnprintf(buffer + count, 1662 PQI_DEV_INFO_BUFFER_LENGTH - count, 1663 " qd=%-6d", device->queue_depth); 1664 } 1665 1666 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 1667 } 1668 1669 /* Assumes the SCSI device list lock is held. */ 1670 1671 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, 1672 struct pqi_scsi_dev *new_device) 1673 { 1674 existing_device->devtype = new_device->devtype; 1675 existing_device->device_type = new_device->device_type; 1676 existing_device->bus = new_device->bus; 1677 if (new_device->target_lun_valid) { 1678 existing_device->target = new_device->target; 1679 existing_device->lun = new_device->lun; 1680 existing_device->target_lun_valid = true; 1681 } 1682 1683 if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION || 1684 existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) && 1685 new_device->volume_status == CISS_LV_OK) 1686 existing_device->rescan = true; 1687 1688 /* By definition, the scsi3addr and wwid fields are already the same. */ 1689 1690 existing_device->is_physical_device = new_device->is_physical_device; 1691 existing_device->is_external_raid_device = 1692 new_device->is_external_raid_device; 1693 existing_device->is_expander_smp_device = 1694 new_device->is_expander_smp_device; 1695 existing_device->aio_enabled = new_device->aio_enabled; 1696 memcpy(existing_device->vendor, new_device->vendor, 1697 sizeof(existing_device->vendor)); 1698 memcpy(existing_device->model, new_device->model, 1699 sizeof(existing_device->model)); 1700 existing_device->sas_address = new_device->sas_address; 1701 existing_device->raid_level = new_device->raid_level; 1702 existing_device->queue_depth = new_device->queue_depth; 1703 existing_device->aio_handle = new_device->aio_handle; 1704 existing_device->volume_status = new_device->volume_status; 1705 existing_device->active_path_index = new_device->active_path_index; 1706 existing_device->path_map = new_device->path_map; 1707 existing_device->bay = new_device->bay; 1708 existing_device->box_index = new_device->box_index; 1709 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; 1710 existing_device->phy_connected_dev_type = 1711 new_device->phy_connected_dev_type; 1712 memcpy(existing_device->box, new_device->box, 1713 sizeof(existing_device->box)); 1714 memcpy(existing_device->phys_connector, new_device->phys_connector, 1715 sizeof(existing_device->phys_connector)); 1716 existing_device->offload_to_mirror = 0; 1717 kfree(existing_device->raid_map); 1718 existing_device->raid_map = new_device->raid_map; 1719 existing_device->raid_bypass_configured = 1720 new_device->raid_bypass_configured; 1721 existing_device->raid_bypass_enabled = 1722 new_device->raid_bypass_enabled; 1723 existing_device->device_offline = false; 1724 1725 /* To prevent this from being freed later. */ 1726 new_device->raid_map = NULL; 1727 } 1728 1729 static inline void pqi_free_device(struct pqi_scsi_dev *device) 1730 { 1731 if (device) { 1732 kfree(device->raid_map); 1733 kfree(device); 1734 } 1735 } 1736 1737 /* 1738 * Called when exposing a new device to the OS fails in order to re-adjust 1739 * our internal SCSI device list to match the SCSI ML's view. 
1740 */ 1741 1742 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 1743 struct pqi_scsi_dev *device) 1744 { 1745 unsigned long flags; 1746 1747 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1748 list_del(&device->scsi_device_list_entry); 1749 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1750 1751 /* Allow the device structure to be freed later. */ 1752 device->keep_device = false; 1753 } 1754 1755 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) 1756 { 1757 if (device->is_expander_smp_device) 1758 return device->sas_port != NULL; 1759 1760 return device->sdev != NULL; 1761 } 1762 1763 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 1764 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 1765 { 1766 int rc; 1767 unsigned int i; 1768 unsigned long flags; 1769 enum pqi_find_result find_result; 1770 struct pqi_scsi_dev *device; 1771 struct pqi_scsi_dev *next; 1772 struct pqi_scsi_dev *matching_device; 1773 LIST_HEAD(add_list); 1774 LIST_HEAD(delete_list); 1775 1776 /* 1777 * The idea here is to do as little work as possible while holding the 1778 * spinlock. That's why we go to great pains to defer anything other 1779 * than updating the internal device list until after we release the 1780 * spinlock. 1781 */ 1782 1783 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1784 1785 /* Assume that all devices in the existing list have gone away. */ 1786 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 1787 device->device_gone = true; 1788 1789 for (i = 0; i < num_new_devices; i++) { 1790 device = new_device_list[i]; 1791 1792 find_result = pqi_scsi_find_entry(ctrl_info, device, 1793 &matching_device); 1794 1795 switch (find_result) { 1796 case DEVICE_SAME: 1797 /* 1798 * The newly found device is already in the existing 1799 * device list. 1800 */ 1801 device->new_device = false; 1802 matching_device->device_gone = false; 1803 pqi_scsi_update_device(matching_device, device); 1804 break; 1805 case DEVICE_NOT_FOUND: 1806 /* 1807 * The newly found device is NOT in the existing device 1808 * list. 1809 */ 1810 device->new_device = true; 1811 break; 1812 case DEVICE_CHANGED: 1813 /* 1814 * The original device has gone away and we need to add 1815 * the new device. 1816 */ 1817 device->new_device = true; 1818 break; 1819 } 1820 } 1821 1822 /* Process all devices that have gone away. */ 1823 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 1824 scsi_device_list_entry) { 1825 if (device->device_gone) { 1826 list_del_init(&device->scsi_device_list_entry); 1827 list_add_tail(&device->delete_list_entry, &delete_list); 1828 } 1829 } 1830 1831 /* Process all new devices. */ 1832 for (i = 0; i < num_new_devices; i++) { 1833 device = new_device_list[i]; 1834 if (!device->new_device) 1835 continue; 1836 if (device->volume_offline) 1837 continue; 1838 list_add_tail(&device->scsi_device_list_entry, 1839 &ctrl_info->scsi_device_list); 1840 list_add_tail(&device->add_list_entry, &add_list); 1841 /* To prevent this device structure from being freed later. */ 1842 device->keep_device = true; 1843 } 1844 1845 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1846 1847 if (pqi_ctrl_in_ofa(ctrl_info)) 1848 pqi_ctrl_ofa_done(ctrl_info); 1849 1850 /* Remove all devices that have gone away. 
*/ 1851 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { 1852 if (device->volume_offline) { 1853 pqi_dev_info(ctrl_info, "offline", device); 1854 pqi_show_volume_status(ctrl_info, device); 1855 } 1856 list_del(&device->delete_list_entry); 1857 if (pqi_is_device_added(device)) { 1858 pqi_remove_device(ctrl_info, device); 1859 } else { 1860 if (!device->volume_offline) 1861 pqi_dev_info(ctrl_info, "removed", device); 1862 pqi_free_device(device); 1863 } 1864 } 1865 1866 /* 1867 * Notify the SCSI ML if the queue depth of any existing device has 1868 * changed. 1869 */ 1870 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1871 scsi_device_list_entry) { 1872 if (device->sdev) { 1873 if (device->queue_depth != 1874 device->advertised_queue_depth) { 1875 device->advertised_queue_depth = device->queue_depth; 1876 scsi_change_queue_depth(device->sdev, 1877 device->advertised_queue_depth); 1878 } 1879 if (device->rescan) { 1880 scsi_rescan_device(&device->sdev->sdev_gendev); 1881 device->rescan = false; 1882 } 1883 } 1884 } 1885 1886 /* Expose any new devices. */ 1887 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 1888 if (!pqi_is_device_added(device)) { 1889 rc = pqi_add_device(ctrl_info, device); 1890 if (rc == 0) { 1891 pqi_dev_info(ctrl_info, "added", device); 1892 } else { 1893 dev_warn(&ctrl_info->pci_dev->dev, 1894 "scsi %d:%d:%d:%d addition failed, device not added\n", 1895 ctrl_info->scsi_host->host_no, 1896 device->bus, device->target, 1897 device->lun); 1898 pqi_fixup_botched_add(ctrl_info, device); 1899 } 1900 } 1901 } 1902 } 1903 1904 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) 1905 { 1906 /* 1907 * Only support the HBA controller itself as a RAID 1908 * controller. If it's a RAID controller other than 1909 * the HBA itself (an external RAID controller, for 1910 * example), we don't support it. 1911 */ 1912 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && 1913 !pqi_is_hba_lunid(device->scsi3addr)) 1914 return false; 1915 1916 return true; 1917 } 1918 1919 static inline bool pqi_skip_device(u8 *scsi3addr) 1920 { 1921 /* Ignore all masked devices. 
*/ 1922 if (MASKED_DEVICE(scsi3addr)) 1923 return true; 1924 1925 return false; 1926 } 1927 1928 static inline void pqi_mask_device(u8 *scsi3addr) 1929 { 1930 scsi3addr[3] |= 0xc0; 1931 } 1932 1933 static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device) 1934 { 1935 switch (device->device_type) { 1936 case SA_DEVICE_TYPE_SAS: 1937 case SA_DEVICE_TYPE_EXPANDER_SMP: 1938 case SA_DEVICE_TYPE_SES: 1939 return true; 1940 } 1941 1942 return false; 1943 } 1944 1945 static inline bool pqi_expose_device(struct pqi_scsi_dev *device) 1946 { 1947 return !device->is_physical_device || 1948 !pqi_skip_device(device->scsi3addr); 1949 } 1950 1951 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1952 { 1953 int i; 1954 int rc; 1955 LIST_HEAD(new_device_list_head); 1956 struct report_phys_lun_extended *physdev_list = NULL; 1957 struct report_log_lun_extended *logdev_list = NULL; 1958 struct report_phys_lun_extended_entry *phys_lun_ext_entry; 1959 struct report_log_lun_extended_entry *log_lun_ext_entry; 1960 struct bmic_identify_physical_device *id_phys = NULL; 1961 u32 num_physicals; 1962 u32 num_logicals; 1963 struct pqi_scsi_dev **new_device_list = NULL; 1964 struct pqi_scsi_dev *device; 1965 struct pqi_scsi_dev *next; 1966 unsigned int num_new_devices; 1967 unsigned int num_valid_devices; 1968 bool is_physical_device; 1969 u8 *scsi3addr; 1970 unsigned int physical_index; 1971 unsigned int logical_index; 1972 static char *out_of_memory_msg = 1973 "failed to allocate memory, device discovery stopped"; 1974 1975 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 1976 if (rc) 1977 goto out; 1978 1979 if (physdev_list) 1980 num_physicals = 1981 get_unaligned_be32(&physdev_list->header.list_length) 1982 / sizeof(physdev_list->lun_entries[0]); 1983 else 1984 num_physicals = 0; 1985 1986 if (logdev_list) 1987 num_logicals = 1988 get_unaligned_be32(&logdev_list->header.list_length) 1989 / sizeof(logdev_list->lun_entries[0]); 1990 else 1991 num_logicals = 0; 1992 1993 if (num_physicals) { 1994 /* 1995 * We need this buffer for calls to pqi_get_physical_disk_info() 1996 * below. We allocate it here instead of inside 1997 * pqi_get_physical_disk_info() because it's a fairly large 1998 * buffer. 
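 * The one buffer is reused for every physical device processed in the
 * discovery loop below.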
1999 */ 2000 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2001 if (!id_phys) { 2002 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2003 out_of_memory_msg); 2004 rc = -ENOMEM; 2005 goto out; 2006 } 2007 2008 if (pqi_hide_vsep) { 2009 for (i = num_physicals - 1; i >= 0; i--) { 2010 phys_lun_ext_entry = 2011 &physdev_list->lun_entries[i]; 2012 if (CISS_GET_DRIVE_NUMBER( 2013 phys_lun_ext_entry->lunid) == 2014 PQI_VSEP_CISS_BTL) { 2015 pqi_mask_device( 2016 phys_lun_ext_entry->lunid); 2017 break; 2018 } 2019 } 2020 } 2021 } 2022 2023 num_new_devices = num_physicals + num_logicals; 2024 2025 new_device_list = kmalloc_array(num_new_devices, 2026 sizeof(*new_device_list), 2027 GFP_KERNEL); 2028 if (!new_device_list) { 2029 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2030 rc = -ENOMEM; 2031 goto out; 2032 } 2033 2034 for (i = 0; i < num_new_devices; i++) { 2035 device = kzalloc(sizeof(*device), GFP_KERNEL); 2036 if (!device) { 2037 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2038 out_of_memory_msg); 2039 rc = -ENOMEM; 2040 goto out; 2041 } 2042 list_add_tail(&device->new_device_list_entry, 2043 &new_device_list_head); 2044 } 2045 2046 device = NULL; 2047 num_valid_devices = 0; 2048 physical_index = 0; 2049 logical_index = 0; 2050 2051 for (i = 0; i < num_new_devices; i++) { 2052 2053 if ((!pqi_expose_ld_first && i < num_physicals) || 2054 (pqi_expose_ld_first && i >= num_logicals)) { 2055 is_physical_device = true; 2056 phys_lun_ext_entry = 2057 &physdev_list->lun_entries[physical_index++]; 2058 log_lun_ext_entry = NULL; 2059 scsi3addr = phys_lun_ext_entry->lunid; 2060 } else { 2061 is_physical_device = false; 2062 phys_lun_ext_entry = NULL; 2063 log_lun_ext_entry = 2064 &logdev_list->lun_entries[logical_index++]; 2065 scsi3addr = log_lun_ext_entry->lunid; 2066 } 2067 2068 if (is_physical_device && pqi_skip_device(scsi3addr)) 2069 continue; 2070 2071 if (device) 2072 device = list_next_entry(device, new_device_list_entry); 2073 else 2074 device = list_first_entry(&new_device_list_head, 2075 struct pqi_scsi_dev, new_device_list_entry); 2076 2077 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2078 device->is_physical_device = is_physical_device; 2079 if (is_physical_device) { 2080 device->device_type = phys_lun_ext_entry->device_type; 2081 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) 2082 device->is_expander_smp_device = true; 2083 } else { 2084 device->is_external_raid_device = 2085 pqi_is_external_raid_addr(scsi3addr); 2086 } 2087 2088 if (!pqi_is_supported_device(device)) 2089 continue; 2090 2091 /* Gather information about the device. 
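 * This fills in the SCSI-level attributes via pqi_get_device_info(), then
 * the bus/target/lun mapping, the WWID and AIO handle for physical
 * devices, the volume ID for logical devices, and the SAS address where
 * applicable.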
*/ 2092 rc = pqi_get_device_info(ctrl_info, device, id_phys); 2093 if (rc == -ENOMEM) { 2094 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2095 out_of_memory_msg); 2096 goto out; 2097 } 2098 if (rc) { 2099 if (device->is_physical_device) 2100 dev_warn(&ctrl_info->pci_dev->dev, 2101 "obtaining device info failed, skipping physical device %016llx\n", 2102 get_unaligned_be64( 2103 &phys_lun_ext_entry->wwid)); 2104 else 2105 dev_warn(&ctrl_info->pci_dev->dev, 2106 "obtaining device info failed, skipping logical device %08x%08x\n", 2107 *((u32 *)&device->scsi3addr), 2108 *((u32 *)&device->scsi3addr[4])); 2109 rc = 0; 2110 continue; 2111 } 2112 2113 pqi_assign_bus_target_lun(device); 2114 2115 if (device->is_physical_device) { 2116 device->wwid = phys_lun_ext_entry->wwid; 2117 if ((phys_lun_ext_entry->device_flags & 2118 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2119 phys_lun_ext_entry->aio_handle) { 2120 device->aio_enabled = true; 2121 device->aio_handle = 2122 phys_lun_ext_entry->aio_handle; 2123 } 2124 } else { 2125 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 2126 sizeof(device->volume_id)); 2127 } 2128 2129 if (pqi_is_device_with_sas_address(device)) 2130 device->sas_address = get_unaligned_be64(&device->wwid); 2131 2132 new_device_list[num_valid_devices++] = device; 2133 } 2134 2135 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2136 2137 out: 2138 list_for_each_entry_safe(device, next, &new_device_list_head, 2139 new_device_list_entry) { 2140 if (device->keep_device) 2141 continue; 2142 list_del(&device->new_device_list_entry); 2143 pqi_free_device(device); 2144 } 2145 2146 kfree(new_device_list); 2147 kfree(physdev_list); 2148 kfree(logdev_list); 2149 kfree(id_phys); 2150 2151 return rc; 2152 } 2153 2154 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2155 { 2156 int rc = 0; 2157 2158 if (pqi_ctrl_offline(ctrl_info)) 2159 return -ENXIO; 2160 2161 if (!mutex_trylock(&ctrl_info->scan_mutex)) { 2162 pqi_schedule_rescan_worker_delayed(ctrl_info); 2163 rc = -EINPROGRESS; 2164 } else { 2165 rc = pqi_update_scsi_devices(ctrl_info); 2166 if (rc) 2167 pqi_schedule_rescan_worker_delayed(ctrl_info); 2168 mutex_unlock(&ctrl_info->scan_mutex); 2169 } 2170 2171 return rc; 2172 } 2173 2174 static void pqi_scan_start(struct Scsi_Host *shost) 2175 { 2176 struct pqi_ctrl_info *ctrl_info; 2177 2178 ctrl_info = shost_to_hba(shost); 2179 if (pqi_ctrl_in_ofa(ctrl_info)) 2180 return; 2181 2182 pqi_scan_scsi_devices(ctrl_info); 2183 } 2184 2185 /* Returns TRUE if scan is finished. 
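 * (The scan counts as finished once scan_mutex is no longer held, i.e.
 * pqi_update_scsi_devices() has completed.)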
*/ 2186 2187 static int pqi_scan_finished(struct Scsi_Host *shost, 2188 unsigned long elapsed_time) 2189 { 2190 struct pqi_ctrl_info *ctrl_info; 2191 2192 ctrl_info = shost_priv(shost); 2193 2194 return !mutex_is_locked(&ctrl_info->scan_mutex); 2195 } 2196 2197 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2198 { 2199 mutex_lock(&ctrl_info->scan_mutex); 2200 mutex_unlock(&ctrl_info->scan_mutex); 2201 } 2202 2203 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2204 { 2205 mutex_lock(&ctrl_info->lun_reset_mutex); 2206 mutex_unlock(&ctrl_info->lun_reset_mutex); 2207 } 2208 2209 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 2210 { 2211 mutex_lock(&ctrl_info->ofa_mutex); 2212 mutex_unlock(&ctrl_info->ofa_mutex); 2213 } 2214 2215 static inline void pqi_set_encryption_info( 2216 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2217 u64 first_block) 2218 { 2219 u32 volume_blk_size; 2220 2221 /* 2222 * Set the encryption tweak values based on logical block address. 2223 * If the block size is 512, the tweak value is equal to the LBA. 2224 * For other block sizes, tweak value is (LBA * block size) / 512. 2225 */ 2226 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2227 if (volume_blk_size != 512) 2228 first_block = (first_block * volume_blk_size) / 512; 2229 2230 encryption_info->data_encryption_key_index = 2231 get_unaligned_le16(&raid_map->data_encryption_key_index); 2232 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2233 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2234 } 2235 2236 /* 2237 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2238 */ 2239 2240 #define PQI_RAID_BYPASS_INELIGIBLE 1 2241 2242 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2243 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2244 struct pqi_queue_group *queue_group) 2245 { 2246 struct raid_map *raid_map; 2247 bool is_write = false; 2248 u32 map_index; 2249 u64 first_block; 2250 u64 last_block; 2251 u32 block_cnt; 2252 u32 blocks_per_row; 2253 u64 first_row; 2254 u64 last_row; 2255 u32 first_row_offset; 2256 u32 last_row_offset; 2257 u32 first_column; 2258 u32 last_column; 2259 u64 r0_first_row; 2260 u64 r0_last_row; 2261 u32 r5or6_blocks_per_row; 2262 u64 r5or6_first_row; 2263 u64 r5or6_last_row; 2264 u32 r5or6_first_row_offset; 2265 u32 r5or6_last_row_offset; 2266 u32 r5or6_first_column; 2267 u32 r5or6_last_column; 2268 u16 data_disks_per_row; 2269 u32 total_disks_per_row; 2270 u16 layout_map_count; 2271 u32 stripesize; 2272 u16 strip_size; 2273 u32 first_group; 2274 u32 last_group; 2275 u32 current_group; 2276 u32 map_row; 2277 u32 aio_handle; 2278 u64 disk_block; 2279 u32 disk_block_cnt; 2280 u8 cdb[16]; 2281 u8 cdb_length; 2282 int offload_to_mirror; 2283 struct pqi_encryption_info *encryption_info_ptr; 2284 struct pqi_encryption_info encryption_info; 2285 #if BITS_PER_LONG == 32 2286 u64 tmpdiv; 2287 #endif 2288 2289 /* Check for valid opcode, get LBA and block count. 
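 * The 6-byte CDBs carry a 21-bit LBA in bytes 1-3 and a one-byte transfer
 * length in byte 4 (0 means 256 blocks); the 10-, 12- and 16-byte variants
 * carry big-endian 32- or 64-bit LBAs starting at byte 2 and 16- or 32-bit
 * transfer lengths, exactly as decoded below.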
*/ 2290 switch (scmd->cmnd[0]) { 2291 case WRITE_6: 2292 is_write = true; 2293 fallthrough; 2294 case READ_6: 2295 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2296 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2297 block_cnt = (u32)scmd->cmnd[4]; 2298 if (block_cnt == 0) 2299 block_cnt = 256; 2300 break; 2301 case WRITE_10: 2302 is_write = true; 2303 fallthrough; 2304 case READ_10: 2305 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2306 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2307 break; 2308 case WRITE_12: 2309 is_write = true; 2310 fallthrough; 2311 case READ_12: 2312 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2313 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2314 break; 2315 case WRITE_16: 2316 is_write = true; 2317 fallthrough; 2318 case READ_16: 2319 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2320 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2321 break; 2322 default: 2323 /* Process via normal I/O path. */ 2324 return PQI_RAID_BYPASS_INELIGIBLE; 2325 } 2326 2327 /* Check for write to non-RAID-0. */ 2328 if (is_write && device->raid_level != SA_RAID_0) 2329 return PQI_RAID_BYPASS_INELIGIBLE; 2330 2331 if (unlikely(block_cnt == 0)) 2332 return PQI_RAID_BYPASS_INELIGIBLE; 2333 2334 last_block = first_block + block_cnt - 1; 2335 raid_map = device->raid_map; 2336 2337 /* Check for invalid block or wraparound. */ 2338 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2339 last_block < first_block) 2340 return PQI_RAID_BYPASS_INELIGIBLE; 2341 2342 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2343 strip_size = get_unaligned_le16(&raid_map->strip_size); 2344 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2345 2346 /* Calculate stripe information for the request. */ 2347 blocks_per_row = data_disks_per_row * strip_size; 2348 #if BITS_PER_LONG == 32 2349 tmpdiv = first_block; 2350 do_div(tmpdiv, blocks_per_row); 2351 first_row = tmpdiv; 2352 tmpdiv = last_block; 2353 do_div(tmpdiv, blocks_per_row); 2354 last_row = tmpdiv; 2355 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2356 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2357 tmpdiv = first_row_offset; 2358 do_div(tmpdiv, strip_size); 2359 first_column = tmpdiv; 2360 tmpdiv = last_row_offset; 2361 do_div(tmpdiv, strip_size); 2362 last_column = tmpdiv; 2363 #else 2364 first_row = first_block / blocks_per_row; 2365 last_row = last_block / blocks_per_row; 2366 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2367 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2368 first_column = first_row_offset / strip_size; 2369 last_column = last_row_offset / strip_size; 2370 #endif 2371 2372 /* If this isn't a single row/column then give to the controller. */ 2373 if (first_row != last_row || first_column != last_column) 2374 return PQI_RAID_BYPASS_INELIGIBLE; 2375 2376 /* Proceeding with driver mapping. 
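 * Illustration (made-up values): with 3 data disks per row and a strip
 * size of 128 blocks, blocks_per_row is 384, so first_block 1000 falls in
 * first_row 2 at first_row_offset 232, i.e. first_column 1; map_index then
 * picks that disk's entry within the rotated parity row computed below.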
*/ 2377 total_disks_per_row = data_disks_per_row + 2378 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2379 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2380 get_unaligned_le16(&raid_map->row_cnt); 2381 map_index = (map_row * total_disks_per_row) + first_column; 2382 2383 /* RAID 1 */ 2384 if (device->raid_level == SA_RAID_1) { 2385 if (device->offload_to_mirror) 2386 map_index += data_disks_per_row; 2387 device->offload_to_mirror = !device->offload_to_mirror; 2388 } else if (device->raid_level == SA_RAID_ADM) { 2389 /* RAID ADM */ 2390 /* 2391 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2392 * divisible by 3. 2393 */ 2394 offload_to_mirror = device->offload_to_mirror; 2395 if (offload_to_mirror == 0) { 2396 /* use physical disk in the first mirrored group. */ 2397 map_index %= data_disks_per_row; 2398 } else { 2399 do { 2400 /* 2401 * Determine mirror group that map_index 2402 * indicates. 2403 */ 2404 current_group = map_index / data_disks_per_row; 2405 2406 if (offload_to_mirror != current_group) { 2407 if (current_group < 2408 layout_map_count - 1) { 2409 /* 2410 * Select raid index from 2411 * next group. 2412 */ 2413 map_index += data_disks_per_row; 2414 current_group++; 2415 } else { 2416 /* 2417 * Select raid index from first 2418 * group. 2419 */ 2420 map_index %= data_disks_per_row; 2421 current_group = 0; 2422 } 2423 } 2424 } while (offload_to_mirror != current_group); 2425 } 2426 2427 /* Set mirror group to use next time. */ 2428 offload_to_mirror = 2429 (offload_to_mirror >= layout_map_count - 1) ? 2430 0 : offload_to_mirror + 1; 2431 device->offload_to_mirror = offload_to_mirror; 2432 /* 2433 * Avoid direct use of device->offload_to_mirror within this 2434 * function since multiple threads might simultaneously 2435 * increment it beyond the range of device->layout_map_count -1. 
2436 */ 2437 } else if ((device->raid_level == SA_RAID_5 || 2438 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2439 /* RAID 50/60 */ 2440 /* Verify first and last block are in same RAID group */ 2441 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2442 stripesize = r5or6_blocks_per_row * layout_map_count; 2443 #if BITS_PER_LONG == 32 2444 tmpdiv = first_block; 2445 first_group = do_div(tmpdiv, stripesize); 2446 tmpdiv = first_group; 2447 do_div(tmpdiv, r5or6_blocks_per_row); 2448 first_group = tmpdiv; 2449 tmpdiv = last_block; 2450 last_group = do_div(tmpdiv, stripesize); 2451 tmpdiv = last_group; 2452 do_div(tmpdiv, r5or6_blocks_per_row); 2453 last_group = tmpdiv; 2454 #else 2455 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2456 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2457 #endif 2458 if (first_group != last_group) 2459 return PQI_RAID_BYPASS_INELIGIBLE; 2460 2461 /* Verify request is in a single row of RAID 5/6 */ 2462 #if BITS_PER_LONG == 32 2463 tmpdiv = first_block; 2464 do_div(tmpdiv, stripesize); 2465 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2466 tmpdiv = last_block; 2467 do_div(tmpdiv, stripesize); 2468 r5or6_last_row = r0_last_row = tmpdiv; 2469 #else 2470 first_row = r5or6_first_row = r0_first_row = 2471 first_block / stripesize; 2472 r5or6_last_row = r0_last_row = last_block / stripesize; 2473 #endif 2474 if (r5or6_first_row != r5or6_last_row) 2475 return PQI_RAID_BYPASS_INELIGIBLE; 2476 2477 /* Verify request is in a single column */ 2478 #if BITS_PER_LONG == 32 2479 tmpdiv = first_block; 2480 first_row_offset = do_div(tmpdiv, stripesize); 2481 tmpdiv = first_row_offset; 2482 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2483 r5or6_first_row_offset = first_row_offset; 2484 tmpdiv = last_block; 2485 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2486 tmpdiv = r5or6_last_row_offset; 2487 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2488 tmpdiv = r5or6_first_row_offset; 2489 do_div(tmpdiv, strip_size); 2490 first_column = r5or6_first_column = tmpdiv; 2491 tmpdiv = r5or6_last_row_offset; 2492 do_div(tmpdiv, strip_size); 2493 r5or6_last_column = tmpdiv; 2494 #else 2495 first_row_offset = r5or6_first_row_offset = 2496 (u32)((first_block % stripesize) % 2497 r5or6_blocks_per_row); 2498 2499 r5or6_last_row_offset = 2500 (u32)((last_block % stripesize) % 2501 r5or6_blocks_per_row); 2502 2503 first_column = r5or6_first_row_offset / strip_size; 2504 r5or6_first_column = first_column; 2505 r5or6_last_column = r5or6_last_row_offset / strip_size; 2506 #endif 2507 if (r5or6_first_column != r5or6_last_column) 2508 return PQI_RAID_BYPASS_INELIGIBLE; 2509 2510 /* Request is eligible */ 2511 map_row = 2512 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2513 get_unaligned_le16(&raid_map->row_cnt); 2514 2515 map_index = (first_group * 2516 (get_unaligned_le16(&raid_map->row_cnt) * 2517 total_disks_per_row)) + 2518 (map_row * total_disks_per_row) + first_column; 2519 } 2520 2521 aio_handle = raid_map->disk_data[map_index].aio_handle; 2522 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2523 first_row * strip_size + 2524 (first_row_offset - first_column * strip_size); 2525 disk_block_cnt = block_cnt; 2526 2527 /* Handle differing logical/physical block sizes. 
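 * The shift converts the volume's logical block number and count into the
 * physical drive's units; e.g. a phys_blk_shift of 3 multiplies both by 8.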
*/ 2528 if (raid_map->phys_blk_shift) { 2529 disk_block <<= raid_map->phys_blk_shift; 2530 disk_block_cnt <<= raid_map->phys_blk_shift; 2531 } 2532 2533 if (unlikely(disk_block_cnt > 0xffff)) 2534 return PQI_RAID_BYPASS_INELIGIBLE; 2535 2536 /* Build the new CDB for the physical disk I/O. */ 2537 if (disk_block > 0xffffffff) { 2538 cdb[0] = is_write ? WRITE_16 : READ_16; 2539 cdb[1] = 0; 2540 put_unaligned_be64(disk_block, &cdb[2]); 2541 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2542 cdb[14] = 0; 2543 cdb[15] = 0; 2544 cdb_length = 16; 2545 } else { 2546 cdb[0] = is_write ? WRITE_10 : READ_10; 2547 cdb[1] = 0; 2548 put_unaligned_be32((u32)disk_block, &cdb[2]); 2549 cdb[6] = 0; 2550 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2551 cdb[9] = 0; 2552 cdb_length = 10; 2553 } 2554 2555 if (get_unaligned_le16(&raid_map->flags) & 2556 RAID_MAP_ENCRYPTION_ENABLED) { 2557 pqi_set_encryption_info(&encryption_info, raid_map, 2558 first_block); 2559 encryption_info_ptr = &encryption_info; 2560 } else { 2561 encryption_info_ptr = NULL; 2562 } 2563 2564 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2565 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2566 } 2567 2568 #define PQI_STATUS_IDLE 0x0 2569 2570 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2571 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2572 2573 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2574 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2575 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2576 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2577 #define PQI_DEVICE_STATE_ERROR 0x4 2578 2579 #define PQI_MODE_READY_TIMEOUT_SECS 30 2580 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2581 2582 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2583 { 2584 struct pqi_device_registers __iomem *pqi_registers; 2585 unsigned long timeout; 2586 u64 signature; 2587 u8 status; 2588 2589 pqi_registers = ctrl_info->pqi_registers; 2590 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies; 2591 2592 while (1) { 2593 signature = readq(&pqi_registers->signature); 2594 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2595 sizeof(signature)) == 0) 2596 break; 2597 if (time_after(jiffies, timeout)) { 2598 dev_err(&ctrl_info->pci_dev->dev, 2599 "timed out waiting for PQI signature\n"); 2600 return -ETIMEDOUT; 2601 } 2602 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2603 } 2604 2605 while (1) { 2606 status = readb(&pqi_registers->function_and_status_code); 2607 if (status == PQI_STATUS_IDLE) 2608 break; 2609 if (time_after(jiffies, timeout)) { 2610 dev_err(&ctrl_info->pci_dev->dev, 2611 "timed out waiting for PQI IDLE\n"); 2612 return -ETIMEDOUT; 2613 } 2614 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2615 } 2616 2617 while (1) { 2618 if (readl(&pqi_registers->device_status) == 2619 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2620 break; 2621 if (time_after(jiffies, timeout)) { 2622 dev_err(&ctrl_info->pci_dev->dev, 2623 "timed out waiting for PQI all registers ready\n"); 2624 return -ETIMEDOUT; 2625 } 2626 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2627 } 2628 2629 return 0; 2630 } 2631 2632 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2633 { 2634 struct pqi_scsi_dev *device; 2635 2636 device = io_request->scmd->device->hostdata; 2637 device->raid_bypass_enabled = false; 2638 device->aio_enabled = false; 2639 } 2640 2641 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2642 { 2643 struct pqi_ctrl_info *ctrl_info; 2644 struct pqi_scsi_dev *device; 2645 2646 device 
= sdev->hostdata; 2647 if (device->device_offline) 2648 return; 2649 2650 device->device_offline = true; 2651 ctrl_info = shost_to_hba(sdev->host); 2652 pqi_schedule_rescan_worker(ctrl_info); 2653 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 2654 path, ctrl_info->scsi_host->host_no, device->bus, 2655 device->target, device->lun); 2656 } 2657 2658 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2659 { 2660 u8 scsi_status; 2661 u8 host_byte; 2662 struct scsi_cmnd *scmd; 2663 struct pqi_raid_error_info *error_info; 2664 size_t sense_data_length; 2665 int residual_count; 2666 int xfer_count; 2667 struct scsi_sense_hdr sshdr; 2668 2669 scmd = io_request->scmd; 2670 if (!scmd) 2671 return; 2672 2673 error_info = io_request->error_info; 2674 scsi_status = error_info->status; 2675 host_byte = DID_OK; 2676 2677 switch (error_info->data_out_result) { 2678 case PQI_DATA_IN_OUT_GOOD: 2679 break; 2680 case PQI_DATA_IN_OUT_UNDERFLOW: 2681 xfer_count = 2682 get_unaligned_le32(&error_info->data_out_transferred); 2683 residual_count = scsi_bufflen(scmd) - xfer_count; 2684 scsi_set_resid(scmd, residual_count); 2685 if (xfer_count < scmd->underflow) 2686 host_byte = DID_SOFT_ERROR; 2687 break; 2688 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2689 case PQI_DATA_IN_OUT_ABORTED: 2690 host_byte = DID_ABORT; 2691 break; 2692 case PQI_DATA_IN_OUT_TIMEOUT: 2693 host_byte = DID_TIME_OUT; 2694 break; 2695 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2696 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2697 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2698 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2699 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2700 case PQI_DATA_IN_OUT_ERROR: 2701 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2702 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2703 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2704 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2705 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2706 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2707 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2708 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2709 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2710 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2711 default: 2712 host_byte = DID_ERROR; 2713 break; 2714 } 2715 2716 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2717 if (sense_data_length == 0) 2718 sense_data_length = 2719 get_unaligned_le16(&error_info->response_data_length); 2720 if (sense_data_length) { 2721 if (sense_data_length > sizeof(error_info->data)) 2722 sense_data_length = sizeof(error_info->data); 2723 2724 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2725 scsi_normalize_sense(error_info->data, 2726 sense_data_length, &sshdr) && 2727 sshdr.sense_key == HARDWARE_ERROR && 2728 sshdr.asc == 0x3e) { 2729 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 2730 struct pqi_scsi_dev *device = scmd->device->hostdata; 2731 2732 switch (sshdr.ascq) { 2733 case 0x1: /* LOGICAL UNIT FAILURE */ 2734 if (printk_ratelimit()) 2735 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 2736 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 2737 pqi_take_device_offline(scmd->device, "RAID"); 2738 host_byte = DID_NO_CONNECT; 2739 break; 2740 2741 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 2742 if (printk_ratelimit()) 2743 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 2744 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 2745 break; 2746 } 2747 } 2748 2749 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2750 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2751 memcpy(scmd->sense_buffer, error_info->data, 2752 sense_data_length); 2753 } 2754 2755 scmd->result = scsi_status; 2756 set_host_byte(scmd, host_byte); 2757 } 2758 2759 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2760 { 2761 u8 scsi_status; 2762 u8 host_byte; 2763 struct scsi_cmnd *scmd; 2764 struct pqi_aio_error_info *error_info; 2765 size_t sense_data_length; 2766 int residual_count; 2767 int xfer_count; 2768 bool device_offline; 2769 2770 scmd = io_request->scmd; 2771 error_info = io_request->error_info; 2772 host_byte = DID_OK; 2773 sense_data_length = 0; 2774 device_offline = false; 2775 2776 switch (error_info->service_response) { 2777 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2778 scsi_status = error_info->status; 2779 break; 2780 case PQI_AIO_SERV_RESPONSE_FAILURE: 2781 switch (error_info->status) { 2782 case PQI_AIO_STATUS_IO_ABORTED: 2783 scsi_status = SAM_STAT_TASK_ABORTED; 2784 break; 2785 case PQI_AIO_STATUS_UNDERRUN: 2786 scsi_status = SAM_STAT_GOOD; 2787 residual_count = get_unaligned_le32( 2788 &error_info->residual_count); 2789 scsi_set_resid(scmd, residual_count); 2790 xfer_count = scsi_bufflen(scmd) - residual_count; 2791 if (xfer_count < scmd->underflow) 2792 host_byte = DID_SOFT_ERROR; 2793 break; 2794 case PQI_AIO_STATUS_OVERRUN: 2795 scsi_status = SAM_STAT_GOOD; 2796 break; 2797 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2798 pqi_aio_path_disabled(io_request); 2799 scsi_status = SAM_STAT_GOOD; 2800 io_request->status = -EAGAIN; 2801 break; 2802 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2803 case PQI_AIO_STATUS_INVALID_DEVICE: 2804 if (!io_request->raid_bypass) { 2805 device_offline = true; 2806 pqi_take_device_offline(scmd->device, "AIO"); 2807 host_byte = DID_NO_CONNECT; 2808 } 2809 scsi_status = SAM_STAT_CHECK_CONDITION; 2810 break; 2811 case PQI_AIO_STATUS_IO_ERROR: 2812 default: 2813 scsi_status = SAM_STAT_CHECK_CONDITION; 2814 break; 2815 } 2816 break; 2817 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2818 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2819 scsi_status = SAM_STAT_GOOD; 2820 break; 2821 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2822 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2823 default: 2824 scsi_status = SAM_STAT_CHECK_CONDITION; 2825 break; 2826 } 2827 2828 if (error_info->data_present) { 2829 sense_data_length = 2830 get_unaligned_le16(&error_info->data_length); 2831 if (sense_data_length) { 2832 if (sense_data_length > sizeof(error_info->data)) 2833 sense_data_length = sizeof(error_info->data); 2834 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2835 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2836 memcpy(scmd->sense_buffer, error_info->data, 2837 sense_data_length); 2838 } 2839 } 2840 2841 if (device_offline && sense_data_length == 0) 2842 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2843 0x3e, 0x1); 2844 2845 scmd->result = scsi_status; 2846 set_host_byte(scmd, host_byte); 2847 } 2848 2849 static void pqi_process_io_error(unsigned int iu_type, 2850 struct pqi_io_request *io_request) 2851 { 2852 switch (iu_type) { 2853 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2854 pqi_process_raid_io_error(io_request); 2855 break; 2856 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2857 pqi_process_aio_io_error(io_request); 2858 break; 2859 } 2860 } 2861 2862 static int pqi_interpret_task_management_response( 2863 
struct pqi_task_management_response *response) 2864 { 2865 int rc; 2866 2867 switch (response->response_code) { 2868 case SOP_TMF_COMPLETE: 2869 case SOP_TMF_FUNCTION_SUCCEEDED: 2870 rc = 0; 2871 break; 2872 case SOP_TMF_REJECTED: 2873 rc = -EAGAIN; 2874 break; 2875 default: 2876 rc = -EIO; 2877 break; 2878 } 2879 2880 return rc; 2881 } 2882 2883 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info) 2884 { 2885 pqi_take_ctrl_offline(ctrl_info); 2886 } 2887 2888 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) 2889 { 2890 int num_responses; 2891 pqi_index_t oq_pi; 2892 pqi_index_t oq_ci; 2893 struct pqi_io_request *io_request; 2894 struct pqi_io_response *response; 2895 u16 request_id; 2896 2897 num_responses = 0; 2898 oq_ci = queue_group->oq_ci_copy; 2899 2900 while (1) { 2901 oq_pi = readl(queue_group->oq_pi); 2902 if (oq_pi >= ctrl_info->num_elements_per_oq) { 2903 pqi_invalid_response(ctrl_info); 2904 dev_err(&ctrl_info->pci_dev->dev, 2905 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 2906 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); 2907 return -1; 2908 } 2909 if (oq_pi == oq_ci) 2910 break; 2911 2912 num_responses++; 2913 response = queue_group->oq_element_array + 2914 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2915 2916 request_id = get_unaligned_le16(&response->request_id); 2917 if (request_id >= ctrl_info->max_io_slots) { 2918 pqi_invalid_response(ctrl_info); 2919 dev_err(&ctrl_info->pci_dev->dev, 2920 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", 2921 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); 2922 return -1; 2923 } 2924 2925 io_request = &ctrl_info->io_request_pool[request_id]; 2926 if (atomic_read(&io_request->refcount) == 0) { 2927 pqi_invalid_response(ctrl_info); 2928 dev_err(&ctrl_info->pci_dev->dev, 2929 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", 2930 request_id, oq_pi, oq_ci); 2931 return -1; 2932 } 2933 2934 switch (response->header.iu_type) { 2935 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2936 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2937 if (io_request->scmd) 2938 io_request->scmd->result = 0; 2939 fallthrough; 2940 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2941 break; 2942 case PQI_RESPONSE_IU_VENDOR_GENERAL: 2943 io_request->status = 2944 get_unaligned_le16( 2945 &((struct pqi_vendor_general_response *) 2946 response)->status); 2947 break; 2948 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2949 io_request->status = 2950 pqi_interpret_task_management_response( 2951 (void *)response); 2952 break; 2953 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2954 pqi_aio_path_disabled(io_request); 2955 io_request->status = -EAGAIN; 2956 break; 2957 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2958 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2959 io_request->error_info = ctrl_info->error_buffer + 2960 (get_unaligned_le16(&response->error_index) * 2961 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2962 pqi_process_io_error(response->header.iu_type, io_request); 2963 break; 2964 default: 2965 pqi_invalid_response(ctrl_info); 2966 dev_err(&ctrl_info->pci_dev->dev, 2967 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", 2968 response->header.iu_type, oq_pi, oq_ci); 2969 return -1; 2970 } 2971 2972 io_request->io_complete_callback(io_request, io_request->context); 2973 2974 /* 2975 * Note that the I/O request structure CANNOT BE TOUCHED after 2976 * returning from 
the I/O completion callback! 2977 */ 2978 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2979 } 2980 2981 if (num_responses) { 2982 queue_group->oq_ci_copy = oq_ci; 2983 writel(oq_ci, queue_group->oq_ci); 2984 } 2985 2986 return num_responses; 2987 } 2988 2989 static inline unsigned int pqi_num_elements_free(unsigned int pi, 2990 unsigned int ci, unsigned int elements_in_queue) 2991 { 2992 unsigned int num_elements_used; 2993 2994 if (pi >= ci) 2995 num_elements_used = pi - ci; 2996 else 2997 num_elements_used = elements_in_queue - ci + pi; 2998 2999 return elements_in_queue - num_elements_used - 1; 3000 } 3001 3002 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3003 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3004 { 3005 pqi_index_t iq_pi; 3006 pqi_index_t iq_ci; 3007 unsigned long flags; 3008 void *next_element; 3009 struct pqi_queue_group *queue_group; 3010 3011 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3012 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3013 3014 while (1) { 3015 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3016 3017 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3018 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3019 3020 if (pqi_num_elements_free(iq_pi, iq_ci, 3021 ctrl_info->num_elements_per_iq)) 3022 break; 3023 3024 spin_unlock_irqrestore( 3025 &queue_group->submit_lock[RAID_PATH], flags); 3026 3027 if (pqi_ctrl_offline(ctrl_info)) 3028 return; 3029 } 3030 3031 next_element = queue_group->iq_element_array[RAID_PATH] + 3032 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3033 3034 memcpy(next_element, iu, iu_length); 3035 3036 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3037 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3038 3039 /* 3040 * This write notifies the controller that an IU is available to be 3041 * processed. 
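 * The IU was copied into the queue element array above, before the
 * producer index is advanced here, so the controller should never see a
 * partially written element.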
3042 */ 3043 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3044 3045 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3046 } 3047 3048 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3049 struct pqi_event *event) 3050 { 3051 struct pqi_event_acknowledge_request request; 3052 3053 memset(&request, 0, sizeof(request)); 3054 3055 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3056 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3057 &request.header.iu_length); 3058 request.event_type = event->event_type; 3059 request.event_id = event->event_id; 3060 request.additional_event_id = event->additional_event_id; 3061 3062 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3063 } 3064 3065 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3066 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3067 3068 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3069 struct pqi_ctrl_info *ctrl_info) 3070 { 3071 unsigned long timeout; 3072 u8 status; 3073 3074 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; 3075 3076 while (1) { 3077 status = pqi_read_soft_reset_status(ctrl_info); 3078 if (status & PQI_SOFT_RESET_INITIATE) 3079 return RESET_INITIATE_DRIVER; 3080 3081 if (status & PQI_SOFT_RESET_ABORT) 3082 return RESET_ABORT; 3083 3084 if (time_after(jiffies, timeout)) { 3085 dev_err(&ctrl_info->pci_dev->dev, 3086 "timed out waiting for soft reset status\n"); 3087 return RESET_TIMEDOUT; 3088 } 3089 3090 if (!sis_is_firmware_running(ctrl_info)) 3091 return RESET_NORESPONSE; 3092 3093 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3094 } 3095 } 3096 3097 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, 3098 enum pqi_soft_reset_status reset_status) 3099 { 3100 int rc; 3101 3102 switch (reset_status) { 3103 case RESET_INITIATE_DRIVER: 3104 case RESET_TIMEDOUT: 3105 dev_info(&ctrl_info->pci_dev->dev, 3106 "resetting controller %u\n", ctrl_info->ctrl_id); 3107 sis_soft_reset(ctrl_info); 3108 fallthrough; 3109 case RESET_INITIATE_FIRMWARE: 3110 rc = pqi_ofa_ctrl_restart(ctrl_info); 3111 pqi_ofa_free_host_buffer(ctrl_info); 3112 dev_info(&ctrl_info->pci_dev->dev, 3113 "Online Firmware Activation for controller %u: %s\n", 3114 ctrl_info->ctrl_id, rc == 0 ? 
"SUCCESS" : "FAILED"); 3115 break; 3116 case RESET_ABORT: 3117 pqi_ofa_ctrl_unquiesce(ctrl_info); 3118 dev_info(&ctrl_info->pci_dev->dev, 3119 "Online Firmware Activation for controller %u: %s\n", 3120 ctrl_info->ctrl_id, "ABORTED"); 3121 break; 3122 case RESET_NORESPONSE: 3123 pqi_ofa_free_host_buffer(ctrl_info); 3124 pqi_take_ctrl_offline(ctrl_info); 3125 break; 3126 } 3127 } 3128 3129 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3130 struct pqi_event *event) 3131 { 3132 u16 event_id; 3133 enum pqi_soft_reset_status status; 3134 3135 event_id = get_unaligned_le16(&event->event_id); 3136 3137 mutex_lock(&ctrl_info->ofa_mutex); 3138 3139 if (event_id == PQI_EVENT_OFA_QUIESCE) { 3140 dev_info(&ctrl_info->pci_dev->dev, 3141 "Received Online Firmware Activation quiesce event for controller %u\n", 3142 ctrl_info->ctrl_id); 3143 pqi_ofa_ctrl_quiesce(ctrl_info); 3144 pqi_acknowledge_event(ctrl_info, event); 3145 if (ctrl_info->soft_reset_handshake_supported) { 3146 status = pqi_poll_for_soft_reset_status(ctrl_info); 3147 pqi_process_soft_reset(ctrl_info, status); 3148 } else { 3149 pqi_process_soft_reset(ctrl_info, 3150 RESET_INITIATE_FIRMWARE); 3151 } 3152 3153 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3154 pqi_acknowledge_event(ctrl_info, event); 3155 pqi_ofa_setup_host_buffer(ctrl_info, 3156 le32_to_cpu(event->ofa_bytes_requested)); 3157 pqi_ofa_host_memory_update(ctrl_info); 3158 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3159 pqi_ofa_free_host_buffer(ctrl_info); 3160 pqi_acknowledge_event(ctrl_info, event); 3161 dev_info(&ctrl_info->pci_dev->dev, 3162 "Online Firmware Activation(%u) cancel reason : %u\n", 3163 ctrl_info->ctrl_id, event->ofa_cancel_reason); 3164 } 3165 3166 mutex_unlock(&ctrl_info->ofa_mutex); 3167 } 3168 3169 static void pqi_event_worker(struct work_struct *work) 3170 { 3171 unsigned int i; 3172 struct pqi_ctrl_info *ctrl_info; 3173 struct pqi_event *event; 3174 3175 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3176 3177 pqi_ctrl_busy(ctrl_info); 3178 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 3179 if (pqi_ctrl_offline(ctrl_info)) 3180 goto out; 3181 3182 pqi_schedule_rescan_worker_delayed(ctrl_info); 3183 3184 event = ctrl_info->events; 3185 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3186 if (event->pending) { 3187 event->pending = false; 3188 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3189 pqi_ctrl_unbusy(ctrl_info); 3190 pqi_ofa_process_event(ctrl_info, event); 3191 return; 3192 } 3193 pqi_acknowledge_event(ctrl_info, event); 3194 } 3195 event++; 3196 } 3197 3198 out: 3199 pqi_ctrl_unbusy(ctrl_info); 3200 } 3201 3202 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ) 3203 3204 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3205 { 3206 int num_interrupts; 3207 u32 heartbeat_count; 3208 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 3209 heartbeat_timer); 3210 3211 pqi_check_ctrl_health(ctrl_info); 3212 if (pqi_ctrl_offline(ctrl_info)) 3213 return; 3214 3215 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3216 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3217 3218 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3219 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3220 dev_err(&ctrl_info->pci_dev->dev, 3221 "no heartbeat detected - last heartbeat count: %u\n", 3222 heartbeat_count); 3223 pqi_take_ctrl_offline(ctrl_info); 3224 return; 3225 } 3226 } else { 3227 ctrl_info->previous_num_interrupts = num_interrupts; 3228 } 
3229 3230 ctrl_info->previous_heartbeat_count = heartbeat_count; 3231 mod_timer(&ctrl_info->heartbeat_timer, 3232 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3233 } 3234 3235 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3236 { 3237 if (!ctrl_info->heartbeat_counter) 3238 return; 3239 3240 ctrl_info->previous_num_interrupts = 3241 atomic_read(&ctrl_info->num_interrupts); 3242 ctrl_info->previous_heartbeat_count = 3243 pqi_read_heartbeat_counter(ctrl_info); 3244 3245 ctrl_info->heartbeat_timer.expires = 3246 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3247 add_timer(&ctrl_info->heartbeat_timer); 3248 } 3249 3250 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3251 { 3252 del_timer_sync(&ctrl_info->heartbeat_timer); 3253 } 3254 3255 static inline int pqi_event_type_to_event_index(unsigned int event_type) 3256 { 3257 int index; 3258 3259 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 3260 if (event_type == pqi_supported_event_types[index]) 3261 return index; 3262 3263 return -1; 3264 } 3265 3266 static inline bool pqi_is_supported_event(unsigned int event_type) 3267 { 3268 return pqi_event_type_to_event_index(event_type) != -1; 3269 } 3270 3271 static void pqi_ofa_capture_event_payload(struct pqi_event *event, 3272 struct pqi_event_response *response) 3273 { 3274 u16 event_id; 3275 3276 event_id = get_unaligned_le16(&event->event_id); 3277 3278 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3279 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3280 event->ofa_bytes_requested = 3281 response->data.ofa_memory_allocation.bytes_requested; 3282 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3283 event->ofa_cancel_reason = 3284 response->data.ofa_cancelled.reason; 3285 } 3286 } 3287 } 3288 3289 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3290 { 3291 int num_events; 3292 pqi_index_t oq_pi; 3293 pqi_index_t oq_ci; 3294 struct pqi_event_queue *event_queue; 3295 struct pqi_event_response *response; 3296 struct pqi_event *event; 3297 int event_index; 3298 3299 event_queue = &ctrl_info->event_queue; 3300 num_events = 0; 3301 oq_ci = event_queue->oq_ci_copy; 3302 3303 while (1) { 3304 oq_pi = readl(event_queue->oq_pi); 3305 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { 3306 pqi_invalid_response(ctrl_info); 3307 dev_err(&ctrl_info->pci_dev->dev, 3308 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3309 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); 3310 return -1; 3311 } 3312 3313 if (oq_pi == oq_ci) 3314 break; 3315 3316 num_events++; 3317 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3318 3319 event_index = 3320 pqi_event_type_to_event_index(response->event_type); 3321 3322 if (event_index >= 0 && response->request_acknowledge) { 3323 event = &ctrl_info->events[event_index]; 3324 event->pending = true; 3325 event->event_type = response->event_type; 3326 event->event_id = response->event_id; 3327 event->additional_event_id = response->additional_event_id; 3328 if (event->event_type == PQI_EVENT_TYPE_OFA) 3329 pqi_ofa_capture_event_payload(event, response); 3330 } 3331 3332 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3333 } 3334 3335 if (num_events) { 3336 event_queue->oq_ci_copy = oq_ci; 3337 writel(oq_ci, event_queue->oq_ci); 3338 schedule_work(&ctrl_info->event_work); 3339 } 3340 3341 return num_events; 3342 } 3343 3344 #define PQI_LEGACY_INTX_MASK 0x1 3345 3346 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info 
*ctrl_info, 3347 bool enable_intx) 3348 { 3349 u32 intx_mask; 3350 struct pqi_device_registers __iomem *pqi_registers; 3351 volatile void __iomem *register_addr; 3352 3353 pqi_registers = ctrl_info->pqi_registers; 3354 3355 if (enable_intx) 3356 register_addr = &pqi_registers->legacy_intx_mask_clear; 3357 else 3358 register_addr = &pqi_registers->legacy_intx_mask_set; 3359 3360 intx_mask = readl(register_addr); 3361 intx_mask |= PQI_LEGACY_INTX_MASK; 3362 writel(intx_mask, register_addr); 3363 } 3364 3365 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3366 enum pqi_irq_mode new_mode) 3367 { 3368 switch (ctrl_info->irq_mode) { 3369 case IRQ_MODE_MSIX: 3370 switch (new_mode) { 3371 case IRQ_MODE_MSIX: 3372 break; 3373 case IRQ_MODE_INTX: 3374 pqi_configure_legacy_intx(ctrl_info, true); 3375 sis_enable_intx(ctrl_info); 3376 break; 3377 case IRQ_MODE_NONE: 3378 break; 3379 } 3380 break; 3381 case IRQ_MODE_INTX: 3382 switch (new_mode) { 3383 case IRQ_MODE_MSIX: 3384 pqi_configure_legacy_intx(ctrl_info, false); 3385 sis_enable_msix(ctrl_info); 3386 break; 3387 case IRQ_MODE_INTX: 3388 break; 3389 case IRQ_MODE_NONE: 3390 pqi_configure_legacy_intx(ctrl_info, false); 3391 break; 3392 } 3393 break; 3394 case IRQ_MODE_NONE: 3395 switch (new_mode) { 3396 case IRQ_MODE_MSIX: 3397 sis_enable_msix(ctrl_info); 3398 break; 3399 case IRQ_MODE_INTX: 3400 pqi_configure_legacy_intx(ctrl_info, true); 3401 sis_enable_intx(ctrl_info); 3402 break; 3403 case IRQ_MODE_NONE: 3404 break; 3405 } 3406 break; 3407 } 3408 3409 ctrl_info->irq_mode = new_mode; 3410 } 3411 3412 #define PQI_LEGACY_INTX_PENDING 0x1 3413 3414 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3415 { 3416 bool valid_irq; 3417 u32 intx_status; 3418 3419 switch (ctrl_info->irq_mode) { 3420 case IRQ_MODE_MSIX: 3421 valid_irq = true; 3422 break; 3423 case IRQ_MODE_INTX: 3424 intx_status = 3425 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3426 if (intx_status & PQI_LEGACY_INTX_PENDING) 3427 valid_irq = true; 3428 else 3429 valid_irq = false; 3430 break; 3431 case IRQ_MODE_NONE: 3432 default: 3433 valid_irq = false; 3434 break; 3435 } 3436 3437 return valid_irq; 3438 } 3439 3440 static irqreturn_t pqi_irq_handler(int irq, void *data) 3441 { 3442 struct pqi_ctrl_info *ctrl_info; 3443 struct pqi_queue_group *queue_group; 3444 int num_io_responses_handled; 3445 int num_events_handled; 3446 3447 queue_group = data; 3448 ctrl_info = queue_group->ctrl_info; 3449 3450 if (!pqi_is_valid_irq(ctrl_info)) 3451 return IRQ_NONE; 3452 3453 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3454 if (num_io_responses_handled < 0) 3455 goto out; 3456 3457 if (irq == ctrl_info->event_irq) { 3458 num_events_handled = pqi_process_event_intr(ctrl_info); 3459 if (num_events_handled < 0) 3460 goto out; 3461 } else { 3462 num_events_handled = 0; 3463 } 3464 3465 if (num_io_responses_handled + num_events_handled > 0) 3466 atomic_inc(&ctrl_info->num_interrupts); 3467 3468 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3469 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3470 3471 out: 3472 return IRQ_HANDLED; 3473 } 3474 3475 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3476 { 3477 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3478 int i; 3479 int rc; 3480 3481 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3482 3483 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3484 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3485 DRIVER_NAME_SHORT, 
&ctrl_info->queue_groups[i]); 3486 if (rc) { 3487 dev_err(&pci_dev->dev, 3488 "irq %u init failed with error %d\n", 3489 pci_irq_vector(pci_dev, i), rc); 3490 return rc; 3491 } 3492 ctrl_info->num_msix_vectors_initialized++; 3493 } 3494 3495 return 0; 3496 } 3497 3498 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3499 { 3500 int i; 3501 3502 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3503 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3504 &ctrl_info->queue_groups[i]); 3505 3506 ctrl_info->num_msix_vectors_initialized = 0; 3507 } 3508 3509 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3510 { 3511 int num_vectors_enabled; 3512 3513 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3514 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3515 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3516 if (num_vectors_enabled < 0) { 3517 dev_err(&ctrl_info->pci_dev->dev, 3518 "MSI-X init failed with error %d\n", 3519 num_vectors_enabled); 3520 return num_vectors_enabled; 3521 } 3522 3523 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3524 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3525 return 0; 3526 } 3527 3528 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3529 { 3530 if (ctrl_info->num_msix_vectors_enabled) { 3531 pci_free_irq_vectors(ctrl_info->pci_dev); 3532 ctrl_info->num_msix_vectors_enabled = 0; 3533 } 3534 } 3535 3536 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3537 { 3538 unsigned int i; 3539 size_t alloc_length; 3540 size_t element_array_length_per_iq; 3541 size_t element_array_length_per_oq; 3542 void *element_array; 3543 void __iomem *next_queue_index; 3544 void *aligned_pointer; 3545 unsigned int num_inbound_queues; 3546 unsigned int num_outbound_queues; 3547 unsigned int num_queue_indexes; 3548 struct pqi_queue_group *queue_group; 3549 3550 element_array_length_per_iq = 3551 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3552 ctrl_info->num_elements_per_iq; 3553 element_array_length_per_oq = 3554 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3555 ctrl_info->num_elements_per_oq; 3556 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3557 num_outbound_queues = ctrl_info->num_queue_groups; 3558 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3559 3560 aligned_pointer = NULL; 3561 3562 for (i = 0; i < num_inbound_queues; i++) { 3563 aligned_pointer = PTR_ALIGN(aligned_pointer, 3564 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3565 aligned_pointer += element_array_length_per_iq; 3566 } 3567 3568 for (i = 0; i < num_outbound_queues; i++) { 3569 aligned_pointer = PTR_ALIGN(aligned_pointer, 3570 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3571 aligned_pointer += element_array_length_per_oq; 3572 } 3573 3574 aligned_pointer = PTR_ALIGN(aligned_pointer, 3575 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3576 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3577 PQI_EVENT_OQ_ELEMENT_LENGTH; 3578 3579 for (i = 0; i < num_queue_indexes; i++) { 3580 aligned_pointer = PTR_ALIGN(aligned_pointer, 3581 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3582 aligned_pointer += sizeof(pqi_index_t); 3583 } 3584 3585 alloc_length = (size_t)aligned_pointer + 3586 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3587 3588 alloc_length += PQI_EXTRA_SGL_MEMORY; 3589 3590 ctrl_info->queue_memory_base = 3591 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3592 &ctrl_info->queue_memory_base_dma_handle, 3593 GFP_KERNEL); 3594 3595 if (!ctrl_info->queue_memory_base) 3596 return -ENOMEM; 3597 3598 ctrl_info->queue_memory_length = alloc_length; 3599 
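/*
 * Carve the single coherent allocation up in the same order used to size
 * it above: the per-group RAID and AIO inbound (IQ) element arrays, the
 * per-group outbound (OQ) element arrays, the event queue elements, and
 * finally the aligned queue index words.
 */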
3600 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3601 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3602 3603 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3604 queue_group = &ctrl_info->queue_groups[i]; 3605 queue_group->iq_element_array[RAID_PATH] = element_array; 3606 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3607 ctrl_info->queue_memory_base_dma_handle + 3608 (element_array - ctrl_info->queue_memory_base); 3609 element_array += element_array_length_per_iq; 3610 element_array = PTR_ALIGN(element_array, 3611 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3612 queue_group->iq_element_array[AIO_PATH] = element_array; 3613 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3614 ctrl_info->queue_memory_base_dma_handle + 3615 (element_array - ctrl_info->queue_memory_base); 3616 element_array += element_array_length_per_iq; 3617 element_array = PTR_ALIGN(element_array, 3618 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3619 } 3620 3621 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3622 queue_group = &ctrl_info->queue_groups[i]; 3623 queue_group->oq_element_array = element_array; 3624 queue_group->oq_element_array_bus_addr = 3625 ctrl_info->queue_memory_base_dma_handle + 3626 (element_array - ctrl_info->queue_memory_base); 3627 element_array += element_array_length_per_oq; 3628 element_array = PTR_ALIGN(element_array, 3629 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3630 } 3631 3632 ctrl_info->event_queue.oq_element_array = element_array; 3633 ctrl_info->event_queue.oq_element_array_bus_addr = 3634 ctrl_info->queue_memory_base_dma_handle + 3635 (element_array - ctrl_info->queue_memory_base); 3636 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3637 PQI_EVENT_OQ_ELEMENT_LENGTH; 3638 3639 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 3640 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3641 3642 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3643 queue_group = &ctrl_info->queue_groups[i]; 3644 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3645 queue_group->iq_ci_bus_addr[RAID_PATH] = 3646 ctrl_info->queue_memory_base_dma_handle + 3647 (next_queue_index - 3648 (void __iomem *)ctrl_info->queue_memory_base); 3649 next_queue_index += sizeof(pqi_index_t); 3650 next_queue_index = PTR_ALIGN(next_queue_index, 3651 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3652 queue_group->iq_ci[AIO_PATH] = next_queue_index; 3653 queue_group->iq_ci_bus_addr[AIO_PATH] = 3654 ctrl_info->queue_memory_base_dma_handle + 3655 (next_queue_index - 3656 (void __iomem *)ctrl_info->queue_memory_base); 3657 next_queue_index += sizeof(pqi_index_t); 3658 next_queue_index = PTR_ALIGN(next_queue_index, 3659 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3660 queue_group->oq_pi = next_queue_index; 3661 queue_group->oq_pi_bus_addr = 3662 ctrl_info->queue_memory_base_dma_handle + 3663 (next_queue_index - 3664 (void __iomem *)ctrl_info->queue_memory_base); 3665 next_queue_index += sizeof(pqi_index_t); 3666 next_queue_index = PTR_ALIGN(next_queue_index, 3667 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3668 } 3669 3670 ctrl_info->event_queue.oq_pi = next_queue_index; 3671 ctrl_info->event_queue.oq_pi_bus_addr = 3672 ctrl_info->queue_memory_base_dma_handle + 3673 (next_queue_index - 3674 (void __iomem *)ctrl_info->queue_memory_base); 3675 3676 return 0; 3677 } 3678 3679 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3680 { 3681 unsigned int i; 3682 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3683 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3684 3685 /* 3686 * Initialize the backpointers to the controller structure in 3687 * each 
operational queue group structure. 3688 */ 3689 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3690 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3691 3692 /* 3693 * Assign IDs to all operational queues. Note that the IDs 3694 * assigned to operational IQs are independent of the IDs 3695 * assigned to operational OQs. 3696 */ 3697 ctrl_info->event_queue.oq_id = next_oq_id++; 3698 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3699 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3700 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3701 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3702 } 3703 3704 /* 3705 * Assign MSI-X table entry indexes to all queues. Note that the 3706 * interrupt for the event queue is shared with the first queue group. 3707 */ 3708 ctrl_info->event_queue.int_msg_num = 0; 3709 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3710 ctrl_info->queue_groups[i].int_msg_num = i; 3711 3712 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3713 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3714 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3715 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3716 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3717 } 3718 } 3719 3720 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3721 { 3722 size_t alloc_length; 3723 struct pqi_admin_queues_aligned *admin_queues_aligned; 3724 struct pqi_admin_queues *admin_queues; 3725 3726 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3727 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3728 3729 ctrl_info->admin_queue_memory_base = 3730 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3731 &ctrl_info->admin_queue_memory_base_dma_handle, 3732 GFP_KERNEL); 3733 3734 if (!ctrl_info->admin_queue_memory_base) 3735 return -ENOMEM; 3736 3737 ctrl_info->admin_queue_memory_length = alloc_length; 3738 3739 admin_queues = &ctrl_info->admin_queues; 3740 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3741 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3742 admin_queues->iq_element_array = 3743 &admin_queues_aligned->iq_element_array; 3744 admin_queues->oq_element_array = 3745 &admin_queues_aligned->oq_element_array; 3746 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3747 admin_queues->oq_pi = 3748 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 3749 3750 admin_queues->iq_element_array_bus_addr = 3751 ctrl_info->admin_queue_memory_base_dma_handle + 3752 (admin_queues->iq_element_array - 3753 ctrl_info->admin_queue_memory_base); 3754 admin_queues->oq_element_array_bus_addr = 3755 ctrl_info->admin_queue_memory_base_dma_handle + 3756 (admin_queues->oq_element_array - 3757 ctrl_info->admin_queue_memory_base); 3758 admin_queues->iq_ci_bus_addr = 3759 ctrl_info->admin_queue_memory_base_dma_handle + 3760 ((void *)admin_queues->iq_ci - 3761 ctrl_info->admin_queue_memory_base); 3762 admin_queues->oq_pi_bus_addr = 3763 ctrl_info->admin_queue_memory_base_dma_handle + 3764 ((void __iomem *)admin_queues->oq_pi - 3765 (void __iomem *)ctrl_info->admin_queue_memory_base); 3766 3767 return 0; 3768 } 3769 3770 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ 3771 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3772 3773 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3774 { 3775 struct pqi_device_registers __iomem *pqi_registers; 3776 struct pqi_admin_queues *admin_queues; 3777 unsigned long timeout; 3778 u8 status; 3779 u32 reg; 3780 3781 pqi_registers = 
ctrl_info->pqi_registers; 3782 admin_queues = &ctrl_info->admin_queues; 3783 3784 writeq((u64)admin_queues->iq_element_array_bus_addr, 3785 &pqi_registers->admin_iq_element_array_addr); 3786 writeq((u64)admin_queues->oq_element_array_bus_addr, 3787 &pqi_registers->admin_oq_element_array_addr); 3788 writeq((u64)admin_queues->iq_ci_bus_addr, 3789 &pqi_registers->admin_iq_ci_addr); 3790 writeq((u64)admin_queues->oq_pi_bus_addr, 3791 &pqi_registers->admin_oq_pi_addr); 3792 3793 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3794 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 3795 (admin_queues->int_msg_num << 16); 3796 writel(reg, &pqi_registers->admin_iq_num_elements); 3797 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3798 &pqi_registers->function_and_status_code); 3799 3800 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 3801 while (1) { 3802 status = readb(&pqi_registers->function_and_status_code); 3803 if (status == PQI_STATUS_IDLE) 3804 break; 3805 if (time_after(jiffies, timeout)) 3806 return -ETIMEDOUT; 3807 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3808 } 3809 3810 /* 3811 * The offset registers are not initialized to the correct 3812 * offsets until *after* the create admin queue pair command 3813 * completes successfully. 3814 */ 3815 admin_queues->iq_pi = ctrl_info->iomem_base + 3816 PQI_DEVICE_REGISTERS_OFFSET + 3817 readq(&pqi_registers->admin_iq_pi_offset); 3818 admin_queues->oq_ci = ctrl_info->iomem_base + 3819 PQI_DEVICE_REGISTERS_OFFSET + 3820 readq(&pqi_registers->admin_oq_ci_offset); 3821 3822 return 0; 3823 } 3824 3825 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3826 struct pqi_general_admin_request *request) 3827 { 3828 struct pqi_admin_queues *admin_queues; 3829 void *next_element; 3830 pqi_index_t iq_pi; 3831 3832 admin_queues = &ctrl_info->admin_queues; 3833 iq_pi = admin_queues->iq_pi_copy; 3834 3835 next_element = admin_queues->iq_element_array + 3836 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3837 3838 memcpy(next_element, request, sizeof(*request)); 3839 3840 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3841 admin_queues->iq_pi_copy = iq_pi; 3842 3843 /* 3844 * This write notifies the controller that an IU is available to be 3845 * processed. 
3846 */ 3847 writel(iq_pi, admin_queues->iq_pi); 3848 } 3849 3850 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3851 3852 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3853 struct pqi_general_admin_response *response) 3854 { 3855 struct pqi_admin_queues *admin_queues; 3856 pqi_index_t oq_pi; 3857 pqi_index_t oq_ci; 3858 unsigned long timeout; 3859 3860 admin_queues = &ctrl_info->admin_queues; 3861 oq_ci = admin_queues->oq_ci_copy; 3862 3863 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies; 3864 3865 while (1) { 3866 oq_pi = readl(admin_queues->oq_pi); 3867 if (oq_pi != oq_ci) 3868 break; 3869 if (time_after(jiffies, timeout)) { 3870 dev_err(&ctrl_info->pci_dev->dev, 3871 "timed out waiting for admin response\n"); 3872 return -ETIMEDOUT; 3873 } 3874 if (!sis_is_firmware_running(ctrl_info)) 3875 return -ENXIO; 3876 usleep_range(1000, 2000); 3877 } 3878 3879 memcpy(response, admin_queues->oq_element_array + 3880 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3881 3882 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3883 admin_queues->oq_ci_copy = oq_ci; 3884 writel(oq_ci, admin_queues->oq_ci); 3885 3886 return 0; 3887 } 3888 3889 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3890 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3891 struct pqi_io_request *io_request) 3892 { 3893 struct pqi_io_request *next; 3894 void *next_element; 3895 pqi_index_t iq_pi; 3896 pqi_index_t iq_ci; 3897 size_t iu_length; 3898 unsigned long flags; 3899 unsigned int num_elements_needed; 3900 unsigned int num_elements_to_end_of_queue; 3901 size_t copy_count; 3902 struct pqi_iu_header *request; 3903 3904 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3905 3906 if (io_request) { 3907 io_request->queue_group = queue_group; 3908 list_add_tail(&io_request->request_list_entry, 3909 &queue_group->request_list[path]); 3910 } 3911 3912 iq_pi = queue_group->iq_pi_copy[path]; 3913 3914 list_for_each_entry_safe(io_request, next, 3915 &queue_group->request_list[path], request_list_entry) { 3916 3917 request = io_request->iu; 3918 3919 iu_length = get_unaligned_le16(&request->iu_length) + 3920 PQI_REQUEST_HEADER_LENGTH; 3921 num_elements_needed = 3922 DIV_ROUND_UP(iu_length, 3923 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3924 3925 iq_ci = readl(queue_group->iq_ci[path]); 3926 3927 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3928 ctrl_info->num_elements_per_iq)) 3929 break; 3930 3931 put_unaligned_le16(queue_group->oq_id, 3932 &request->response_queue_id); 3933 3934 next_element = queue_group->iq_element_array[path] + 3935 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3936 3937 num_elements_to_end_of_queue = 3938 ctrl_info->num_elements_per_iq - iq_pi; 3939 3940 if (num_elements_needed <= num_elements_to_end_of_queue) { 3941 memcpy(next_element, request, iu_length); 3942 } else { 3943 copy_count = num_elements_to_end_of_queue * 3944 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3945 memcpy(next_element, request, copy_count); 3946 memcpy(queue_group->iq_element_array[path], 3947 (u8 *)request + copy_count, 3948 iu_length - copy_count); 3949 } 3950 3951 iq_pi = (iq_pi + num_elements_needed) % 3952 ctrl_info->num_elements_per_iq; 3953 3954 list_del(&io_request->request_list_entry); 3955 } 3956 3957 if (iq_pi != queue_group->iq_pi_copy[path]) { 3958 queue_group->iq_pi_copy[path] = iq_pi; 3959 /* 3960 * This write notifies the controller that one or more IUs are 3961 * available to be processed. 
3962 */ 3963 writel(iq_pi, queue_group->iq_pi[path]); 3964 } 3965 3966 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3967 } 3968 3969 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3970 3971 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3972 struct completion *wait) 3973 { 3974 int rc; 3975 3976 while (1) { 3977 if (wait_for_completion_io_timeout(wait, 3978 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) { 3979 rc = 0; 3980 break; 3981 } 3982 3983 pqi_check_ctrl_health(ctrl_info); 3984 if (pqi_ctrl_offline(ctrl_info)) { 3985 rc = -ENXIO; 3986 break; 3987 } 3988 } 3989 3990 return rc; 3991 } 3992 3993 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3994 void *context) 3995 { 3996 struct completion *waiting = context; 3997 3998 complete(waiting); 3999 } 4000 4001 static int pqi_process_raid_io_error_synchronous( 4002 struct pqi_raid_error_info *error_info) 4003 { 4004 int rc = -EIO; 4005 4006 switch (error_info->data_out_result) { 4007 case PQI_DATA_IN_OUT_GOOD: 4008 if (error_info->status == SAM_STAT_GOOD) 4009 rc = 0; 4010 break; 4011 case PQI_DATA_IN_OUT_UNDERFLOW: 4012 if (error_info->status == SAM_STAT_GOOD || 4013 error_info->status == SAM_STAT_CHECK_CONDITION) 4014 rc = 0; 4015 break; 4016 case PQI_DATA_IN_OUT_ABORTED: 4017 rc = PQI_CMD_STATUS_ABORTED; 4018 break; 4019 } 4020 4021 return rc; 4022 } 4023 4024 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4025 struct pqi_iu_header *request, unsigned int flags, 4026 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 4027 { 4028 int rc = 0; 4029 struct pqi_io_request *io_request; 4030 unsigned long start_jiffies; 4031 unsigned long msecs_blocked; 4032 size_t iu_length; 4033 DECLARE_COMPLETION_ONSTACK(wait); 4034 4035 /* 4036 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 4037 * are mutually exclusive. 
4038 */ 4039 4040 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4041 if (down_interruptible(&ctrl_info->sync_request_sem)) 4042 return -ERESTARTSYS; 4043 } else { 4044 if (timeout_msecs == NO_TIMEOUT) { 4045 down(&ctrl_info->sync_request_sem); 4046 } else { 4047 start_jiffies = jiffies; 4048 if (down_timeout(&ctrl_info->sync_request_sem, 4049 msecs_to_jiffies(timeout_msecs))) 4050 return -ETIMEDOUT; 4051 msecs_blocked = 4052 jiffies_to_msecs(jiffies - start_jiffies); 4053 if (msecs_blocked >= timeout_msecs) { 4054 rc = -ETIMEDOUT; 4055 goto out; 4056 } 4057 timeout_msecs -= msecs_blocked; 4058 } 4059 } 4060 4061 pqi_ctrl_busy(ctrl_info); 4062 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 4063 if (timeout_msecs == 0) { 4064 pqi_ctrl_unbusy(ctrl_info); 4065 rc = -ETIMEDOUT; 4066 goto out; 4067 } 4068 4069 if (pqi_ctrl_offline(ctrl_info)) { 4070 pqi_ctrl_unbusy(ctrl_info); 4071 rc = -ENXIO; 4072 goto out; 4073 } 4074 4075 atomic_inc(&ctrl_info->sync_cmds_outstanding); 4076 4077 io_request = pqi_alloc_io_request(ctrl_info); 4078 4079 put_unaligned_le16(io_request->index, 4080 &(((struct pqi_raid_path_request *)request)->request_id)); 4081 4082 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4083 ((struct pqi_raid_path_request *)request)->error_index = 4084 ((struct pqi_raid_path_request *)request)->request_id; 4085 4086 iu_length = get_unaligned_le16(&request->iu_length) + 4087 PQI_REQUEST_HEADER_LENGTH; 4088 memcpy(io_request->iu, request, iu_length); 4089 4090 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4091 io_request->context = &wait; 4092 4093 pqi_start_io(ctrl_info, 4094 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4095 io_request); 4096 4097 pqi_ctrl_unbusy(ctrl_info); 4098 4099 if (timeout_msecs == NO_TIMEOUT) { 4100 pqi_wait_for_completion_io(ctrl_info, &wait); 4101 } else { 4102 if (!wait_for_completion_io_timeout(&wait, 4103 msecs_to_jiffies(timeout_msecs))) { 4104 dev_warn(&ctrl_info->pci_dev->dev, 4105 "command timed out\n"); 4106 rc = -ETIMEDOUT; 4107 } 4108 } 4109 4110 if (error_info) { 4111 if (io_request->error_info) 4112 memcpy(error_info, io_request->error_info, 4113 sizeof(*error_info)); 4114 else 4115 memset(error_info, 0, sizeof(*error_info)); 4116 } else if (rc == 0 && io_request->error_info) { 4117 rc = pqi_process_raid_io_error_synchronous( 4118 io_request->error_info); 4119 } 4120 4121 pqi_free_io_request(io_request); 4122 4123 atomic_dec(&ctrl_info->sync_cmds_outstanding); 4124 out: 4125 up(&ctrl_info->sync_request_sem); 4126 4127 return rc; 4128 } 4129 4130 static int pqi_validate_admin_response( 4131 struct pqi_general_admin_response *response, u8 expected_function_code) 4132 { 4133 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4134 return -EINVAL; 4135 4136 if (get_unaligned_le16(&response->header.iu_length) != 4137 PQI_GENERAL_ADMIN_IU_LENGTH) 4138 return -EINVAL; 4139 4140 if (response->function_code != expected_function_code) 4141 return -EINVAL; 4142 4143 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4144 return -EINVAL; 4145 4146 return 0; 4147 } 4148 4149 static int pqi_submit_admin_request_synchronous( 4150 struct pqi_ctrl_info *ctrl_info, 4151 struct pqi_general_admin_request *request, 4152 struct pqi_general_admin_response *response) 4153 { 4154 int rc; 4155 4156 pqi_submit_admin_request(ctrl_info, request); 4157 4158 rc = pqi_poll_for_admin_response(ctrl_info, response); 4159 4160 if (rc == 0) 4161 rc = pqi_validate_admin_response(response, 4162 
request->function_code); 4163 4164 return rc; 4165 } 4166 4167 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4168 { 4169 int rc; 4170 struct pqi_general_admin_request request; 4171 struct pqi_general_admin_response response; 4172 struct pqi_device_capability *capability; 4173 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4174 4175 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4176 if (!capability) 4177 return -ENOMEM; 4178 4179 memset(&request, 0, sizeof(request)); 4180 4181 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4182 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4183 &request.header.iu_length); 4184 request.function_code = 4185 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4186 put_unaligned_le32(sizeof(*capability), 4187 &request.data.report_device_capability.buffer_length); 4188 4189 rc = pqi_map_single(ctrl_info->pci_dev, 4190 &request.data.report_device_capability.sg_descriptor, 4191 capability, sizeof(*capability), 4192 DMA_FROM_DEVICE); 4193 if (rc) 4194 goto out; 4195 4196 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4197 &response); 4198 4199 pqi_pci_unmap(ctrl_info->pci_dev, 4200 &request.data.report_device_capability.sg_descriptor, 1, 4201 DMA_FROM_DEVICE); 4202 4203 if (rc) 4204 goto out; 4205 4206 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4207 rc = -EIO; 4208 goto out; 4209 } 4210 4211 ctrl_info->max_inbound_queues = 4212 get_unaligned_le16(&capability->max_inbound_queues); 4213 ctrl_info->max_elements_per_iq = 4214 get_unaligned_le16(&capability->max_elements_per_iq); 4215 ctrl_info->max_iq_element_length = 4216 get_unaligned_le16(&capability->max_iq_element_length) 4217 * 16; 4218 ctrl_info->max_outbound_queues = 4219 get_unaligned_le16(&capability->max_outbound_queues); 4220 ctrl_info->max_elements_per_oq = 4221 get_unaligned_le16(&capability->max_elements_per_oq); 4222 ctrl_info->max_oq_element_length = 4223 get_unaligned_le16(&capability->max_oq_element_length) 4224 * 16; 4225 4226 sop_iu_layer_descriptor = 4227 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4228 4229 ctrl_info->max_inbound_iu_length_per_firmware = 4230 get_unaligned_le16( 4231 &sop_iu_layer_descriptor->max_inbound_iu_length); 4232 ctrl_info->inbound_spanning_supported = 4233 sop_iu_layer_descriptor->inbound_spanning_supported; 4234 ctrl_info->outbound_spanning_supported = 4235 sop_iu_layer_descriptor->outbound_spanning_supported; 4236 4237 out: 4238 kfree(capability); 4239 4240 return rc; 4241 } 4242 4243 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4244 { 4245 if (ctrl_info->max_iq_element_length < 4246 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4247 dev_err(&ctrl_info->pci_dev->dev, 4248 "max. inbound queue element length of %d is less than the required length of %d\n", 4249 ctrl_info->max_iq_element_length, 4250 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4251 return -EINVAL; 4252 } 4253 4254 if (ctrl_info->max_oq_element_length < 4255 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4256 dev_err(&ctrl_info->pci_dev->dev, 4257 "max. outbound queue element length of %d is less than the required length of %d\n", 4258 ctrl_info->max_oq_element_length, 4259 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4260 return -EINVAL; 4261 } 4262 4263 if (ctrl_info->max_inbound_iu_length_per_firmware < 4264 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4265 dev_err(&ctrl_info->pci_dev->dev, 4266 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 4267 ctrl_info->max_inbound_iu_length_per_firmware, 4268 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4269 return -EINVAL; 4270 } 4271 4272 if (!ctrl_info->inbound_spanning_supported) { 4273 dev_err(&ctrl_info->pci_dev->dev, 4274 "the controller does not support inbound spanning\n"); 4275 return -EINVAL; 4276 } 4277 4278 if (ctrl_info->outbound_spanning_supported) { 4279 dev_err(&ctrl_info->pci_dev->dev, 4280 "the controller supports outbound spanning but this driver does not\n"); 4281 return -EINVAL; 4282 } 4283 4284 return 0; 4285 } 4286 4287 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4288 { 4289 int rc; 4290 struct pqi_event_queue *event_queue; 4291 struct pqi_general_admin_request request; 4292 struct pqi_general_admin_response response; 4293 4294 event_queue = &ctrl_info->event_queue; 4295 4296 /* 4297 * Create OQ (Outbound Queue - device to host queue) to dedicate 4298 * to events. 4299 */ 4300 memset(&request, 0, sizeof(request)); 4301 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4302 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4303 &request.header.iu_length); 4304 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4305 put_unaligned_le16(event_queue->oq_id, 4306 &request.data.create_operational_oq.queue_id); 4307 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4308 &request.data.create_operational_oq.element_array_addr); 4309 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4310 &request.data.create_operational_oq.pi_addr); 4311 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4312 &request.data.create_operational_oq.num_elements); 4313 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4314 &request.data.create_operational_oq.element_length); 4315 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4316 put_unaligned_le16(event_queue->int_msg_num, 4317 &request.data.create_operational_oq.int_msg_num); 4318 4319 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4320 &response); 4321 if (rc) 4322 return rc; 4323 4324 event_queue->oq_ci = ctrl_info->iomem_base + 4325 PQI_DEVICE_REGISTERS_OFFSET + 4326 get_unaligned_le64( 4327 &response.data.create_operational_oq.oq_ci_offset); 4328 4329 return 0; 4330 } 4331 4332 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4333 unsigned int group_number) 4334 { 4335 int rc; 4336 struct pqi_queue_group *queue_group; 4337 struct pqi_general_admin_request request; 4338 struct pqi_general_admin_response response; 4339 4340 queue_group = &ctrl_info->queue_groups[group_number]; 4341 4342 /* 4343 * Create IQ (Inbound Queue - host to device queue) for 4344 * RAID path. 
4345 */ 4346 memset(&request, 0, sizeof(request)); 4347 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4348 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4349 &request.header.iu_length); 4350 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4351 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4352 &request.data.create_operational_iq.queue_id); 4353 put_unaligned_le64( 4354 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4355 &request.data.create_operational_iq.element_array_addr); 4356 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4357 &request.data.create_operational_iq.ci_addr); 4358 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4359 &request.data.create_operational_iq.num_elements); 4360 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4361 &request.data.create_operational_iq.element_length); 4362 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4363 4364 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4365 &response); 4366 if (rc) { 4367 dev_err(&ctrl_info->pci_dev->dev, 4368 "error creating inbound RAID queue\n"); 4369 return rc; 4370 } 4371 4372 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4373 PQI_DEVICE_REGISTERS_OFFSET + 4374 get_unaligned_le64( 4375 &response.data.create_operational_iq.iq_pi_offset); 4376 4377 /* 4378 * Create IQ (Inbound Queue - host to device queue) for 4379 * Advanced I/O (AIO) path. 4380 */ 4381 memset(&request, 0, sizeof(request)); 4382 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4383 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4384 &request.header.iu_length); 4385 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4386 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4387 &request.data.create_operational_iq.queue_id); 4388 put_unaligned_le64((u64)queue_group-> 4389 iq_element_array_bus_addr[AIO_PATH], 4390 &request.data.create_operational_iq.element_array_addr); 4391 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4392 &request.data.create_operational_iq.ci_addr); 4393 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4394 &request.data.create_operational_iq.num_elements); 4395 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4396 &request.data.create_operational_iq.element_length); 4397 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4398 4399 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4400 &response); 4401 if (rc) { 4402 dev_err(&ctrl_info->pci_dev->dev, 4403 "error creating inbound AIO queue\n"); 4404 return rc; 4405 } 4406 4407 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4408 PQI_DEVICE_REGISTERS_OFFSET + 4409 get_unaligned_le64( 4410 &response.data.create_operational_iq.iq_pi_offset); 4411 4412 /* 4413 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4414 * assumed to be for RAID path I/O unless we change the queue's 4415 * property. 
4416 */ 4417 memset(&request, 0, sizeof(request)); 4418 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4419 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4420 &request.header.iu_length); 4421 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4422 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4423 &request.data.change_operational_iq_properties.queue_id); 4424 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4425 &request.data.change_operational_iq_properties.vendor_specific); 4426 4427 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4428 &response); 4429 if (rc) { 4430 dev_err(&ctrl_info->pci_dev->dev, 4431 "error changing queue property\n"); 4432 return rc; 4433 } 4434 4435 /* 4436 * Create OQ (Outbound Queue - device to host queue). 4437 */ 4438 memset(&request, 0, sizeof(request)); 4439 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4440 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4441 &request.header.iu_length); 4442 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4443 put_unaligned_le16(queue_group->oq_id, 4444 &request.data.create_operational_oq.queue_id); 4445 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4446 &request.data.create_operational_oq.element_array_addr); 4447 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4448 &request.data.create_operational_oq.pi_addr); 4449 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4450 &request.data.create_operational_oq.num_elements); 4451 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4452 &request.data.create_operational_oq.element_length); 4453 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4454 put_unaligned_le16(queue_group->int_msg_num, 4455 &request.data.create_operational_oq.int_msg_num); 4456 4457 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4458 &response); 4459 if (rc) { 4460 dev_err(&ctrl_info->pci_dev->dev, 4461 "error creating outbound queue\n"); 4462 return rc; 4463 } 4464 4465 queue_group->oq_ci = ctrl_info->iomem_base + 4466 PQI_DEVICE_REGISTERS_OFFSET + 4467 get_unaligned_le64( 4468 &response.data.create_operational_oq.oq_ci_offset); 4469 4470 return 0; 4471 } 4472 4473 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4474 { 4475 int rc; 4476 unsigned int i; 4477 4478 rc = pqi_create_event_queue(ctrl_info); 4479 if (rc) { 4480 dev_err(&ctrl_info->pci_dev->dev, 4481 "error creating event queue\n"); 4482 return rc; 4483 } 4484 4485 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4486 rc = pqi_create_queue_group(ctrl_info, i); 4487 if (rc) { 4488 dev_err(&ctrl_info->pci_dev->dev, 4489 "error creating queue group number %u/%u\n", 4490 i, ctrl_info->num_queue_groups); 4491 return rc; 4492 } 4493 } 4494 4495 return 0; 4496 } 4497 4498 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4499 (offsetof(struct pqi_event_config, descriptors) + \ 4500 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4501 4502 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4503 bool enable_events) 4504 { 4505 int rc; 4506 unsigned int i; 4507 struct pqi_event_config *event_config; 4508 struct pqi_event_descriptor *event_descriptor; 4509 struct pqi_general_management_request request; 4510 4511 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4512 GFP_KERNEL); 4513 if (!event_config) 4514 return -ENOMEM; 4515 4516 memset(&request, 0, sizeof(request)); 4517 4518 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4519 
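	/*
	 * The IU length reported in the header spans everything after the
	 * common request header up to and including the single SG descriptor
	 * that describes the event configuration buffer, which is why it is
	 * computed as the offset of sg_descriptors[1] minus
	 * PQI_REQUEST_HEADER_LENGTH.
	 */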
put_unaligned_le16(offsetof(struct pqi_general_management_request, 4520 data.report_event_configuration.sg_descriptors[1]) - 4521 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4522 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4523 &request.data.report_event_configuration.buffer_length); 4524 4525 rc = pqi_map_single(ctrl_info->pci_dev, 4526 request.data.report_event_configuration.sg_descriptors, 4527 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4528 DMA_FROM_DEVICE); 4529 if (rc) 4530 goto out; 4531 4532 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4533 0, NULL, NO_TIMEOUT); 4534 4535 pqi_pci_unmap(ctrl_info->pci_dev, 4536 request.data.report_event_configuration.sg_descriptors, 1, 4537 DMA_FROM_DEVICE); 4538 4539 if (rc) 4540 goto out; 4541 4542 for (i = 0; i < event_config->num_event_descriptors; i++) { 4543 event_descriptor = &event_config->descriptors[i]; 4544 if (enable_events && 4545 pqi_is_supported_event(event_descriptor->event_type)) 4546 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4547 &event_descriptor->oq_id); 4548 else 4549 put_unaligned_le16(0, &event_descriptor->oq_id); 4550 } 4551 4552 memset(&request, 0, sizeof(request)); 4553 4554 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4555 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4556 data.report_event_configuration.sg_descriptors[1]) - 4557 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4558 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4559 &request.data.report_event_configuration.buffer_length); 4560 4561 rc = pqi_map_single(ctrl_info->pci_dev, 4562 request.data.report_event_configuration.sg_descriptors, 4563 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4564 DMA_TO_DEVICE); 4565 if (rc) 4566 goto out; 4567 4568 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4569 NULL, NO_TIMEOUT); 4570 4571 pqi_pci_unmap(ctrl_info->pci_dev, 4572 request.data.report_event_configuration.sg_descriptors, 1, 4573 DMA_TO_DEVICE); 4574 4575 out: 4576 kfree(event_config); 4577 4578 return rc; 4579 } 4580 4581 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4582 { 4583 return pqi_configure_events(ctrl_info, true); 4584 } 4585 4586 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4587 { 4588 return pqi_configure_events(ctrl_info, false); 4589 } 4590 4591 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4592 { 4593 unsigned int i; 4594 struct device *dev; 4595 size_t sg_chain_buffer_length; 4596 struct pqi_io_request *io_request; 4597 4598 if (!ctrl_info->io_request_pool) 4599 return; 4600 4601 dev = &ctrl_info->pci_dev->dev; 4602 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4603 io_request = ctrl_info->io_request_pool; 4604 4605 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4606 kfree(io_request->iu); 4607 if (!io_request->sg_chain_buffer) 4608 break; 4609 dma_free_coherent(dev, sg_chain_buffer_length, 4610 io_request->sg_chain_buffer, 4611 io_request->sg_chain_buffer_dma_handle); 4612 io_request++; 4613 } 4614 4615 kfree(ctrl_info->io_request_pool); 4616 ctrl_info->io_request_pool = NULL; 4617 } 4618 4619 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4620 { 4621 4622 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 4623 ctrl_info->error_buffer_length, 4624 &ctrl_info->error_buffer_dma_handle, 4625 GFP_KERNEL); 4626 if (!ctrl_info->error_buffer) 4627 return -ENOMEM; 4628 
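	/*
	 * error_buffer_length is sized in pqi_calculate_io_resources() as
	 * max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH, so every possible
	 * outstanding request has its own element within this single
	 * coherent allocation.
	 */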
4629 return 0; 4630 } 4631 4632 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4633 { 4634 unsigned int i; 4635 void *sg_chain_buffer; 4636 size_t sg_chain_buffer_length; 4637 dma_addr_t sg_chain_buffer_dma_handle; 4638 struct device *dev; 4639 struct pqi_io_request *io_request; 4640 4641 ctrl_info->io_request_pool = 4642 kcalloc(ctrl_info->max_io_slots, 4643 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4644 4645 if (!ctrl_info->io_request_pool) { 4646 dev_err(&ctrl_info->pci_dev->dev, 4647 "failed to allocate I/O request pool\n"); 4648 goto error; 4649 } 4650 4651 dev = &ctrl_info->pci_dev->dev; 4652 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4653 io_request = ctrl_info->io_request_pool; 4654 4655 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4656 io_request->iu = 4657 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4658 4659 if (!io_request->iu) { 4660 dev_err(&ctrl_info->pci_dev->dev, 4661 "failed to allocate IU buffers\n"); 4662 goto error; 4663 } 4664 4665 sg_chain_buffer = dma_alloc_coherent(dev, 4666 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4667 GFP_KERNEL); 4668 4669 if (!sg_chain_buffer) { 4670 dev_err(&ctrl_info->pci_dev->dev, 4671 "failed to allocate PQI scatter-gather chain buffers\n"); 4672 goto error; 4673 } 4674 4675 io_request->index = i; 4676 io_request->sg_chain_buffer = sg_chain_buffer; 4677 io_request->sg_chain_buffer_dma_handle = 4678 sg_chain_buffer_dma_handle; 4679 io_request++; 4680 } 4681 4682 return 0; 4683 4684 error: 4685 pqi_free_all_io_requests(ctrl_info); 4686 4687 return -ENOMEM; 4688 } 4689 4690 /* 4691 * Calculate required resources that are sized based on max. outstanding 4692 * requests and max. transfer size. 4693 */ 4694 4695 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4696 { 4697 u32 max_transfer_size; 4698 u32 max_sg_entries; 4699 4700 ctrl_info->scsi_ml_can_queue = 4701 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4702 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4703 4704 ctrl_info->error_buffer_length = 4705 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4706 4707 if (reset_devices) 4708 max_transfer_size = min(ctrl_info->max_transfer_size, 4709 PQI_MAX_TRANSFER_SIZE_KDUMP); 4710 else 4711 max_transfer_size = min(ctrl_info->max_transfer_size, 4712 PQI_MAX_TRANSFER_SIZE); 4713 4714 max_sg_entries = max_transfer_size / PAGE_SIZE; 4715 4716 /* +1 to cover when the buffer is not page-aligned. 
*/ 4717 max_sg_entries++; 4718 4719 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4720 4721 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4722 4723 ctrl_info->sg_chain_buffer_length = 4724 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4725 PQI_EXTRA_SGL_MEMORY; 4726 ctrl_info->sg_tablesize = max_sg_entries; 4727 ctrl_info->max_sectors = max_transfer_size / 512; 4728 } 4729 4730 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4731 { 4732 int num_queue_groups; 4733 u16 num_elements_per_iq; 4734 u16 num_elements_per_oq; 4735 4736 if (reset_devices) { 4737 num_queue_groups = 1; 4738 } else { 4739 int num_cpus; 4740 int max_queue_groups; 4741 4742 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4743 ctrl_info->max_outbound_queues - 1); 4744 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4745 4746 num_cpus = num_online_cpus(); 4747 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4748 num_queue_groups = min(num_queue_groups, max_queue_groups); 4749 } 4750 4751 ctrl_info->num_queue_groups = num_queue_groups; 4752 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4753 4754 /* 4755 * Make sure that the max. inbound IU length is an even multiple 4756 * of our inbound element length. 4757 */ 4758 ctrl_info->max_inbound_iu_length = 4759 (ctrl_info->max_inbound_iu_length_per_firmware / 4760 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4761 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4762 4763 num_elements_per_iq = 4764 (ctrl_info->max_inbound_iu_length / 4765 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4766 4767 /* Add one because one element in each queue is unusable. */ 4768 num_elements_per_iq++; 4769 4770 num_elements_per_iq = min(num_elements_per_iq, 4771 ctrl_info->max_elements_per_iq); 4772 4773 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4774 num_elements_per_oq = min(num_elements_per_oq, 4775 ctrl_info->max_elements_per_oq); 4776 4777 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4778 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4779 4780 ctrl_info->max_sg_per_iu = 4781 ((ctrl_info->max_inbound_iu_length - 4782 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4783 sizeof(struct pqi_sg_descriptor)) + 4784 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4785 } 4786 4787 static inline void pqi_set_sg_descriptor( 4788 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4789 { 4790 u64 address = (u64)sg_dma_address(sg); 4791 unsigned int length = sg_dma_len(sg); 4792 4793 put_unaligned_le64(address, &sg_descriptor->address); 4794 put_unaligned_le32(length, &sg_descriptor->length); 4795 put_unaligned_le32(0, &sg_descriptor->flags); 4796 } 4797 4798 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4799 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4800 struct pqi_io_request *io_request) 4801 { 4802 int i; 4803 u16 iu_length; 4804 int sg_count; 4805 bool chained; 4806 unsigned int num_sg_in_iu; 4807 unsigned int max_sg_per_iu; 4808 struct scatterlist *sg; 4809 struct pqi_sg_descriptor *sg_descriptor; 4810 4811 sg_count = scsi_dma_map(scmd); 4812 if (sg_count < 0) 4813 return sg_count; 4814 4815 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4816 PQI_REQUEST_HEADER_LENGTH; 4817 4818 if (sg_count == 0) 4819 goto out; 4820 4821 sg = scsi_sglist(scmd); 4822 sg_descriptor = request->sg_descriptors; 4823 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4824 chained = false; 4825 num_sg_in_iu = 0; 4826 i = 0; 4827 4828 while (1) { 4829 
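		/*
		 * Copy each scatter-gather entry into the SG descriptors
		 * embedded in the IU.  If the list does not fit in the
		 * embedded descriptors, the last embedded slot is converted
		 * into a CISS_SG_CHAIN descriptor that points at the
		 * request's pre-allocated chain buffer and the remaining
		 * entries are written there.  The final descriptor is always
		 * flagged CISS_SG_LAST.
		 */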
pqi_set_sg_descriptor(sg_descriptor, sg); 4830 if (!chained) 4831 num_sg_in_iu++; 4832 i++; 4833 if (i == sg_count) 4834 break; 4835 sg_descriptor++; 4836 if (i == max_sg_per_iu) { 4837 put_unaligned_le64( 4838 (u64)io_request->sg_chain_buffer_dma_handle, 4839 &sg_descriptor->address); 4840 put_unaligned_le32((sg_count - num_sg_in_iu) 4841 * sizeof(*sg_descriptor), 4842 &sg_descriptor->length); 4843 put_unaligned_le32(CISS_SG_CHAIN, 4844 &sg_descriptor->flags); 4845 chained = true; 4846 num_sg_in_iu++; 4847 sg_descriptor = io_request->sg_chain_buffer; 4848 } 4849 sg = sg_next(sg); 4850 } 4851 4852 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4853 request->partial = chained; 4854 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4855 4856 out: 4857 put_unaligned_le16(iu_length, &request->header.iu_length); 4858 4859 return 0; 4860 } 4861 4862 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4863 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4864 struct pqi_io_request *io_request) 4865 { 4866 int i; 4867 u16 iu_length; 4868 int sg_count; 4869 bool chained; 4870 unsigned int num_sg_in_iu; 4871 unsigned int max_sg_per_iu; 4872 struct scatterlist *sg; 4873 struct pqi_sg_descriptor *sg_descriptor; 4874 4875 sg_count = scsi_dma_map(scmd); 4876 if (sg_count < 0) 4877 return sg_count; 4878 4879 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4880 PQI_REQUEST_HEADER_LENGTH; 4881 num_sg_in_iu = 0; 4882 4883 if (sg_count == 0) 4884 goto out; 4885 4886 sg = scsi_sglist(scmd); 4887 sg_descriptor = request->sg_descriptors; 4888 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4889 chained = false; 4890 i = 0; 4891 4892 while (1) { 4893 pqi_set_sg_descriptor(sg_descriptor, sg); 4894 if (!chained) 4895 num_sg_in_iu++; 4896 i++; 4897 if (i == sg_count) 4898 break; 4899 sg_descriptor++; 4900 if (i == max_sg_per_iu) { 4901 put_unaligned_le64( 4902 (u64)io_request->sg_chain_buffer_dma_handle, 4903 &sg_descriptor->address); 4904 put_unaligned_le32((sg_count - num_sg_in_iu) 4905 * sizeof(*sg_descriptor), 4906 &sg_descriptor->length); 4907 put_unaligned_le32(CISS_SG_CHAIN, 4908 &sg_descriptor->flags); 4909 chained = true; 4910 num_sg_in_iu++; 4911 sg_descriptor = io_request->sg_chain_buffer; 4912 } 4913 sg = sg_next(sg); 4914 } 4915 4916 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4917 request->partial = chained; 4918 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4919 4920 out: 4921 put_unaligned_le16(iu_length, &request->header.iu_length); 4922 request->num_sg_descriptors = num_sg_in_iu; 4923 4924 return 0; 4925 } 4926 4927 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4928 void *context) 4929 { 4930 struct scsi_cmnd *scmd; 4931 4932 scmd = io_request->scmd; 4933 pqi_free_io_request(io_request); 4934 scsi_dma_unmap(scmd); 4935 pqi_scsi_done(scmd); 4936 } 4937 4938 static int pqi_raid_submit_scsi_cmd_with_io_request( 4939 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4940 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4941 struct pqi_queue_group *queue_group) 4942 { 4943 int rc; 4944 size_t cdb_length; 4945 struct pqi_raid_path_request *request; 4946 4947 io_request->io_complete_callback = pqi_raid_io_complete; 4948 io_request->scmd = scmd; 4949 4950 request = io_request->iu; 4951 memset(request, 0, 4952 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4953 4954 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4955 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4956 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4957 put_unaligned_le16(io_request->index, &request->request_id); 4958 request->error_index = request->request_id; 4959 memcpy(request->lun_number, device->scsi3addr, 4960 sizeof(request->lun_number)); 4961 4962 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4963 memcpy(request->cdb, scmd->cmnd, cdb_length); 4964 4965 switch (cdb_length) { 4966 case 6: 4967 case 10: 4968 case 12: 4969 case 16: 4970 /* No bytes in the Additional CDB bytes field */ 4971 request->additional_cdb_bytes_usage = 4972 SOP_ADDITIONAL_CDB_BYTES_0; 4973 break; 4974 case 20: 4975 /* 4 bytes in the Additional cdb field */ 4976 request->additional_cdb_bytes_usage = 4977 SOP_ADDITIONAL_CDB_BYTES_4; 4978 break; 4979 case 24: 4980 /* 8 bytes in the Additional cdb field */ 4981 request->additional_cdb_bytes_usage = 4982 SOP_ADDITIONAL_CDB_BYTES_8; 4983 break; 4984 case 28: 4985 /* 12 bytes in the Additional cdb field */ 4986 request->additional_cdb_bytes_usage = 4987 SOP_ADDITIONAL_CDB_BYTES_12; 4988 break; 4989 case 32: 4990 default: 4991 /* 16 bytes in the Additional cdb field */ 4992 request->additional_cdb_bytes_usage = 4993 SOP_ADDITIONAL_CDB_BYTES_16; 4994 break; 4995 } 4996 4997 switch (scmd->sc_data_direction) { 4998 case DMA_TO_DEVICE: 4999 request->data_direction = SOP_READ_FLAG; 5000 break; 5001 case DMA_FROM_DEVICE: 5002 request->data_direction = SOP_WRITE_FLAG; 5003 break; 5004 case DMA_NONE: 5005 request->data_direction = SOP_NO_DIRECTION_FLAG; 5006 break; 5007 case DMA_BIDIRECTIONAL: 5008 request->data_direction = SOP_BIDIRECTIONAL; 5009 break; 5010 default: 5011 dev_err(&ctrl_info->pci_dev->dev, 5012 "unknown data direction: %d\n", 5013 scmd->sc_data_direction); 5014 break; 5015 } 5016 5017 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5018 if (rc) { 5019 pqi_free_io_request(io_request); 5020 return SCSI_MLQUEUE_HOST_BUSY; 5021 } 5022 5023 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5024 5025 return 0; 5026 } 5027 5028 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5029 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5030 struct pqi_queue_group *queue_group) 5031 { 5032 struct pqi_io_request *io_request; 5033 5034 io_request = pqi_alloc_io_request(ctrl_info); 5035 5036 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5037 device, scmd, queue_group); 5038 } 5039 5040 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 5041 { 5042 if (!pqi_ctrl_blocked(ctrl_info)) 5043 schedule_work(&ctrl_info->raid_bypass_retry_work); 5044 } 5045 5046 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5047 { 5048 struct scsi_cmnd *scmd; 5049 struct pqi_scsi_dev *device; 5050 struct pqi_ctrl_info *ctrl_info; 5051 5052 if (!io_request->raid_bypass) 5053 return false; 5054 5055 scmd = io_request->scmd; 5056 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5057 return false; 5058 if (host_byte(scmd->result) == DID_NO_CONNECT) 5059 return false; 5060 5061 device = scmd->device->hostdata; 5062 if (pqi_device_offline(device)) 5063 return false; 5064 5065 ctrl_info = shost_to_hba(scmd->device->host); 5066 if (pqi_ctrl_offline(ctrl_info)) 5067 return false; 5068 5069 return true; 5070 } 5071 5072 static inline void pqi_add_to_raid_bypass_retry_list( 5073 struct pqi_ctrl_info *ctrl_info, 5074 struct pqi_io_request *io_request, bool at_head) 5075 { 5076 unsigned long flags; 5077 5078 
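	/*
	 * Requests that failed on the RAID bypass (AIO) path are parked on a
	 * per-controller list protected by raid_bypass_retry_list_lock; the
	 * retry worker later resubmits them via the normal RAID path.
	 * at_head is set when a resubmission attempt fails so that ordering
	 * is preserved for the next attempt.
	 */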
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5079 if (at_head) 5080 list_add(&io_request->request_list_entry, 5081 &ctrl_info->raid_bypass_retry_list); 5082 else 5083 list_add_tail(&io_request->request_list_entry, 5084 &ctrl_info->raid_bypass_retry_list); 5085 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5086 } 5087 5088 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 5089 void *context) 5090 { 5091 struct scsi_cmnd *scmd; 5092 5093 scmd = io_request->scmd; 5094 pqi_free_io_request(io_request); 5095 pqi_scsi_done(scmd); 5096 } 5097 5098 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 5099 { 5100 struct scsi_cmnd *scmd; 5101 struct pqi_ctrl_info *ctrl_info; 5102 5103 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 5104 scmd = io_request->scmd; 5105 scmd->result = 0; 5106 ctrl_info = shost_to_hba(scmd->device->host); 5107 5108 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 5109 pqi_schedule_bypass_retry(ctrl_info); 5110 } 5111 5112 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 5113 { 5114 struct scsi_cmnd *scmd; 5115 struct pqi_scsi_dev *device; 5116 struct pqi_ctrl_info *ctrl_info; 5117 struct pqi_queue_group *queue_group; 5118 5119 scmd = io_request->scmd; 5120 device = scmd->device->hostdata; 5121 if (pqi_device_in_reset(device)) { 5122 pqi_free_io_request(io_request); 5123 set_host_byte(scmd, DID_RESET); 5124 pqi_scsi_done(scmd); 5125 return 0; 5126 } 5127 5128 ctrl_info = shost_to_hba(scmd->device->host); 5129 queue_group = io_request->queue_group; 5130 5131 pqi_reinit_io_request(io_request); 5132 5133 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5134 device, scmd, queue_group); 5135 } 5136 5137 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 5138 struct pqi_ctrl_info *ctrl_info) 5139 { 5140 unsigned long flags; 5141 struct pqi_io_request *io_request; 5142 5143 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5144 io_request = list_first_entry_or_null( 5145 &ctrl_info->raid_bypass_retry_list, 5146 struct pqi_io_request, request_list_entry); 5147 if (io_request) 5148 list_del(&io_request->request_list_entry); 5149 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5150 5151 return io_request; 5152 } 5153 5154 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 5155 { 5156 int rc; 5157 struct pqi_io_request *io_request; 5158 5159 pqi_ctrl_busy(ctrl_info); 5160 5161 while (1) { 5162 if (pqi_ctrl_blocked(ctrl_info)) 5163 break; 5164 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 5165 if (!io_request) 5166 break; 5167 rc = pqi_retry_raid_bypass(io_request); 5168 if (rc) { 5169 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 5170 true); 5171 pqi_schedule_bypass_retry(ctrl_info); 5172 break; 5173 } 5174 } 5175 5176 pqi_ctrl_unbusy(ctrl_info); 5177 } 5178 5179 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 5180 { 5181 struct pqi_ctrl_info *ctrl_info; 5182 5183 ctrl_info = container_of(work, struct pqi_ctrl_info, 5184 raid_bypass_retry_work); 5185 pqi_retry_raid_bypass_requests(ctrl_info); 5186 } 5187 5188 static void pqi_clear_all_queued_raid_bypass_retries( 5189 struct pqi_ctrl_info *ctrl_info) 5190 { 5191 unsigned long flags; 5192 5193 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5194 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 5195 
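	/*
	 * Re-initializing the list head under the lock empties the entire
	 * retry list in one step; the queued io_request structures are not
	 * completed or freed here.
	 */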
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5196 } 5197 5198 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5199 void *context) 5200 { 5201 struct scsi_cmnd *scmd; 5202 5203 scmd = io_request->scmd; 5204 scsi_dma_unmap(scmd); 5205 if (io_request->status == -EAGAIN) 5206 set_host_byte(scmd, DID_IMM_RETRY); 5207 else if (pqi_raid_bypass_retry_needed(io_request)) { 5208 pqi_queue_raid_bypass_retry(io_request); 5209 return; 5210 } 5211 pqi_free_io_request(io_request); 5212 pqi_scsi_done(scmd); 5213 } 5214 5215 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5216 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5217 struct pqi_queue_group *queue_group) 5218 { 5219 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5220 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 5221 } 5222 5223 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5224 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5225 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5226 struct pqi_encryption_info *encryption_info, bool raid_bypass) 5227 { 5228 int rc; 5229 struct pqi_io_request *io_request; 5230 struct pqi_aio_path_request *request; 5231 5232 io_request = pqi_alloc_io_request(ctrl_info); 5233 io_request->io_complete_callback = pqi_aio_io_complete; 5234 io_request->scmd = scmd; 5235 io_request->raid_bypass = raid_bypass; 5236 5237 request = io_request->iu; 5238 memset(request, 0, 5239 offsetof(struct pqi_raid_path_request, sg_descriptors)); 5240 5241 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5242 put_unaligned_le32(aio_handle, &request->nexus_id); 5243 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5244 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5245 put_unaligned_le16(io_request->index, &request->request_id); 5246 request->error_index = request->request_id; 5247 if (cdb_length > sizeof(request->cdb)) 5248 cdb_length = sizeof(request->cdb); 5249 request->cdb_length = cdb_length; 5250 memcpy(request->cdb, cdb, cdb_length); 5251 5252 switch (scmd->sc_data_direction) { 5253 case DMA_TO_DEVICE: 5254 request->data_direction = SOP_READ_FLAG; 5255 break; 5256 case DMA_FROM_DEVICE: 5257 request->data_direction = SOP_WRITE_FLAG; 5258 break; 5259 case DMA_NONE: 5260 request->data_direction = SOP_NO_DIRECTION_FLAG; 5261 break; 5262 case DMA_BIDIRECTIONAL: 5263 request->data_direction = SOP_BIDIRECTIONAL; 5264 break; 5265 default: 5266 dev_err(&ctrl_info->pci_dev->dev, 5267 "unknown data direction: %d\n", 5268 scmd->sc_data_direction); 5269 break; 5270 } 5271 5272 if (encryption_info) { 5273 request->encryption_enable = true; 5274 put_unaligned_le16(encryption_info->data_encryption_key_index, 5275 &request->data_encryption_key_index); 5276 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5277 &request->encrypt_tweak_lower); 5278 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5279 &request->encrypt_tweak_upper); 5280 } 5281 5282 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5283 if (rc) { 5284 pqi_free_io_request(io_request); 5285 return SCSI_MLQUEUE_HOST_BUSY; 5286 } 5287 5288 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5289 5290 return 0; 5291 } 5292 5293 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5294 struct scsi_cmnd *scmd) 5295 { 5296 u16 hw_queue; 5297 5298 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 5299 if (hw_queue > ctrl_info->max_hw_queue_index) 5300 hw_queue = 
0; 5301 5302 return hw_queue; 5303 } 5304 5305 /* 5306 * This function gets called just before we hand the completed SCSI request 5307 * back to the SML. 5308 */ 5309 5310 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5311 { 5312 struct pqi_scsi_dev *device; 5313 5314 if (!scmd->device) { 5315 set_host_byte(scmd, DID_NO_CONNECT); 5316 return; 5317 } 5318 5319 device = scmd->device->hostdata; 5320 if (!device) { 5321 set_host_byte(scmd, DID_NO_CONNECT); 5322 return; 5323 } 5324 5325 atomic_dec(&device->scsi_cmds_outstanding); 5326 } 5327 5328 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 5329 struct scsi_cmnd *scmd) 5330 { 5331 int rc; 5332 struct pqi_ctrl_info *ctrl_info; 5333 struct pqi_scsi_dev *device; 5334 u16 hw_queue; 5335 struct pqi_queue_group *queue_group; 5336 bool raid_bypassed; 5337 5338 device = scmd->device->hostdata; 5339 ctrl_info = shost_to_hba(shost); 5340 5341 if (!device) { 5342 set_host_byte(scmd, DID_NO_CONNECT); 5343 pqi_scsi_done(scmd); 5344 return 0; 5345 } 5346 5347 atomic_inc(&device->scsi_cmds_outstanding); 5348 5349 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { 5350 set_host_byte(scmd, DID_NO_CONNECT); 5351 pqi_scsi_done(scmd); 5352 return 0; 5353 } 5354 5355 pqi_ctrl_busy(ctrl_info); 5356 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || 5357 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) { 5358 rc = SCSI_MLQUEUE_HOST_BUSY; 5359 goto out; 5360 } 5361 5362 /* 5363 * This is necessary because the SML doesn't zero out this field during 5364 * error recovery. 5365 */ 5366 scmd->result = 0; 5367 5368 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 5369 queue_group = &ctrl_info->queue_groups[hw_queue]; 5370 5371 if (pqi_is_logical_device(device)) { 5372 raid_bypassed = false; 5373 if (device->raid_bypass_enabled && 5374 !blk_rq_is_passthrough(scmd->request)) { 5375 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 5376 scmd, queue_group); 5377 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { 5378 raid_bypassed = true; 5379 atomic_inc(&device->raid_bypass_cnt); 5380 } 5381 } 5382 if (!raid_bypassed) 5383 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 5384 } else { 5385 if (device->aio_enabled) 5386 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 5387 else 5388 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 5389 } 5390 5391 out: 5392 pqi_ctrl_unbusy(ctrl_info); 5393 if (rc) 5394 atomic_dec(&device->scsi_cmds_outstanding); 5395 5396 return rc; 5397 } 5398 5399 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5400 struct pqi_queue_group *queue_group) 5401 { 5402 unsigned int path; 5403 unsigned long flags; 5404 bool list_is_empty; 5405 5406 for (path = 0; path < 2; path++) { 5407 while (1) { 5408 spin_lock_irqsave( 5409 &queue_group->submit_lock[path], flags); 5410 list_is_empty = 5411 list_empty(&queue_group->request_list[path]); 5412 spin_unlock_irqrestore( 5413 &queue_group->submit_lock[path], flags); 5414 if (list_is_empty) 5415 break; 5416 pqi_check_ctrl_health(ctrl_info); 5417 if (pqi_ctrl_offline(ctrl_info)) 5418 return -ENXIO; 5419 usleep_range(1000, 2000); 5420 } 5421 } 5422 5423 return 0; 5424 } 5425 5426 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5427 { 5428 int rc; 5429 unsigned int i; 5430 unsigned int path; 5431 struct pqi_queue_group *queue_group; 5432 pqi_index_t iq_pi; 5433 pqi_index_t iq_ci; 5434 5435 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 
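		/*
		 * For each queue group, first wait for the driver's own
		 * queued (not yet submitted) requests to drain, then wait
		 * until the consumer index written back by the controller
		 * (iq_ci) catches up with the driver's producer index copy
		 * (iq_pi_copy) on both the RAID and AIO paths.
		 */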
5436 queue_group = &ctrl_info->queue_groups[i]; 5437 5438 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5439 if (rc) 5440 return rc; 5441 5442 for (path = 0; path < 2; path++) { 5443 iq_pi = queue_group->iq_pi_copy[path]; 5444 5445 while (1) { 5446 iq_ci = readl(queue_group->iq_ci[path]); 5447 if (iq_ci == iq_pi) 5448 break; 5449 pqi_check_ctrl_health(ctrl_info); 5450 if (pqi_ctrl_offline(ctrl_info)) 5451 return -ENXIO; 5452 usleep_range(1000, 2000); 5453 } 5454 } 5455 } 5456 5457 return 0; 5458 } 5459 5460 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5461 struct pqi_scsi_dev *device) 5462 { 5463 unsigned int i; 5464 unsigned int path; 5465 struct pqi_queue_group *queue_group; 5466 unsigned long flags; 5467 struct pqi_io_request *io_request; 5468 struct pqi_io_request *next; 5469 struct scsi_cmnd *scmd; 5470 struct pqi_scsi_dev *scsi_device; 5471 5472 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5473 queue_group = &ctrl_info->queue_groups[i]; 5474 5475 for (path = 0; path < 2; path++) { 5476 spin_lock_irqsave( 5477 &queue_group->submit_lock[path], flags); 5478 5479 list_for_each_entry_safe(io_request, next, 5480 &queue_group->request_list[path], 5481 request_list_entry) { 5482 scmd = io_request->scmd; 5483 if (!scmd) 5484 continue; 5485 5486 scsi_device = scmd->device->hostdata; 5487 if (scsi_device != device) 5488 continue; 5489 5490 list_del(&io_request->request_list_entry); 5491 set_host_byte(scmd, DID_RESET); 5492 pqi_scsi_done(scmd); 5493 } 5494 5495 spin_unlock_irqrestore( 5496 &queue_group->submit_lock[path], flags); 5497 } 5498 } 5499 } 5500 5501 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) 5502 { 5503 unsigned int i; 5504 unsigned int path; 5505 struct pqi_queue_group *queue_group; 5506 unsigned long flags; 5507 struct pqi_io_request *io_request; 5508 struct pqi_io_request *next; 5509 struct scsi_cmnd *scmd; 5510 5511 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5512 queue_group = &ctrl_info->queue_groups[i]; 5513 5514 for (path = 0; path < 2; path++) { 5515 spin_lock_irqsave(&queue_group->submit_lock[path], 5516 flags); 5517 5518 list_for_each_entry_safe(io_request, next, 5519 &queue_group->request_list[path], 5520 request_list_entry) { 5521 5522 scmd = io_request->scmd; 5523 if (!scmd) 5524 continue; 5525 5526 list_del(&io_request->request_list_entry); 5527 set_host_byte(scmd, DID_RESET); 5528 pqi_scsi_done(scmd); 5529 } 5530 5531 spin_unlock_irqrestore( 5532 &queue_group->submit_lock[path], flags); 5533 } 5534 } 5535 } 5536 5537 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5538 struct pqi_scsi_dev *device, unsigned long timeout_secs) 5539 { 5540 unsigned long timeout; 5541 5542 timeout = (timeout_secs * PQI_HZ) + jiffies; 5543 5544 while (atomic_read(&device->scsi_cmds_outstanding)) { 5545 pqi_check_ctrl_health(ctrl_info); 5546 if (pqi_ctrl_offline(ctrl_info)) 5547 return -ENXIO; 5548 if (timeout_secs != NO_TIMEOUT) { 5549 if (time_after(jiffies, timeout)) { 5550 dev_err(&ctrl_info->pci_dev->dev, 5551 "timed out waiting for pending IO\n"); 5552 return -ETIMEDOUT; 5553 } 5554 } 5555 usleep_range(1000, 2000); 5556 } 5557 5558 return 0; 5559 } 5560 5561 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5562 unsigned long timeout_secs) 5563 { 5564 bool io_pending; 5565 unsigned long flags; 5566 unsigned long timeout; 5567 struct pqi_scsi_dev *device; 5568 5569 timeout = (timeout_secs * PQI_HZ) + jiffies; 5570 while (1) { 5571 
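		/*
		 * Poll every device on the controller's SCSI device list and
		 * keep waiting, in 1-2 ms steps, until none of them has any
		 * commands outstanding, the controller goes offline, or the
		 * optional timeout expires.
		 */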
io_pending = false; 5572 5573 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5574 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5575 scsi_device_list_entry) { 5576 if (atomic_read(&device->scsi_cmds_outstanding)) { 5577 io_pending = true; 5578 break; 5579 } 5580 } 5581 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5582 flags); 5583 5584 if (!io_pending) 5585 break; 5586 5587 pqi_check_ctrl_health(ctrl_info); 5588 if (pqi_ctrl_offline(ctrl_info)) 5589 return -ENXIO; 5590 5591 if (timeout_secs != NO_TIMEOUT) { 5592 if (time_after(jiffies, timeout)) { 5593 dev_err(&ctrl_info->pci_dev->dev, 5594 "timed out waiting for pending IO\n"); 5595 return -ETIMEDOUT; 5596 } 5597 } 5598 usleep_range(1000, 2000); 5599 } 5600 5601 return 0; 5602 } 5603 5604 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info) 5605 { 5606 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) { 5607 pqi_check_ctrl_health(ctrl_info); 5608 if (pqi_ctrl_offline(ctrl_info)) 5609 return -ENXIO; 5610 usleep_range(1000, 2000); 5611 } 5612 5613 return 0; 5614 } 5615 5616 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5617 void *context) 5618 { 5619 struct completion *waiting = context; 5620 5621 complete(waiting); 5622 } 5623 5624 #define PQI_LUN_RESET_TIMEOUT_SECS 30 5625 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 5626 5627 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5628 struct pqi_scsi_dev *device, struct completion *wait) 5629 { 5630 int rc; 5631 5632 while (1) { 5633 if (wait_for_completion_io_timeout(wait, 5634 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) { 5635 rc = 0; 5636 break; 5637 } 5638 5639 pqi_check_ctrl_health(ctrl_info); 5640 if (pqi_ctrl_offline(ctrl_info)) { 5641 rc = -ENXIO; 5642 break; 5643 } 5644 } 5645 5646 return rc; 5647 } 5648 5649 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5650 struct pqi_scsi_dev *device) 5651 { 5652 int rc; 5653 struct pqi_io_request *io_request; 5654 DECLARE_COMPLETION_ONSTACK(wait); 5655 struct pqi_task_management_request *request; 5656 5657 io_request = pqi_alloc_io_request(ctrl_info); 5658 io_request->io_complete_callback = pqi_lun_reset_complete; 5659 io_request->context = &wait; 5660 5661 request = io_request->iu; 5662 memset(request, 0, sizeof(*request)); 5663 5664 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 5665 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5666 &request->header.iu_length); 5667 put_unaligned_le16(io_request->index, &request->request_id); 5668 memcpy(request->lun_number, device->scsi3addr, 5669 sizeof(request->lun_number)); 5670 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5671 if (ctrl_info->tmf_iu_timeout_supported) 5672 put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS, 5673 &request->timeout); 5674 5675 pqi_start_io(ctrl_info, 5676 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5677 io_request); 5678 5679 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5680 if (rc == 0) 5681 rc = io_request->status; 5682 5683 pqi_free_io_request(io_request); 5684 5685 return rc; 5686 } 5687 5688 /* Performs a reset at the LUN level. 
*/ 5689 5690 #define PQI_LUN_RESET_RETRIES 3 5691 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 5692 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120 5693 5694 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5695 struct pqi_scsi_dev *device) 5696 { 5697 int rc; 5698 unsigned int retries; 5699 unsigned long timeout_secs; 5700 5701 for (retries = 0;;) { 5702 rc = pqi_lun_reset(ctrl_info, device); 5703 if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES) 5704 break; 5705 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 5706 } 5707 5708 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT; 5709 5710 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); 5711 5712 return rc == 0 ? SUCCESS : FAILED; 5713 } 5714 5715 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5716 struct pqi_scsi_dev *device) 5717 { 5718 int rc; 5719 5720 mutex_lock(&ctrl_info->lun_reset_mutex); 5721 5722 pqi_ctrl_block_requests(ctrl_info); 5723 pqi_ctrl_wait_until_quiesced(ctrl_info); 5724 pqi_fail_io_queued_for_device(ctrl_info, device); 5725 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5726 pqi_device_reset_start(device); 5727 pqi_ctrl_unblock_requests(ctrl_info); 5728 5729 if (rc) 5730 rc = FAILED; 5731 else 5732 rc = _pqi_device_reset(ctrl_info, device); 5733 5734 pqi_device_reset_done(device); 5735 5736 mutex_unlock(&ctrl_info->lun_reset_mutex); 5737 5738 return rc; 5739 } 5740 5741 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5742 { 5743 int rc; 5744 struct Scsi_Host *shost; 5745 struct pqi_ctrl_info *ctrl_info; 5746 struct pqi_scsi_dev *device; 5747 5748 shost = scmd->device->host; 5749 ctrl_info = shost_to_hba(shost); 5750 device = scmd->device->hostdata; 5751 5752 dev_err(&ctrl_info->pci_dev->dev, 5753 "resetting scsi %d:%d:%d:%d\n", 5754 shost->host_no, device->bus, device->target, device->lun); 5755 5756 pqi_check_ctrl_health(ctrl_info); 5757 if (pqi_ctrl_offline(ctrl_info) || 5758 pqi_device_reset_blocked(ctrl_info)) { 5759 rc = FAILED; 5760 goto out; 5761 } 5762 5763 pqi_wait_until_ofa_finished(ctrl_info); 5764 5765 atomic_inc(&ctrl_info->sync_cmds_outstanding); 5766 rc = pqi_device_reset(ctrl_info, device); 5767 atomic_dec(&ctrl_info->sync_cmds_outstanding); 5768 5769 out: 5770 dev_err(&ctrl_info->pci_dev->dev, 5771 "reset of scsi %d:%d:%d:%d: %s\n", 5772 shost->host_no, device->bus, device->target, device->lun, 5773 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5774 5775 return rc; 5776 } 5777 5778 static int pqi_slave_alloc(struct scsi_device *sdev) 5779 { 5780 struct pqi_scsi_dev *device; 5781 unsigned long flags; 5782 struct pqi_ctrl_info *ctrl_info; 5783 struct scsi_target *starget; 5784 struct sas_rphy *rphy; 5785 5786 ctrl_info = shost_to_hba(sdev->host); 5787 5788 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5789 5790 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5791 starget = scsi_target(sdev); 5792 rphy = target_to_rphy(starget); 5793 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5794 if (device) { 5795 device->target = sdev_id(sdev); 5796 device->lun = sdev->lun; 5797 device->target_lun_valid = true; 5798 } 5799 } else { 5800 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5801 sdev_id(sdev), sdev->lun); 5802 } 5803 5804 if (device) { 5805 sdev->hostdata = device; 5806 device->sdev = sdev; 5807 if (device->queue_depth) { 5808 device->advertised_queue_depth = device->queue_depth; 5809 scsi_change_queue_depth(sdev, 5810 device->advertised_queue_depth); 5811 } 5812 if (pqi_is_logical_device(device)) 5813 pqi_disable_write_same(sdev); 5814 else 5815 sdev->allow_restart = 1; 5816 } 5817 5818 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5819 5820 return 0; 5821 } 5822 5823 static int pqi_map_queues(struct Scsi_Host *shost) 5824 { 5825 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5826 5827 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 5828 ctrl_info->pci_dev, 0); 5829 } 5830 5831 static int pqi_slave_configure(struct scsi_device *sdev) 5832 { 5833 struct pqi_scsi_dev *device; 5834 5835 device = sdev->hostdata; 5836 device->devtype = sdev->type; 5837 5838 return 0; 5839 } 5840 5841 static void pqi_slave_destroy(struct scsi_device *sdev) 5842 { 5843 unsigned long flags; 5844 struct pqi_scsi_dev *device; 5845 struct pqi_ctrl_info *ctrl_info; 5846 5847 ctrl_info = shost_to_hba(sdev->host); 5848 5849 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5850 5851 device = sdev->hostdata; 5852 if (device) { 5853 sdev->hostdata = NULL; 5854 if (!list_empty(&device->scsi_device_list_entry)) 5855 list_del(&device->scsi_device_list_entry); 5856 } 5857 5858 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5859 5860 if (device) { 5861 pqi_dev_info(ctrl_info, "removed", device); 5862 pqi_free_device(device); 5863 } 5864 } 5865 5866 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5867 { 5868 struct pci_dev *pci_dev; 5869 u32 subsystem_vendor; 5870 u32 subsystem_device; 5871 cciss_pci_info_struct pciinfo; 5872 5873 if (!arg) 5874 return -EINVAL; 5875 5876 pci_dev = ctrl_info->pci_dev; 5877 5878 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5879 pciinfo.bus = pci_dev->bus->number; 5880 pciinfo.dev_fn = pci_dev->devfn; 5881 subsystem_vendor = pci_dev->subsystem_vendor; 5882 subsystem_device = pci_dev->subsystem_device; 5883 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; 5884 5885 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5886 return -EFAULT; 5887 5888 return 0; 5889 } 5890 5891 static int pqi_getdrivver_ioctl(void __user *arg) 5892 { 5893 u32 version; 5894 5895 if (!arg) 5896 return -EINVAL; 5897 5898 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5899 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5900 5901 if (copy_to_user(arg, &version, sizeof(version))) 5902 return -EFAULT; 5903 5904 return 0; 5905 } 5906 5907 struct ciss_error_info { 
5908 u8 scsi_status; 5909 int command_status; 5910 size_t sense_data_length; 5911 }; 5912 5913 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5914 struct ciss_error_info *ciss_error_info) 5915 { 5916 int ciss_cmd_status; 5917 size_t sense_data_length; 5918 5919 switch (pqi_error_info->data_out_result) { 5920 case PQI_DATA_IN_OUT_GOOD: 5921 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5922 break; 5923 case PQI_DATA_IN_OUT_UNDERFLOW: 5924 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5925 break; 5926 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5927 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5928 break; 5929 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5930 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5931 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5932 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5933 case PQI_DATA_IN_OUT_ERROR: 5934 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5935 break; 5936 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5937 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 5938 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5939 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5940 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5941 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5942 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5943 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5944 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5945 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5946 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5947 break; 5948 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5949 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5950 break; 5951 case PQI_DATA_IN_OUT_ABORTED: 5952 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5953 break; 5954 case PQI_DATA_IN_OUT_TIMEOUT: 5955 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5956 break; 5957 default: 5958 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5959 break; 5960 } 5961 5962 sense_data_length = 5963 get_unaligned_le16(&pqi_error_info->sense_data_length); 5964 if (sense_data_length == 0) 5965 sense_data_length = 5966 get_unaligned_le16(&pqi_error_info->response_data_length); 5967 if (sense_data_length) 5968 if (sense_data_length > sizeof(pqi_error_info->data)) 5969 sense_data_length = sizeof(pqi_error_info->data); 5970 5971 ciss_error_info->scsi_status = pqi_error_info->status; 5972 ciss_error_info->command_status = ciss_cmd_status; 5973 ciss_error_info->sense_data_length = sense_data_length; 5974 } 5975 5976 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5977 { 5978 int rc; 5979 char *kernel_buffer = NULL; 5980 u16 iu_length; 5981 size_t sense_data_length; 5982 IOCTL_Command_struct iocommand; 5983 struct pqi_raid_path_request request; 5984 struct pqi_raid_error_info pqi_error_info; 5985 struct ciss_error_info ciss_error_info; 5986 5987 if (pqi_ctrl_offline(ctrl_info)) 5988 return -ENXIO; 5989 if (!arg) 5990 return -EINVAL; 5991 if (!capable(CAP_SYS_RAWIO)) 5992 return -EPERM; 5993 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5994 return -EFAULT; 5995 if (iocommand.buf_size < 1 && 5996 iocommand.Request.Type.Direction != XFER_NONE) 5997 return -EINVAL; 5998 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5999 return -EINVAL; 6000 if (iocommand.Request.Type.Type != TYPE_CMD) 6001 return -EINVAL; 6002 6003 switch (iocommand.Request.Type.Direction) { 6004 case XFER_NONE: 6005 case XFER_WRITE: 6006 case XFER_READ: 6007 case XFER_READ | XFER_WRITE: 6008 break; 6009 default: 6010 return -EINVAL; 6011 } 6012 6013 if (iocommand.buf_size > 0) { 6014 
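		/*
		 * Stage the transfer buffer in kernel memory: copy the user's
		 * data in for write-type transfers, otherwise zero the buffer
		 * so stale kernel memory is never handed back on a read.
		 */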
kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 6015 if (!kernel_buffer) 6016 return -ENOMEM; 6017 if (iocommand.Request.Type.Direction & XFER_WRITE) { 6018 if (copy_from_user(kernel_buffer, iocommand.buf, 6019 iocommand.buf_size)) { 6020 rc = -EFAULT; 6021 goto out; 6022 } 6023 } else { 6024 memset(kernel_buffer, 0, iocommand.buf_size); 6025 } 6026 } 6027 6028 memset(&request, 0, sizeof(request)); 6029 6030 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6031 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 6032 PQI_REQUEST_HEADER_LENGTH; 6033 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6034 sizeof(request.lun_number)); 6035 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6036 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6037 6038 switch (iocommand.Request.Type.Direction) { 6039 case XFER_NONE: 6040 request.data_direction = SOP_NO_DIRECTION_FLAG; 6041 break; 6042 case XFER_WRITE: 6043 request.data_direction = SOP_WRITE_FLAG; 6044 break; 6045 case XFER_READ: 6046 request.data_direction = SOP_READ_FLAG; 6047 break; 6048 case XFER_READ | XFER_WRITE: 6049 request.data_direction = SOP_BIDIRECTIONAL; 6050 break; 6051 } 6052 6053 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6054 6055 if (iocommand.buf_size > 0) { 6056 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6057 6058 rc = pqi_map_single(ctrl_info->pci_dev, 6059 &request.sg_descriptors[0], kernel_buffer, 6060 iocommand.buf_size, DMA_BIDIRECTIONAL); 6061 if (rc) 6062 goto out; 6063 6064 iu_length += sizeof(request.sg_descriptors[0]); 6065 } 6066 6067 put_unaligned_le16(iu_length, &request.header.iu_length); 6068 6069 if (ctrl_info->raid_iu_timeout_supported) 6070 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6071 6072 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6073 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 6074 6075 if (iocommand.buf_size > 0) 6076 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6077 DMA_BIDIRECTIONAL); 6078 6079 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6080 6081 if (rc == 0) { 6082 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6083 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6084 iocommand.error_info.CommandStatus = 6085 ciss_error_info.command_status; 6086 sense_data_length = ciss_error_info.sense_data_length; 6087 if (sense_data_length) { 6088 if (sense_data_length > 6089 sizeof(iocommand.error_info.SenseInfo)) 6090 sense_data_length = 6091 sizeof(iocommand.error_info.SenseInfo); 6092 memcpy(iocommand.error_info.SenseInfo, 6093 pqi_error_info.data, sense_data_length); 6094 iocommand.error_info.SenseLen = sense_data_length; 6095 } 6096 } 6097 6098 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6099 rc = -EFAULT; 6100 goto out; 6101 } 6102 6103 if (rc == 0 && iocommand.buf_size > 0 && 6104 (iocommand.Request.Type.Direction & XFER_READ)) { 6105 if (copy_to_user(iocommand.buf, kernel_buffer, 6106 iocommand.buf_size)) { 6107 rc = -EFAULT; 6108 } 6109 } 6110 6111 out: 6112 kfree(kernel_buffer); 6113 6114 return rc; 6115 } 6116 6117 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6118 void __user *arg) 6119 { 6120 int rc; 6121 struct pqi_ctrl_info *ctrl_info; 6122 6123 ctrl_info = shost_to_hba(sdev->host); 6124 6125 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) 6126 return -EBUSY; 6127 6128 switch (cmd) { 6129 case CCISS_DEREGDISK: 6130 
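	/* the legacy disk (de)registration ioctls are all serviced by a rescan */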
case CCISS_REGNEWDISK: 6131 case CCISS_REGNEWD: 6132 rc = pqi_scan_scsi_devices(ctrl_info); 6133 break; 6134 case CCISS_GETPCIINFO: 6135 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6136 break; 6137 case CCISS_GETDRIVVER: 6138 rc = pqi_getdrivver_ioctl(arg); 6139 break; 6140 case CCISS_PASSTHRU: 6141 rc = pqi_passthru_ioctl(ctrl_info, arg); 6142 break; 6143 default: 6144 rc = -EINVAL; 6145 break; 6146 } 6147 6148 return rc; 6149 } 6150 6151 static ssize_t pqi_firmware_version_show(struct device *dev, 6152 struct device_attribute *attr, char *buffer) 6153 { 6154 struct Scsi_Host *shost; 6155 struct pqi_ctrl_info *ctrl_info; 6156 6157 shost = class_to_shost(dev); 6158 ctrl_info = shost_to_hba(shost); 6159 6160 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6161 } 6162 6163 static ssize_t pqi_driver_version_show(struct device *dev, 6164 struct device_attribute *attr, char *buffer) 6165 { 6166 return snprintf(buffer, PAGE_SIZE, "%s\n", 6167 DRIVER_VERSION BUILD_TIMESTAMP); 6168 } 6169 6170 static ssize_t pqi_serial_number_show(struct device *dev, 6171 struct device_attribute *attr, char *buffer) 6172 { 6173 struct Scsi_Host *shost; 6174 struct pqi_ctrl_info *ctrl_info; 6175 6176 shost = class_to_shost(dev); 6177 ctrl_info = shost_to_hba(shost); 6178 6179 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6180 } 6181 6182 static ssize_t pqi_model_show(struct device *dev, 6183 struct device_attribute *attr, char *buffer) 6184 { 6185 struct Scsi_Host *shost; 6186 struct pqi_ctrl_info *ctrl_info; 6187 6188 shost = class_to_shost(dev); 6189 ctrl_info = shost_to_hba(shost); 6190 6191 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6192 } 6193 6194 static ssize_t pqi_vendor_show(struct device *dev, 6195 struct device_attribute *attr, char *buffer) 6196 { 6197 struct Scsi_Host *shost; 6198 struct pqi_ctrl_info *ctrl_info; 6199 6200 shost = class_to_shost(dev); 6201 ctrl_info = shost_to_hba(shost); 6202 6203 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6204 } 6205 6206 static ssize_t pqi_host_rescan_store(struct device *dev, 6207 struct device_attribute *attr, const char *buffer, size_t count) 6208 { 6209 struct Scsi_Host *shost = class_to_shost(dev); 6210 6211 pqi_scan_start(shost); 6212 6213 return count; 6214 } 6215 6216 static ssize_t pqi_lockup_action_show(struct device *dev, 6217 struct device_attribute *attr, char *buffer) 6218 { 6219 int count = 0; 6220 unsigned int i; 6221 6222 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6223 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6224 count += scnprintf(buffer + count, PAGE_SIZE - count, 6225 "[%s] ", pqi_lockup_actions[i].name); 6226 else 6227 count += scnprintf(buffer + count, PAGE_SIZE - count, 6228 "%s ", pqi_lockup_actions[i].name); 6229 } 6230 6231 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); 6232 6233 return count; 6234 } 6235 6236 static ssize_t pqi_lockup_action_store(struct device *dev, 6237 struct device_attribute *attr, const char *buffer, size_t count) 6238 { 6239 unsigned int i; 6240 char *action_name; 6241 char action_name_buffer[32]; 6242 6243 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6244 action_name = strstrip(action_name_buffer); 6245 6246 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6247 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 6248 pqi_lockup_action = pqi_lockup_actions[i].action; 6249 return count; 6250 } 6251 } 6252 6253 return -EINVAL; 6254 } 6255 6256 static 
DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); 6257 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 6258 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 6259 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 6260 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 6261 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 6262 static DEVICE_ATTR(lockup_action, 0644, 6263 pqi_lockup_action_show, pqi_lockup_action_store); 6264 6265 static struct device_attribute *pqi_shost_attrs[] = { 6266 &dev_attr_driver_version, 6267 &dev_attr_firmware_version, 6268 &dev_attr_model, 6269 &dev_attr_serial_number, 6270 &dev_attr_vendor, 6271 &dev_attr_rescan, 6272 &dev_attr_lockup_action, 6273 NULL 6274 }; 6275 6276 static ssize_t pqi_unique_id_show(struct device *dev, 6277 struct device_attribute *attr, char *buffer) 6278 { 6279 struct pqi_ctrl_info *ctrl_info; 6280 struct scsi_device *sdev; 6281 struct pqi_scsi_dev *device; 6282 unsigned long flags; 6283 u8 unique_id[16]; 6284 6285 sdev = to_scsi_device(dev); 6286 ctrl_info = shost_to_hba(sdev->host); 6287 6288 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6289 6290 device = sdev->hostdata; 6291 if (!device) { 6292 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6293 return -ENODEV; 6294 } 6295 6296 if (device->is_physical_device) { 6297 memset(unique_id, 0, 8); 6298 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); 6299 } else { 6300 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 6301 } 6302 6303 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6304 6305 return snprintf(buffer, PAGE_SIZE, 6306 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", 6307 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 6308 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 6309 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 6310 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 6311 } 6312 6313 static ssize_t pqi_lunid_show(struct device *dev, 6314 struct device_attribute *attr, char *buffer) 6315 { 6316 struct pqi_ctrl_info *ctrl_info; 6317 struct scsi_device *sdev; 6318 struct pqi_scsi_dev *device; 6319 unsigned long flags; 6320 u8 lunid[8]; 6321 6322 sdev = to_scsi_device(dev); 6323 ctrl_info = shost_to_hba(sdev->host); 6324 6325 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6326 6327 device = sdev->hostdata; 6328 if (!device) { 6329 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6330 return -ENODEV; 6331 } 6332 6333 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 6334 6335 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6336 6337 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 6338 } 6339 6340 #define MAX_PATHS 8 6341 6342 static ssize_t pqi_path_info_show(struct device *dev, 6343 struct device_attribute *attr, char *buf) 6344 { 6345 struct pqi_ctrl_info *ctrl_info; 6346 struct scsi_device *sdev; 6347 struct pqi_scsi_dev *device; 6348 unsigned long flags; 6349 int i; 6350 int output_len = 0; 6351 u8 box; 6352 u8 bay; 6353 u8 path_map_index; 6354 char *active; 6355 u8 phys_connector[2]; 6356 6357 sdev = to_scsi_device(dev); 6358 ctrl_info = shost_to_hba(sdev->host); 6359 6360 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6361 6362 device = sdev->hostdata; 6363 if (!device) { 6364 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6365 return -ENODEV; 6366 } 6367 6368 
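	/*
	 * Emit one line per path set in the device's path map, marking the
	 * currently active path; physical devices also report connector,
	 * box, and bay information where available.
	 */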
bay = device->bay; 6369 for (i = 0; i < MAX_PATHS; i++) { 6370 path_map_index = 1 << i; 6371 if (i == device->active_path_index) 6372 active = "Active"; 6373 else if (device->path_map & path_map_index) 6374 active = "Inactive"; 6375 else 6376 continue; 6377 6378 output_len += scnprintf(buf + output_len, 6379 PAGE_SIZE - output_len, 6380 "[%d:%d:%d:%d] %20.20s ", 6381 ctrl_info->scsi_host->host_no, 6382 device->bus, device->target, 6383 device->lun, 6384 scsi_device_type(device->devtype)); 6385 6386 if (device->devtype == TYPE_RAID || 6387 pqi_is_logical_device(device)) 6388 goto end_buffer; 6389 6390 memcpy(&phys_connector, &device->phys_connector[i], 6391 sizeof(phys_connector)); 6392 if (phys_connector[0] < '0') 6393 phys_connector[0] = '0'; 6394 if (phys_connector[1] < '0') 6395 phys_connector[1] = '0'; 6396 6397 output_len += scnprintf(buf + output_len, 6398 PAGE_SIZE - output_len, 6399 "PORT: %.2s ", phys_connector); 6400 6401 box = device->box[i]; 6402 if (box != 0 && box != 0xFF) 6403 output_len += scnprintf(buf + output_len, 6404 PAGE_SIZE - output_len, 6405 "BOX: %hhu ", box); 6406 6407 if ((device->devtype == TYPE_DISK || 6408 device->devtype == TYPE_ZBC) && 6409 pqi_expose_device(device)) 6410 output_len += scnprintf(buf + output_len, 6411 PAGE_SIZE - output_len, 6412 "BAY: %hhu ", bay); 6413 6414 end_buffer: 6415 output_len += scnprintf(buf + output_len, 6416 PAGE_SIZE - output_len, 6417 "%s\n", active); 6418 } 6419 6420 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6421 6422 return output_len; 6423 } 6424 6425 static ssize_t pqi_sas_address_show(struct device *dev, 6426 struct device_attribute *attr, char *buffer) 6427 { 6428 struct pqi_ctrl_info *ctrl_info; 6429 struct scsi_device *sdev; 6430 struct pqi_scsi_dev *device; 6431 unsigned long flags; 6432 u64 sas_address; 6433 6434 sdev = to_scsi_device(dev); 6435 ctrl_info = shost_to_hba(sdev->host); 6436 6437 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6438 6439 device = sdev->hostdata; 6440 if (!device || !pqi_is_device_with_sas_address(device)) { 6441 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6442 return -ENODEV; 6443 } 6444 6445 sas_address = device->sas_address; 6446 6447 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6448 6449 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 6450 } 6451 6452 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 6453 struct device_attribute *attr, char *buffer) 6454 { 6455 struct pqi_ctrl_info *ctrl_info; 6456 struct scsi_device *sdev; 6457 struct pqi_scsi_dev *device; 6458 unsigned long flags; 6459 6460 sdev = to_scsi_device(dev); 6461 ctrl_info = shost_to_hba(sdev->host); 6462 6463 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6464 6465 device = sdev->hostdata; 6466 if (!device) { 6467 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6468 return -ENODEV; 6469 } 6470 6471 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 6472 buffer[1] = '\n'; 6473 buffer[2] = '\0'; 6474 6475 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6476 6477 return 2; 6478 } 6479 6480 static ssize_t pqi_raid_level_show(struct device *dev, 6481 struct device_attribute *attr, char *buffer) 6482 { 6483 struct pqi_ctrl_info *ctrl_info; 6484 struct scsi_device *sdev; 6485 struct pqi_scsi_dev *device; 6486 unsigned long flags; 6487 char *raid_level; 6488 6489 sdev = to_scsi_device(dev); 6490 ctrl_info = shost_to_hba(sdev->host); 6491 6492 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6493 6494 device = sdev->hostdata; 6495 if (!device) { 6496 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6497 return -ENODEV; 6498 } 6499 6500 if (pqi_is_logical_device(device)) 6501 raid_level = pqi_raid_level_to_string(device->raid_level); 6502 else 6503 raid_level = "N/A"; 6504 6505 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6506 6507 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 6508 } 6509 6510 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, 6511 struct device_attribute *attr, char *buffer) 6512 { 6513 struct pqi_ctrl_info *ctrl_info; 6514 struct scsi_device *sdev; 6515 struct pqi_scsi_dev *device; 6516 unsigned long flags; 6517 int raid_bypass_cnt; 6518 6519 sdev = to_scsi_device(dev); 6520 ctrl_info = shost_to_hba(sdev->host); 6521 6522 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6523 6524 device = sdev->hostdata; 6525 if (!device) { 6526 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6527 return -ENODEV; 6528 } 6529 6530 raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt); 6531 6532 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6533 6534 return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); 6535 } 6536 6537 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 6538 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 6539 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 6540 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 6541 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); 6542 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 6543 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); 6544 6545 static struct device_attribute *pqi_sdev_attrs[] = { 6546 &dev_attr_lunid, 6547 &dev_attr_unique_id, 6548 &dev_attr_path_info, 6549 &dev_attr_sas_address, 6550 &dev_attr_ssd_smart_path_enabled, 6551 &dev_attr_raid_level, 6552 &dev_attr_raid_bypass_cnt, 6553 NULL 6554 }; 6555 6556 static struct scsi_host_template pqi_driver_template = { 6557 .module = THIS_MODULE, 6558 .name = DRIVER_NAME_SHORT, 6559 .proc_name = DRIVER_NAME_SHORT, 6560 .queuecommand = pqi_scsi_queue_command, 6561 .scan_start = pqi_scan_start, 6562 .scan_finished = pqi_scan_finished, 6563 .this_id = -1, 6564 .eh_device_reset_handler = pqi_eh_device_reset_handler, 6565 .ioctl = pqi_ioctl, 6566 .slave_alloc = pqi_slave_alloc, 6567 .slave_configure = pqi_slave_configure, 6568 .slave_destroy = pqi_slave_destroy, 6569 .map_queues = pqi_map_queues, 6570 .sdev_attrs = pqi_sdev_attrs, 6571 .shost_attrs = pqi_shost_attrs, 6572 }; 6573 6574 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 6575 { 6576 int rc; 6577 struct Scsi_Host *shost; 6578 6579 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 6580 if (!shost) { 6581 dev_err(&ctrl_info->pci_dev->dev, 6582 "scsi_host_alloc 
failed for controller %u\n", 6583 ctrl_info->ctrl_id); 6584 return -ENOMEM; 6585 } 6586 6587 shost->io_port = 0; 6588 shost->n_io_port = 0; 6589 shost->this_id = -1; 6590 shost->max_channel = PQI_MAX_BUS; 6591 shost->max_cmd_len = MAX_COMMAND_SIZE; 6592 shost->max_lun = ~0; 6593 shost->max_id = ~0; 6594 shost->max_sectors = ctrl_info->max_sectors; 6595 shost->can_queue = ctrl_info->scsi_ml_can_queue; 6596 shost->cmd_per_lun = shost->can_queue; 6597 shost->sg_tablesize = ctrl_info->sg_tablesize; 6598 shost->transportt = pqi_sas_transport_template; 6599 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 6600 shost->unique_id = shost->irq; 6601 shost->nr_hw_queues = ctrl_info->num_queue_groups; 6602 shost->hostdata[0] = (unsigned long)ctrl_info; 6603 6604 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 6605 if (rc) { 6606 dev_err(&ctrl_info->pci_dev->dev, 6607 "scsi_add_host failed for controller %u\n", 6608 ctrl_info->ctrl_id); 6609 goto free_host; 6610 } 6611 6612 rc = pqi_add_sas_host(shost, ctrl_info); 6613 if (rc) { 6614 dev_err(&ctrl_info->pci_dev->dev, 6615 "add SAS host failed for controller %u\n", 6616 ctrl_info->ctrl_id); 6617 goto remove_host; 6618 } 6619 6620 ctrl_info->scsi_host = shost; 6621 6622 return 0; 6623 6624 remove_host: 6625 scsi_remove_host(shost); 6626 free_host: 6627 scsi_host_put(shost); 6628 6629 return rc; 6630 } 6631 6632 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 6633 { 6634 struct Scsi_Host *shost; 6635 6636 pqi_delete_sas_host(ctrl_info); 6637 6638 shost = ctrl_info->scsi_host; 6639 if (!shost) 6640 return; 6641 6642 scsi_remove_host(shost); 6643 scsi_host_put(shost); 6644 } 6645 6646 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 6647 { 6648 int rc = 0; 6649 struct pqi_device_registers __iomem *pqi_registers; 6650 unsigned long timeout; 6651 unsigned int timeout_msecs; 6652 union pqi_reset_register reset_reg; 6653 6654 pqi_registers = ctrl_info->pqi_registers; 6655 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 6656 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 6657 6658 while (1) { 6659 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 6660 reset_reg.all_bits = readl(&pqi_registers->device_reset); 6661 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 6662 break; 6663 pqi_check_ctrl_health(ctrl_info); 6664 if (pqi_ctrl_offline(ctrl_info)) { 6665 rc = -ENXIO; 6666 break; 6667 } 6668 if (time_after(jiffies, timeout)) { 6669 rc = -ETIMEDOUT; 6670 break; 6671 } 6672 } 6673 6674 return rc; 6675 } 6676 6677 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 6678 { 6679 int rc; 6680 union pqi_reset_register reset_reg; 6681 6682 if (ctrl_info->pqi_reset_quiesce_supported) { 6683 rc = sis_pqi_reset_quiesce(ctrl_info); 6684 if (rc) { 6685 dev_err(&ctrl_info->pci_dev->dev, 6686 "PQI reset failed during quiesce with error %d\n", 6687 rc); 6688 return rc; 6689 } 6690 } 6691 6692 reset_reg.all_bits = 0; 6693 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 6694 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 6695 6696 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 6697 6698 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 6699 if (rc) 6700 dev_err(&ctrl_info->pci_dev->dev, 6701 "PQI reset failed with error %d\n", rc); 6702 6703 return rc; 6704 } 6705 6706 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 6707 { 6708 int rc; 6709 struct bmic_sense_subsystem_info *sense_info; 6710 6711 sense_info = kzalloc(sizeof(*sense_info), 
GFP_KERNEL); 6712 if (!sense_info) 6713 return -ENOMEM; 6714 6715 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 6716 if (rc) 6717 goto out; 6718 6719 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 6720 sizeof(sense_info->ctrl_serial_number)); 6721 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 6722 6723 out: 6724 kfree(sense_info); 6725 6726 return rc; 6727 } 6728 6729 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 6730 { 6731 int rc; 6732 struct bmic_identify_controller *identify; 6733 6734 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 6735 if (!identify) 6736 return -ENOMEM; 6737 6738 rc = pqi_identify_controller(ctrl_info, identify); 6739 if (rc) 6740 goto out; 6741 6742 memcpy(ctrl_info->firmware_version, identify->firmware_version, 6743 sizeof(identify->firmware_version)); 6744 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 6745 snprintf(ctrl_info->firmware_version + 6746 strlen(ctrl_info->firmware_version), 6747 sizeof(ctrl_info->firmware_version), 6748 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 6749 6750 memcpy(ctrl_info->model, identify->product_id, 6751 sizeof(identify->product_id)); 6752 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 6753 6754 memcpy(ctrl_info->vendor, identify->vendor_id, 6755 sizeof(identify->vendor_id)); 6756 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 6757 6758 out: 6759 kfree(identify); 6760 6761 return rc; 6762 } 6763 6764 struct pqi_config_table_section_info { 6765 struct pqi_ctrl_info *ctrl_info; 6766 void *section; 6767 u32 section_offset; 6768 void __iomem *section_iomem_addr; 6769 }; 6770 6771 static inline bool pqi_is_firmware_feature_supported( 6772 struct pqi_config_table_firmware_features *firmware_features, 6773 unsigned int bit_position) 6774 { 6775 unsigned int byte_index; 6776 6777 byte_index = bit_position / BITS_PER_BYTE; 6778 6779 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 6780 return false; 6781 6782 return firmware_features->features_supported[byte_index] & 6783 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 6784 } 6785 6786 static inline bool pqi_is_firmware_feature_enabled( 6787 struct pqi_config_table_firmware_features *firmware_features, 6788 void __iomem *firmware_features_iomem_addr, 6789 unsigned int bit_position) 6790 { 6791 unsigned int byte_index; 6792 u8 __iomem *features_enabled_iomem_addr; 6793 6794 byte_index = (bit_position / BITS_PER_BYTE) + 6795 (le16_to_cpu(firmware_features->num_elements) * 2); 6796 6797 features_enabled_iomem_addr = firmware_features_iomem_addr + 6798 offsetof(struct pqi_config_table_firmware_features, 6799 features_supported) + byte_index; 6800 6801 return *((__force u8 *)features_enabled_iomem_addr) & 6802 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 6803 } 6804 6805 static inline void pqi_request_firmware_feature( 6806 struct pqi_config_table_firmware_features *firmware_features, 6807 unsigned int bit_position) 6808 { 6809 unsigned int byte_index; 6810 6811 byte_index = (bit_position / BITS_PER_BYTE) + 6812 le16_to_cpu(firmware_features->num_elements); 6813 6814 firmware_features->features_supported[byte_index] |= 6815 (1 << (bit_position % BITS_PER_BYTE)); 6816 } 6817 6818 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 6819 u16 first_section, u16 last_section) 6820 { 6821 struct pqi_vendor_general_request request; 6822 6823 memset(&request, 0, sizeof(request)); 6824 6825 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 6826 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 6827 &request.header.iu_length); 6828 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 6829 &request.function_code); 6830 put_unaligned_le16(first_section, 6831 &request.data.config_table_update.first_section); 6832 put_unaligned_le16(last_section, 6833 &request.data.config_table_update.last_section); 6834 6835 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6836 0, NULL, NO_TIMEOUT); 6837 } 6838 6839 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 6840 struct pqi_config_table_firmware_features *firmware_features, 6841 void __iomem *firmware_features_iomem_addr) 6842 { 6843 void *features_requested; 6844 void __iomem *features_requested_iomem_addr; 6845 6846 features_requested = firmware_features->features_supported + 6847 le16_to_cpu(firmware_features->num_elements); 6848 6849 features_requested_iomem_addr = firmware_features_iomem_addr + 6850 (features_requested - (void *)firmware_features); 6851 6852 memcpy_toio(features_requested_iomem_addr, features_requested, 6853 le16_to_cpu(firmware_features->num_elements)); 6854 6855 return pqi_config_table_update(ctrl_info, 6856 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 6857 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 6858 } 6859 6860 struct pqi_firmware_feature { 6861 char *feature_name; 6862 unsigned int feature_bit; 6863 bool supported; 6864 bool enabled; 6865 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 6866 struct pqi_firmware_feature *firmware_feature); 6867 }; 6868 6869 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 6870 struct pqi_firmware_feature *firmware_feature) 6871 { 6872 if (!firmware_feature->supported) { 6873 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 6874 firmware_feature->feature_name); 6875 return; 6876 } 6877 6878 if (firmware_feature->enabled) { 6879 dev_info(&ctrl_info->pci_dev->dev, 6880 "%s enabled\n", firmware_feature->feature_name); 6881 return; 6882 } 6883 6884 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 6885 firmware_feature->feature_name); 6886 } 6887 6888 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 6889 struct pqi_firmware_feature *firmware_feature) 6890 { 6891 switch (firmware_feature->feature_bit) { 6892 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 6893 ctrl_info->soft_reset_handshake_supported = 6894 firmware_feature->enabled; 6895 break; 6896 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 6897 ctrl_info->raid_iu_timeout_supported = 6898 firmware_feature->enabled; 6899 break; 6900 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 6901 ctrl_info->tmf_iu_timeout_supported = 6902 firmware_feature->enabled; 6903 break; 6904 } 6905 6906 pqi_firmware_feature_status(ctrl_info, 
firmware_feature); 6907 } 6908 6909 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 6910 struct pqi_firmware_feature *firmware_feature) 6911 { 6912 if (firmware_feature->feature_status) 6913 firmware_feature->feature_status(ctrl_info, firmware_feature); 6914 } 6915 6916 static DEFINE_MUTEX(pqi_firmware_features_mutex); 6917 6918 static struct pqi_firmware_feature pqi_firmware_features[] = { 6919 { 6920 .feature_name = "Online Firmware Activation", 6921 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 6922 .feature_status = pqi_firmware_feature_status, 6923 }, 6924 { 6925 .feature_name = "Serial Management Protocol", 6926 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 6927 .feature_status = pqi_firmware_feature_status, 6928 }, 6929 { 6930 .feature_name = "New Soft Reset Handshake", 6931 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 6932 .feature_status = pqi_ctrl_update_feature_flags, 6933 }, 6934 { 6935 .feature_name = "RAID IU Timeout", 6936 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 6937 .feature_status = pqi_ctrl_update_feature_flags, 6938 }, 6939 { 6940 .feature_name = "TMF IU Timeout", 6941 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 6942 .feature_status = pqi_ctrl_update_feature_flags, 6943 }, 6944 }; 6945 6946 static void pqi_process_firmware_features( 6947 struct pqi_config_table_section_info *section_info) 6948 { 6949 int rc; 6950 struct pqi_ctrl_info *ctrl_info; 6951 struct pqi_config_table_firmware_features *firmware_features; 6952 void __iomem *firmware_features_iomem_addr; 6953 unsigned int i; 6954 unsigned int num_features_supported; 6955 6956 ctrl_info = section_info->ctrl_info; 6957 firmware_features = section_info->section; 6958 firmware_features_iomem_addr = section_info->section_iomem_addr; 6959 6960 for (i = 0, num_features_supported = 0; 6961 i < ARRAY_SIZE(pqi_firmware_features); i++) { 6962 if (pqi_is_firmware_feature_supported(firmware_features, 6963 pqi_firmware_features[i].feature_bit)) { 6964 pqi_firmware_features[i].supported = true; 6965 num_features_supported++; 6966 } else { 6967 pqi_firmware_feature_update(ctrl_info, 6968 &pqi_firmware_features[i]); 6969 } 6970 } 6971 6972 if (num_features_supported == 0) 6973 return; 6974 6975 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6976 if (!pqi_firmware_features[i].supported) 6977 continue; 6978 pqi_request_firmware_feature(firmware_features, 6979 pqi_firmware_features[i].feature_bit); 6980 } 6981 6982 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 6983 firmware_features_iomem_addr); 6984 if (rc) { 6985 dev_err(&ctrl_info->pci_dev->dev, 6986 "failed to enable firmware features in PQI configuration table\n"); 6987 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6988 if (!pqi_firmware_features[i].supported) 6989 continue; 6990 pqi_firmware_feature_update(ctrl_info, 6991 &pqi_firmware_features[i]); 6992 } 6993 return; 6994 } 6995 6996 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6997 if (!pqi_firmware_features[i].supported) 6998 continue; 6999 if (pqi_is_firmware_feature_enabled(firmware_features, 7000 firmware_features_iomem_addr, 7001 pqi_firmware_features[i].feature_bit)) { 7002 pqi_firmware_features[i].enabled = true; 7003 } 7004 pqi_firmware_feature_update(ctrl_info, 7005 &pqi_firmware_features[i]); 7006 } 7007 } 7008 7009 static void pqi_init_firmware_features(void) 7010 { 7011 unsigned int i; 7012 7013 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 7014 pqi_firmware_features[i].supported = false; 7015 
pqi_firmware_features[i].enabled = false; 7016 } 7017 } 7018 7019 static void pqi_process_firmware_features_section( 7020 struct pqi_config_table_section_info *section_info) 7021 { 7022 mutex_lock(&pqi_firmware_features_mutex); 7023 pqi_init_firmware_features(); 7024 pqi_process_firmware_features(section_info); 7025 mutex_unlock(&pqi_firmware_features_mutex); 7026 } 7027 7028 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 7029 { 7030 u32 table_length; 7031 u32 section_offset; 7032 void __iomem *table_iomem_addr; 7033 struct pqi_config_table *config_table; 7034 struct pqi_config_table_section_header *section; 7035 struct pqi_config_table_section_info section_info; 7036 7037 table_length = ctrl_info->config_table_length; 7038 if (table_length == 0) 7039 return 0; 7040 7041 config_table = kmalloc(table_length, GFP_KERNEL); 7042 if (!config_table) { 7043 dev_err(&ctrl_info->pci_dev->dev, 7044 "failed to allocate memory for PQI configuration table\n"); 7045 return -ENOMEM; 7046 } 7047 7048 /* 7049 * Copy the config table contents from I/O memory space into the 7050 * temporary buffer. 7051 */ 7052 table_iomem_addr = ctrl_info->iomem_base + 7053 ctrl_info->config_table_offset; 7054 memcpy_fromio(config_table, table_iomem_addr, table_length); 7055 7056 section_info.ctrl_info = ctrl_info; 7057 section_offset = 7058 get_unaligned_le32(&config_table->first_section_offset); 7059 7060 while (section_offset) { 7061 section = (void *)config_table + section_offset; 7062 7063 section_info.section = section; 7064 section_info.section_offset = section_offset; 7065 section_info.section_iomem_addr = 7066 table_iomem_addr + section_offset; 7067 7068 switch (get_unaligned_le16(&section->section_id)) { 7069 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 7070 pqi_process_firmware_features_section(&section_info); 7071 break; 7072 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 7073 if (pqi_disable_heartbeat) 7074 dev_warn(&ctrl_info->pci_dev->dev, 7075 "heartbeat disabled by module parameter\n"); 7076 else 7077 ctrl_info->heartbeat_counter = 7078 table_iomem_addr + 7079 section_offset + 7080 offsetof( 7081 struct pqi_config_table_heartbeat, 7082 heartbeat_counter); 7083 break; 7084 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 7085 ctrl_info->soft_reset_status = 7086 table_iomem_addr + 7087 section_offset + 7088 offsetof(struct pqi_config_table_soft_reset, 7089 soft_reset_status); 7090 break; 7091 } 7092 7093 section_offset = 7094 get_unaligned_le16(&section->next_section_offset); 7095 } 7096 7097 kfree(config_table); 7098 7099 return 0; 7100 } 7101 7102 /* Switches the controller from PQI mode back into SIS mode. */ 7103 7104 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 7105 { 7106 int rc; 7107 7108 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 7109 rc = pqi_reset(ctrl_info); 7110 if (rc) 7111 return rc; 7112 rc = sis_reenable_sis_mode(ctrl_info); 7113 if (rc) { 7114 dev_err(&ctrl_info->pci_dev->dev, 7115 "re-enabling SIS mode failed with error %d\n", rc); 7116 return rc; 7117 } 7118 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7119 7120 return 0; 7121 } 7122 7123 /* 7124 * If the controller isn't already in SIS mode, this function forces it into 7125 * SIS mode.
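 * If the SIS base code is already up, only the saved controller mode is
 * updated; otherwise the controller is PQI-reset and SIS mode is
 * re-enabled via pqi_revert_to_sis_mode().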
7126 */ 7127 7128 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 7129 { 7130 if (!sis_is_firmware_running(ctrl_info)) 7131 return -ENXIO; 7132 7133 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 7134 return 0; 7135 7136 if (sis_is_kernel_up(ctrl_info)) { 7137 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7138 return 0; 7139 } 7140 7141 return pqi_revert_to_sis_mode(ctrl_info); 7142 } 7143 7144 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 7145 7146 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 7147 { 7148 int rc; 7149 7150 if (reset_devices) { 7151 sis_soft_reset(ctrl_info); 7152 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); 7153 } else { 7154 rc = pqi_force_sis_mode(ctrl_info); 7155 if (rc) 7156 return rc; 7157 } 7158 7159 /* 7160 * Wait until the controller is ready to start accepting SIS 7161 * commands. 7162 */ 7163 rc = sis_wait_for_ctrl_ready(ctrl_info); 7164 if (rc) 7165 return rc; 7166 7167 /* 7168 * Get the controller properties. This allows us to determine 7169 * whether or not it supports PQI mode. 7170 */ 7171 rc = sis_get_ctrl_properties(ctrl_info); 7172 if (rc) { 7173 dev_err(&ctrl_info->pci_dev->dev, 7174 "error obtaining controller properties\n"); 7175 return rc; 7176 } 7177 7178 rc = sis_get_pqi_capabilities(ctrl_info); 7179 if (rc) { 7180 dev_err(&ctrl_info->pci_dev->dev, 7181 "error obtaining controller capabilities\n"); 7182 return rc; 7183 } 7184 7185 if (reset_devices) { 7186 if (ctrl_info->max_outstanding_requests > 7187 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 7188 ctrl_info->max_outstanding_requests = 7189 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 7190 } else { 7191 if (ctrl_info->max_outstanding_requests > 7192 PQI_MAX_OUTSTANDING_REQUESTS) 7193 ctrl_info->max_outstanding_requests = 7194 PQI_MAX_OUTSTANDING_REQUESTS; 7195 } 7196 7197 pqi_calculate_io_resources(ctrl_info); 7198 7199 rc = pqi_alloc_error_buffer(ctrl_info); 7200 if (rc) { 7201 dev_err(&ctrl_info->pci_dev->dev, 7202 "failed to allocate PQI error buffer\n"); 7203 return rc; 7204 } 7205 7206 /* 7207 * If the function we are about to call succeeds, the 7208 * controller will transition from legacy SIS mode 7209 * into PQI mode. 7210 */ 7211 rc = sis_init_base_struct_addr(ctrl_info); 7212 if (rc) { 7213 dev_err(&ctrl_info->pci_dev->dev, 7214 "error initializing PQI mode\n"); 7215 return rc; 7216 } 7217 7218 /* Wait for the controller to complete the SIS -> PQI transition. */ 7219 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 7220 if (rc) { 7221 dev_err(&ctrl_info->pci_dev->dev, 7222 "transition to PQI mode failed\n"); 7223 return rc; 7224 } 7225 7226 /* From here on, we are running in PQI mode. 
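 * pqi_mode_enabled is recorded so the controller can be reverted to SIS
 * mode when the driver is removed.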
*/ 7227 ctrl_info->pqi_mode_enabled = true; 7228 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7229 7230 rc = pqi_alloc_admin_queues(ctrl_info); 7231 if (rc) { 7232 dev_err(&ctrl_info->pci_dev->dev, 7233 "failed to allocate admin queues\n"); 7234 return rc; 7235 } 7236 7237 rc = pqi_create_admin_queues(ctrl_info); 7238 if (rc) { 7239 dev_err(&ctrl_info->pci_dev->dev, 7240 "error creating admin queues\n"); 7241 return rc; 7242 } 7243 7244 rc = pqi_report_device_capability(ctrl_info); 7245 if (rc) { 7246 dev_err(&ctrl_info->pci_dev->dev, 7247 "obtaining device capability failed\n"); 7248 return rc; 7249 } 7250 7251 rc = pqi_validate_device_capability(ctrl_info); 7252 if (rc) 7253 return rc; 7254 7255 pqi_calculate_queue_resources(ctrl_info); 7256 7257 rc = pqi_enable_msix_interrupts(ctrl_info); 7258 if (rc) 7259 return rc; 7260 7261 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 7262 ctrl_info->max_msix_vectors = 7263 ctrl_info->num_msix_vectors_enabled; 7264 pqi_calculate_queue_resources(ctrl_info); 7265 } 7266 7267 rc = pqi_alloc_io_resources(ctrl_info); 7268 if (rc) 7269 return rc; 7270 7271 rc = pqi_alloc_operational_queues(ctrl_info); 7272 if (rc) { 7273 dev_err(&ctrl_info->pci_dev->dev, 7274 "failed to allocate operational queues\n"); 7275 return rc; 7276 } 7277 7278 pqi_init_operational_queues(ctrl_info); 7279 7280 rc = pqi_request_irqs(ctrl_info); 7281 if (rc) 7282 return rc; 7283 7284 rc = pqi_create_queues(ctrl_info); 7285 if (rc) 7286 return rc; 7287 7288 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7289 7290 ctrl_info->controller_online = true; 7291 7292 rc = pqi_process_config_table(ctrl_info); 7293 if (rc) 7294 return rc; 7295 7296 pqi_start_heartbeat_timer(ctrl_info); 7297 7298 rc = pqi_enable_events(ctrl_info); 7299 if (rc) { 7300 dev_err(&ctrl_info->pci_dev->dev, 7301 "error enabling events\n"); 7302 return rc; 7303 } 7304 7305 /* Register with the SCSI subsystem. 
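 * This allocates the Scsi_Host, adds it to the SCSI midlayer, and
 * attaches the SAS transport for the controller.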
*/ 7306 rc = pqi_register_scsi(ctrl_info); 7307 if (rc) 7308 return rc; 7309 7310 rc = pqi_get_ctrl_product_details(ctrl_info); 7311 if (rc) { 7312 dev_err(&ctrl_info->pci_dev->dev, 7313 "error obtaining product details\n"); 7314 return rc; 7315 } 7316 7317 rc = pqi_get_ctrl_serial_number(ctrl_info); 7318 if (rc) { 7319 dev_err(&ctrl_info->pci_dev->dev, 7320 "error obtaining ctrl serial number\n"); 7321 return rc; 7322 } 7323 7324 rc = pqi_set_diag_rescan(ctrl_info); 7325 if (rc) { 7326 dev_err(&ctrl_info->pci_dev->dev, 7327 "error enabling multi-lun rescan\n"); 7328 return rc; 7329 } 7330 7331 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7332 if (rc) { 7333 dev_err(&ctrl_info->pci_dev->dev, 7334 "error updating host wellness\n"); 7335 return rc; 7336 } 7337 7338 pqi_schedule_update_time_worker(ctrl_info); 7339 7340 pqi_scan_scsi_devices(ctrl_info); 7341 7342 return 0; 7343 } 7344 7345 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 7346 { 7347 unsigned int i; 7348 struct pqi_admin_queues *admin_queues; 7349 struct pqi_event_queue *event_queue; 7350 7351 admin_queues = &ctrl_info->admin_queues; 7352 admin_queues->iq_pi_copy = 0; 7353 admin_queues->oq_ci_copy = 0; 7354 writel(0, admin_queues->oq_pi); 7355 7356 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 7357 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 7358 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 7359 ctrl_info->queue_groups[i].oq_ci_copy = 0; 7360 7361 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 7362 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 7363 writel(0, ctrl_info->queue_groups[i].oq_pi); 7364 } 7365 7366 event_queue = &ctrl_info->event_queue; 7367 writel(0, event_queue->oq_pi); 7368 event_queue->oq_ci_copy = 0; 7369 } 7370 7371 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 7372 { 7373 int rc; 7374 7375 rc = pqi_force_sis_mode(ctrl_info); 7376 if (rc) 7377 return rc; 7378 7379 /* 7380 * Wait until the controller is ready to start accepting SIS 7381 * commands. 7382 */ 7383 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 7384 if (rc) 7385 return rc; 7386 7387 /* 7388 * Get the controller properties. This allows us to determine 7389 * whether or not it supports PQI mode. 7390 */ 7391 rc = sis_get_ctrl_properties(ctrl_info); 7392 if (rc) { 7393 dev_err(&ctrl_info->pci_dev->dev, 7394 "error obtaining controller properties\n"); 7395 return rc; 7396 } 7397 7398 rc = sis_get_pqi_capabilities(ctrl_info); 7399 if (rc) { 7400 dev_err(&ctrl_info->pci_dev->dev, 7401 "error obtaining controller capabilities\n"); 7402 return rc; 7403 } 7404 7405 /* 7406 * If the function we are about to call succeeds, the 7407 * controller will transition from legacy SIS mode 7408 * into PQI mode. 7409 */ 7410 rc = sis_init_base_struct_addr(ctrl_info); 7411 if (rc) { 7412 dev_err(&ctrl_info->pci_dev->dev, 7413 "error initializing PQI mode\n"); 7414 return rc; 7415 } 7416 7417 /* Wait for the controller to complete the SIS -> PQI transition. */ 7418 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 7419 if (rc) { 7420 dev_err(&ctrl_info->pci_dev->dev, 7421 "transition to PQI mode failed\n"); 7422 return rc; 7423 } 7424 7425 /* From here on, we are running in PQI mode. 
*/ 7426 ctrl_info->pqi_mode_enabled = true; 7427 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7428 7429 pqi_reinit_queues(ctrl_info); 7430 7431 rc = pqi_create_admin_queues(ctrl_info); 7432 if (rc) { 7433 dev_err(&ctrl_info->pci_dev->dev, 7434 "error creating admin queues\n"); 7435 return rc; 7436 } 7437 7438 rc = pqi_create_queues(ctrl_info); 7439 if (rc) 7440 return rc; 7441 7442 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7443 7444 ctrl_info->controller_online = true; 7445 pqi_ctrl_unblock_requests(ctrl_info); 7446 7447 rc = pqi_process_config_table(ctrl_info); 7448 if (rc) 7449 return rc; 7450 7451 pqi_start_heartbeat_timer(ctrl_info); 7452 7453 rc = pqi_enable_events(ctrl_info); 7454 if (rc) { 7455 dev_err(&ctrl_info->pci_dev->dev, 7456 "error enabling events\n"); 7457 return rc; 7458 } 7459 7460 rc = pqi_get_ctrl_product_details(ctrl_info); 7461 if (rc) { 7462 dev_err(&ctrl_info->pci_dev->dev, 7463 "error obtaining product details\n"); 7464 return rc; 7465 } 7466 7467 rc = pqi_set_diag_rescan(ctrl_info); 7468 if (rc) { 7469 dev_err(&ctrl_info->pci_dev->dev, 7470 "error enabling multi-lun rescan\n"); 7471 return rc; 7472 } 7473 7474 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7475 if (rc) { 7476 dev_err(&ctrl_info->pci_dev->dev, 7477 "error updating host wellness\n"); 7478 return rc; 7479 } 7480 7481 pqi_schedule_update_time_worker(ctrl_info); 7482 7483 pqi_scan_scsi_devices(ctrl_info); 7484 7485 return 0; 7486 } 7487 7488 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 7489 u16 timeout) 7490 { 7491 int rc; 7492 7493 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 7494 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 7495 7496 return pcibios_err_to_errno(rc); 7497 } 7498 7499 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 7500 { 7501 int rc; 7502 u64 mask; 7503 7504 rc = pci_enable_device(ctrl_info->pci_dev); 7505 if (rc) { 7506 dev_err(&ctrl_info->pci_dev->dev, 7507 "failed to enable PCI device\n"); 7508 return rc; 7509 } 7510 7511 if (sizeof(dma_addr_t) > 4) 7512 mask = DMA_BIT_MASK(64); 7513 else 7514 mask = DMA_BIT_MASK(32); 7515 7516 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 7517 if (rc) { 7518 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 7519 goto disable_device; 7520 } 7521 7522 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 7523 if (rc) { 7524 dev_err(&ctrl_info->pci_dev->dev, 7525 "failed to obtain PCI resources\n"); 7526 goto disable_device; 7527 } 7528 7529 ctrl_info->iomem_base = ioremap(pci_resource_start( 7530 ctrl_info->pci_dev, 0), 7531 sizeof(struct pqi_ctrl_registers)); 7532 if (!ctrl_info->iomem_base) { 7533 dev_err(&ctrl_info->pci_dev->dev, 7534 "failed to map memory for controller registers\n"); 7535 rc = -ENOMEM; 7536 goto release_regions; 7537 } 7538 7539 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 7540 7541 /* Increase the PCIe completion timeout. */ 7542 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 7543 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 7544 if (rc) { 7545 dev_err(&ctrl_info->pci_dev->dev, 7546 "failed to set PCIe completion timeout\n"); 7547 goto release_regions; 7548 } 7549 7550 /* Enable bus mastering. 
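 * so the controller can initiate DMA to and from host memory.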
*/ 7551 pci_set_master(ctrl_info->pci_dev); 7552 7553 ctrl_info->registers = ctrl_info->iomem_base; 7554 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 7555 7556 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 7557 7558 return 0; 7559 7560 release_regions: 7561 pci_release_regions(ctrl_info->pci_dev); 7562 disable_device: 7563 pci_disable_device(ctrl_info->pci_dev); 7564 7565 return rc; 7566 } 7567 7568 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 7569 { 7570 iounmap(ctrl_info->iomem_base); 7571 pci_release_regions(ctrl_info->pci_dev); 7572 if (pci_is_enabled(ctrl_info->pci_dev)) 7573 pci_disable_device(ctrl_info->pci_dev); 7574 pci_set_drvdata(ctrl_info->pci_dev, NULL); 7575 } 7576 7577 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 7578 { 7579 struct pqi_ctrl_info *ctrl_info; 7580 7581 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 7582 GFP_KERNEL, numa_node); 7583 if (!ctrl_info) 7584 return NULL; 7585 7586 mutex_init(&ctrl_info->scan_mutex); 7587 mutex_init(&ctrl_info->lun_reset_mutex); 7588 mutex_init(&ctrl_info->ofa_mutex); 7589 7590 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 7591 spin_lock_init(&ctrl_info->scsi_device_list_lock); 7592 7593 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 7594 atomic_set(&ctrl_info->num_interrupts, 0); 7595 atomic_set(&ctrl_info->sync_cmds_outstanding, 0); 7596 7597 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 7598 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 7599 7600 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 7601 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 7602 7603 sema_init(&ctrl_info->sync_request_sem, 7604 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 7605 init_waitqueue_head(&ctrl_info->block_requests_wait); 7606 7607 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 7608 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 7609 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 7610 pqi_raid_bypass_retry_worker); 7611 7612 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 7613 ctrl_info->irq_mode = IRQ_MODE_NONE; 7614 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 7615 7616 return ctrl_info; 7617 } 7618 7619 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 7620 { 7621 kfree(ctrl_info); 7622 } 7623 7624 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 7625 { 7626 pqi_free_irqs(ctrl_info); 7627 pqi_disable_msix_interrupts(ctrl_info); 7628 } 7629 7630 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 7631 { 7632 pqi_stop_heartbeat_timer(ctrl_info); 7633 pqi_free_interrupts(ctrl_info); 7634 if (ctrl_info->queue_memory_base) 7635 dma_free_coherent(&ctrl_info->pci_dev->dev, 7636 ctrl_info->queue_memory_length, 7637 ctrl_info->queue_memory_base, 7638 ctrl_info->queue_memory_base_dma_handle); 7639 if (ctrl_info->admin_queue_memory_base) 7640 dma_free_coherent(&ctrl_info->pci_dev->dev, 7641 ctrl_info->admin_queue_memory_length, 7642 ctrl_info->admin_queue_memory_base, 7643 ctrl_info->admin_queue_memory_base_dma_handle); 7644 pqi_free_all_io_requests(ctrl_info); 7645 if (ctrl_info->error_buffer) 7646 dma_free_coherent(&ctrl_info->pci_dev->dev, 7647 ctrl_info->error_buffer_length, 7648 ctrl_info->error_buffer, 7649 ctrl_info->error_buffer_dma_handle); 7650 if (ctrl_info->iomem_base) 7651 pqi_cleanup_pci_init(ctrl_info); 7652 pqi_free_ctrl_info(ctrl_info); 7653 } 7654 7655 static void pqi_remove_ctrl(struct 
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}

static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_ctrl_ofa_start(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
	pqi_fail_io_queued_for_all_devices(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
	ctrl_info->pqi_mode_enabled = false;
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
}

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ofa_free_host_buffer(ctrl_info);
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_schedule_update_time_worker(ctrl_info);
	pqi_clear_soft_reset_status(ctrl_info, PQI_SOFT_RESET_ABORT);
	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
	u32 total_size, u32 chunk_size)
{
	u32 sg_count;
	u32 size;
	int i;
	struct pqi_sg_descriptor *mem_descriptor = NULL;
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	sg_count = (total_size + chunk_size - 1);
	sg_count /= chunk_size;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (sg_count * chunk_size < total_size)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr =
		kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
		dma_addr_t dma_handle;

		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle,
				GFP_KERNEL);

		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			break;

		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64((u64)dma_handle, &mem_descriptor->address);
		put_unaligned_le32(chunk_size, &mem_descriptor->length);
	}

	if (!size || size < total_size)
		goto out_free_chunks;

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	put_unaligned_le32(0, &ofap->bytes_allocated);
	return -ENOMEM;
}

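/*
 * Size the OFA (Online Firmware Activation) host buffer requested by the
 * controller. pqi_ofa_alloc_mem() above tries to cover total_size with
 * ceil(total_size / chunk_size) DMA-coherent chunks; the helper below starts
 * with a single chunk of total_size and halves the chunk size after each
 * failed attempt, giving up once the chunk size would drop below
 * total_size / PQI_OFA_MAX_SG_DESCRIPTORS. For example, a 1 MiB request that
 * cannot be satisfied with one 1 MiB chunk is retried as 2 x 512 KiB, then
 * 4 x 256 KiB, and so on.
 */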
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 min_chunk_size;
	u32 chunk_sz;

	total_size = le32_to_cpu(
		ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
	min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;

	for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
		if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
			return 0;

	return -ENOMEM;
}

static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested)
{
	struct pqi_ofa_memory *pqi_ofa_memory;
	struct device *dev;

	dev = &ctrl_info->pci_dev->dev;
	pqi_ofa_memory = dma_alloc_coherent(dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
		&ctrl_info->pqi_ofa_mem_dma_handle,
		GFP_KERNEL);

	if (!pqi_ofa_memory)
		return;

	put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
	memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
		sizeof(pqi_ofa_memory->signature));
	pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);

	ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0)
		dev_err(dev, "Failed to allocate host buffer of size = %u",
			bytes_requested);
}

static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	struct pqi_sg_descriptor *mem_descriptor;
	struct pqi_ofa_memory *ofap;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (!ofap)
		return;

	if (!ofap->bytes_allocated)
		goto out;

	mem_descriptor = ofap->sg_descriptor;

	for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
		i++) {
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(&ctrl_info->pci_dev->dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}

static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_vendor_general_request request;
	size_t size;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	if (ofap) {
		size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(size,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);
}

static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	return pqi_ctrl_init_resume(ctrl_info);
}

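/*
 * Lockup handling: when the controller is taken offline, the deferred
 * ctrl_offline_work handler below runs the action selected with the
 * lockup_action module parameter (none, reboot or panic; for example
 * "modprobe smartpqi lockup_action=reboot") and then completes every
 * outstanding request, returning DID_NO_CONNECT for commands that came from
 * the SCSI midlayer and -ENXIO for driver-internal requests, so nothing is
 * left hanging.
 */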
static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}

static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};

static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}

static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}

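/*
 * PCI probe path: log which controller model was matched, honor the
 * disable_device_id_wildcards module parameter for PCI_ANY_ID matches,
 * choose a NUMA node for allocations (falling back to the node of CPU 0
 * when the device reports none), then allocate the per-controller
 * pqi_ctrl_info and bring the controller up via pqi_pci_init() and
 * pqi_ctrl_init(). Any failure unwinds through pqi_remove_ctrl().
 */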
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microsemi Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}

static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node, cp_node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		cp_node = cpu_to_node(0);
		if (cp_node == NUMA_NO_NODE)
			cp_node = 0;
		set_dev_node(&pci_dev->dev, cp_node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}

static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}

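/*
 * Shutdown-time sanity check: warn about any I/O slot that is still in use
 * after the driver has drained and blocked requests. The first WARN_ON()
 * fires for commands that came from the SCSI midlayer, the second for
 * non-I/O or driver-initiated requests.
 */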
static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
	}
}

static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_disable_events(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_event_worker(ctrl_info);

	pqi_ctrl_shutdown_start(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending I/O failed\n");
		return;
	}

	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_ctrl_block_requests(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending sync cmds failed\n");
		return;
	}

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}

static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}

static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_disable_events(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_stop_heartbeat_timer(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	return pqi_ctrl_init_resume(ctrl_info);
}

/* Define the PCI IDs for the controllers that we support.
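 * Specific subsystem vendor/device pairs come before the trailing
 * PCI_ANY_ID/PCI_ANY_ID catch-all entry; pqi_pci_probe() warns when a
 * controller is matched only through that wildcard entry, and the
 * disable_device_id_wildcards module parameter can be used to reject such
 * matches entirely.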
*/ 8191 static const struct pci_device_id pqi_pci_id_table[] = { 8192 { 8193 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8194 0x105b, 0x1211) 8195 }, 8196 { 8197 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8198 0x105b, 0x1321) 8199 }, 8200 { 8201 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8202 0x152d, 0x8a22) 8203 }, 8204 { 8205 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8206 0x152d, 0x8a23) 8207 }, 8208 { 8209 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8210 0x152d, 0x8a24) 8211 }, 8212 { 8213 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8214 0x152d, 0x8a36) 8215 }, 8216 { 8217 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8218 0x152d, 0x8a37) 8219 }, 8220 { 8221 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8222 0x193d, 0x1104) 8223 }, 8224 { 8225 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8226 0x193d, 0x1105) 8227 }, 8228 { 8229 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8230 0x193d, 0x1106) 8231 }, 8232 { 8233 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8234 0x193d, 0x1107) 8235 }, 8236 { 8237 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8238 0x193d, 0x8460) 8239 }, 8240 { 8241 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8242 0x193d, 0x8461) 8243 }, 8244 { 8245 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8246 0x193d, 0xc460) 8247 }, 8248 { 8249 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8250 0x193d, 0xc461) 8251 }, 8252 { 8253 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8254 0x193d, 0xf460) 8255 }, 8256 { 8257 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8258 0x193d, 0xf461) 8259 }, 8260 { 8261 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8262 0x1bd4, 0x0045) 8263 }, 8264 { 8265 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8266 0x1bd4, 0x0046) 8267 }, 8268 { 8269 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8270 0x1bd4, 0x0047) 8271 }, 8272 { 8273 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8274 0x1bd4, 0x0048) 8275 }, 8276 { 8277 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8278 0x1bd4, 0x004a) 8279 }, 8280 { 8281 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8282 0x1bd4, 0x004b) 8283 }, 8284 { 8285 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8286 0x1bd4, 0x004c) 8287 }, 8288 { 8289 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8290 0x1bd4, 0x004f) 8291 }, 8292 { 8293 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8294 0x19e5, 0xd227) 8295 }, 8296 { 8297 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8298 0x19e5, 0xd228) 8299 }, 8300 { 8301 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8302 0x19e5, 0xd229) 8303 }, 8304 { 8305 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8306 0x19e5, 0xd22a) 8307 }, 8308 { 8309 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8310 0x19e5, 0xd22b) 8311 }, 8312 { 8313 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8314 0x19e5, 0xd22c) 8315 }, 8316 { 8317 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8318 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 8319 }, 8320 { 8321 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8322 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 8323 }, 8324 { 8325 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8326 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 8327 }, 8328 { 8329 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8330 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 8331 }, 8332 { 8333 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8334 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 8335 }, 8336 { 8337 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8338 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 8339 }, 8340 { 8341 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8342 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 8343 }, 8344 { 8345 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8346 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 8347 }, 8348 { 8349 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8350 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 8351 }, 8352 { 8353 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8354 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 8355 }, 8356 { 8357 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8358 PCI_VENDOR_ID_ADAPTEC2, 0x0808) 8359 }, 8360 { 8361 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8362 PCI_VENDOR_ID_ADAPTEC2, 0x0809) 8363 }, 8364 { 8365 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8366 PCI_VENDOR_ID_ADAPTEC2, 0x080a) 8367 }, 8368 { 8369 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8370 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 8371 }, 8372 { 8373 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8374 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 8375 }, 8376 { 8377 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8378 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 8379 }, 8380 { 8381 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8382 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 8383 }, 8384 { 8385 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8386 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 8387 }, 8388 { 8389 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8390 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 8391 }, 8392 { 8393 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8394 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 8395 }, 8396 { 8397 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8398 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 8399 }, 8400 { 8401 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8402 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 8403 }, 8404 { 8405 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8406 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 8407 }, 8408 { 8409 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8410 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 8411 }, 8412 { 8413 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8414 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 8415 }, 8416 { 8417 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8418 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 8419 }, 8420 { 8421 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8422 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 8423 }, 8424 { 8425 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8426 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 8427 }, 8428 { 8429 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8430 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 8431 }, 8432 { 8433 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8434 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 8435 }, 8436 { 8437 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8438 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 8439 }, 8440 { 8441 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8442 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 8443 }, 8444 { 8445 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8446 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 8447 }, 8448 { 8449 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8450 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 8451 }, 8452 { 8453 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8454 PCI_VENDOR_ID_ADVANTECH, 0x8312) 8455 }, 8456 { 8457 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8458 PCI_VENDOR_ID_DELL, 0x1fe0) 8459 }, 8460 { 8461 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8462 PCI_VENDOR_ID_HP, 0x0600) 8463 }, 8464 { 8465 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8466 PCI_VENDOR_ID_HP, 0x0601) 8467 }, 8468 { 8469 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8470 PCI_VENDOR_ID_HP, 0x0602) 8471 }, 8472 { 8473 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8474 PCI_VENDOR_ID_HP, 0x0603) 8475 }, 8476 { 8477 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8478 PCI_VENDOR_ID_HP, 0x0609) 8479 }, 8480 { 8481 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8482 
PCI_VENDOR_ID_HP, 0x0650) 8483 }, 8484 { 8485 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8486 PCI_VENDOR_ID_HP, 0x0651) 8487 }, 8488 { 8489 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8490 PCI_VENDOR_ID_HP, 0x0652) 8491 }, 8492 { 8493 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8494 PCI_VENDOR_ID_HP, 0x0653) 8495 }, 8496 { 8497 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8498 PCI_VENDOR_ID_HP, 0x0654) 8499 }, 8500 { 8501 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8502 PCI_VENDOR_ID_HP, 0x0655) 8503 }, 8504 { 8505 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8506 PCI_VENDOR_ID_HP, 0x0700) 8507 }, 8508 { 8509 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8510 PCI_VENDOR_ID_HP, 0x0701) 8511 }, 8512 { 8513 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8514 PCI_VENDOR_ID_HP, 0x1001) 8515 }, 8516 { 8517 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8518 PCI_VENDOR_ID_HP, 0x1100) 8519 }, 8520 { 8521 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8522 PCI_VENDOR_ID_HP, 0x1101) 8523 }, 8524 { 8525 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8526 0x1d8d, 0x0800) 8527 }, 8528 { 8529 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8530 0x1d8d, 0x0908) 8531 }, 8532 { 8533 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8534 0x1d8d, 0x0806) 8535 }, 8536 { 8537 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8538 0x1d8d, 0x0916) 8539 }, 8540 { 8541 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8542 PCI_VENDOR_ID_GIGABYTE, 0x1000) 8543 }, 8544 { 8545 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 8546 PCI_ANY_ID, PCI_ANY_ID) 8547 }, 8548 { 0 } 8549 }; 8550 8551 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 8552 8553 static struct pci_driver pqi_pci_driver = { 8554 .name = DRIVER_NAME_SHORT, 8555 .id_table = pqi_pci_id_table, 8556 .probe = pqi_pci_probe, 8557 .remove = pqi_pci_remove, 8558 .shutdown = pqi_shutdown, 8559 #if defined(CONFIG_PM) 8560 .suspend = pqi_suspend, 8561 .resume = pqi_resume, 8562 #endif 8563 }; 8564 8565 static int __init pqi_init(void) 8566 { 8567 int rc; 8568 8569 pr_info(DRIVER_NAME "\n"); 8570 8571 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); 8572 if (!pqi_sas_transport_template) 8573 return -ENODEV; 8574 8575 pqi_process_module_params(); 8576 8577 rc = pci_register_driver(&pqi_pci_driver); 8578 if (rc) 8579 sas_release_transport(pqi_sas_transport_template); 8580 8581 return rc; 8582 } 8583 8584 static void __exit pqi_cleanup(void) 8585 { 8586 pci_unregister_driver(&pqi_pci_driver); 8587 sas_release_transport(pqi_sas_transport_template); 8588 } 8589 8590 module_init(pqi_init); 8591 module_exit(pqi_cleanup); 8592 8593 static void __attribute__((unused)) verify_structures(void) 8594 { 8595 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8596 sis_host_to_ctrl_doorbell) != 0x20); 8597 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8598 sis_interrupt_mask) != 0x34); 8599 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8600 sis_ctrl_to_host_doorbell) != 0x9c); 8601 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8602 sis_ctrl_to_host_doorbell_clear) != 0xa0); 8603 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8604 sis_driver_scratch) != 0xb0); 8605 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8606 sis_firmware_status) != 0xbc); 8607 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8608 sis_mailbox) != 0x1000); 8609 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 8610 pqi_registers) != 0x4000); 8611 8612 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 8613 iu_type) != 0x0); 8614 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 
8615 iu_length) != 0x2); 8616 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 8617 response_queue_id) != 0x4); 8618 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 8619 work_area) != 0x6); 8620 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 8621 8622 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8623 status) != 0x0); 8624 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8625 service_response) != 0x1); 8626 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8627 data_present) != 0x2); 8628 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8629 reserved) != 0x3); 8630 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8631 residual_count) != 0x4); 8632 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8633 data_length) != 0x8); 8634 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8635 reserved1) != 0xa); 8636 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 8637 data) != 0xc); 8638 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 8639 8640 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8641 data_in_result) != 0x0); 8642 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8643 data_out_result) != 0x1); 8644 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8645 reserved) != 0x2); 8646 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8647 status) != 0x5); 8648 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8649 status_qualifier) != 0x6); 8650 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8651 sense_data_length) != 0x8); 8652 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8653 response_data_length) != 0xa); 8654 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8655 data_in_transferred) != 0xc); 8656 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8657 data_out_transferred) != 0x10); 8658 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 8659 data) != 0x14); 8660 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 8661 8662 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8663 signature) != 0x0); 8664 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8665 function_and_status_code) != 0x8); 8666 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8667 max_admin_iq_elements) != 0x10); 8668 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8669 max_admin_oq_elements) != 0x11); 8670 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8671 admin_iq_element_length) != 0x12); 8672 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8673 admin_oq_element_length) != 0x13); 8674 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8675 max_reset_timeout) != 0x14); 8676 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8677 legacy_intx_status) != 0x18); 8678 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8679 legacy_intx_mask_set) != 0x1c); 8680 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8681 legacy_intx_mask_clear) != 0x20); 8682 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8683 device_status) != 0x40); 8684 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8685 admin_iq_pi_offset) != 0x48); 8686 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8687 admin_oq_ci_offset) != 0x50); 8688 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8689 admin_iq_element_array_addr) != 0x58); 8690 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8691 admin_oq_element_array_addr) != 0x60); 8692 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8693 admin_iq_ci_addr) != 0x68); 8694 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8695 admin_oq_pi_addr) != 0x70); 8696 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8697 admin_iq_num_elements) != 0x78); 8698 
BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8699 admin_oq_num_elements) != 0x79); 8700 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8701 admin_queue_int_msg_num) != 0x7a); 8702 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8703 device_error) != 0x80); 8704 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8705 error_details) != 0x88); 8706 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8707 device_reset) != 0x90); 8708 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 8709 power_action) != 0x94); 8710 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 8711 8712 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8713 header.iu_type) != 0); 8714 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8715 header.iu_length) != 2); 8716 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8717 header.work_area) != 6); 8718 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8719 request_id) != 8); 8720 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8721 function_code) != 10); 8722 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8723 data.report_device_capability.buffer_length) != 44); 8724 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8725 data.report_device_capability.sg_descriptor) != 48); 8726 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8727 data.create_operational_iq.queue_id) != 12); 8728 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8729 data.create_operational_iq.element_array_addr) != 16); 8730 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8731 data.create_operational_iq.ci_addr) != 24); 8732 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8733 data.create_operational_iq.num_elements) != 32); 8734 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8735 data.create_operational_iq.element_length) != 34); 8736 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8737 data.create_operational_iq.queue_protocol) != 36); 8738 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8739 data.create_operational_oq.queue_id) != 12); 8740 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8741 data.create_operational_oq.element_array_addr) != 16); 8742 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8743 data.create_operational_oq.pi_addr) != 24); 8744 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8745 data.create_operational_oq.num_elements) != 32); 8746 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8747 data.create_operational_oq.element_length) != 34); 8748 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8749 data.create_operational_oq.queue_protocol) != 36); 8750 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8751 data.create_operational_oq.int_msg_num) != 40); 8752 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8753 data.create_operational_oq.coalescing_count) != 42); 8754 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8755 data.create_operational_oq.min_coalescing_time) != 44); 8756 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8757 data.create_operational_oq.max_coalescing_time) != 48); 8758 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8759 data.delete_operational_queue.queue_id) != 12); 8760 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 8761 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8762 data.create_operational_iq) != 64 - 11); 8763 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8764 data.create_operational_oq) != 64 - 11); 8765 
BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8766 data.delete_operational_queue) != 64 - 11); 8767 8768 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8769 header.iu_type) != 0); 8770 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8771 header.iu_length) != 2); 8772 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8773 header.work_area) != 6); 8774 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8775 request_id) != 8); 8776 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8777 function_code) != 10); 8778 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8779 status) != 11); 8780 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8781 data.create_operational_iq.status_descriptor) != 12); 8782 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8783 data.create_operational_iq.iq_pi_offset) != 16); 8784 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8785 data.create_operational_oq.status_descriptor) != 12); 8786 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 8787 data.create_operational_oq.oq_ci_offset) != 16); 8788 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 8789 8790 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8791 header.iu_type) != 0); 8792 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8793 header.iu_length) != 2); 8794 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8795 header.response_queue_id) != 4); 8796 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8797 header.work_area) != 6); 8798 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8799 request_id) != 8); 8800 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8801 nexus_id) != 10); 8802 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8803 buffer_length) != 12); 8804 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8805 lun_number) != 16); 8806 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8807 protocol_specific) != 24); 8808 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8809 error_index) != 27); 8810 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8811 cdb) != 32); 8812 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8813 timeout) != 60); 8814 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 8815 sg_descriptors) != 64); 8816 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 8817 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 8818 8819 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8820 header.iu_type) != 0); 8821 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8822 header.iu_length) != 2); 8823 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8824 header.response_queue_id) != 4); 8825 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8826 header.work_area) != 6); 8827 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8828 request_id) != 8); 8829 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8830 nexus_id) != 12); 8831 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8832 buffer_length) != 16); 8833 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8834 data_encryption_key_index) != 22); 8835 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8836 encrypt_tweak_lower) != 24); 8837 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8838 encrypt_tweak_upper) != 28); 8839 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8840 cdb) != 32); 8841 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8842 error_index) != 48); 8843 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8844 num_sg_descriptors) != 50); 8845 BUILD_BUG_ON(offsetof(struct 
pqi_aio_path_request, 8846 cdb_length) != 51); 8847 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8848 lun_number) != 52); 8849 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 8850 sg_descriptors) != 64); 8851 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 8852 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 8853 8854 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8855 header.iu_type) != 0); 8856 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8857 header.iu_length) != 2); 8858 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8859 request_id) != 8); 8860 BUILD_BUG_ON(offsetof(struct pqi_io_response, 8861 error_index) != 10); 8862 8863 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8864 header.iu_type) != 0); 8865 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8866 header.iu_length) != 2); 8867 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8868 header.response_queue_id) != 4); 8869 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8870 request_id) != 8); 8871 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8872 data.report_event_configuration.buffer_length) != 12); 8873 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8874 data.report_event_configuration.sg_descriptors) != 16); 8875 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8876 data.set_event_configuration.global_event_oq_id) != 10); 8877 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8878 data.set_event_configuration.buffer_length) != 12); 8879 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 8880 data.set_event_configuration.sg_descriptors) != 16); 8881 8882 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 8883 max_inbound_iu_length) != 6); 8884 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 8885 max_outbound_iu_length) != 14); 8886 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 8887 8888 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8889 data_length) != 0); 8890 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8891 iq_arbitration_priority_support_bitmask) != 8); 8892 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8893 maximum_aw_a) != 9); 8894 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8895 maximum_aw_b) != 10); 8896 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8897 maximum_aw_c) != 11); 8898 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8899 max_inbound_queues) != 16); 8900 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8901 max_elements_per_iq) != 18); 8902 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8903 max_iq_element_length) != 24); 8904 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8905 min_iq_element_length) != 26); 8906 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8907 max_outbound_queues) != 30); 8908 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8909 max_elements_per_oq) != 32); 8910 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8911 intr_coalescing_time_granularity) != 34); 8912 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8913 max_oq_element_length) != 36); 8914 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8915 min_oq_element_length) != 38); 8916 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 8917 iu_layer_descriptors) != 64); 8918 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 8919 8920 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 8921 event_type) != 0); 8922 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 8923 oq_id) != 2); 8924 BUILD_BUG_ON(sizeof(struct 
pqi_event_descriptor) != 4); 8925 8926 BUILD_BUG_ON(offsetof(struct pqi_event_config, 8927 num_event_descriptors) != 2); 8928 BUILD_BUG_ON(offsetof(struct pqi_event_config, 8929 descriptors) != 4); 8930 8931 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 8932 ARRAY_SIZE(pqi_supported_event_types)); 8933 8934 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8935 header.iu_type) != 0); 8936 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8937 header.iu_length) != 2); 8938 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8939 event_type) != 8); 8940 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8941 event_id) != 10); 8942 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8943 additional_event_id) != 12); 8944 BUILD_BUG_ON(offsetof(struct pqi_event_response, 8945 data) != 16); 8946 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 8947 8948 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8949 header.iu_type) != 0); 8950 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8951 header.iu_length) != 2); 8952 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8953 event_type) != 8); 8954 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8955 event_id) != 10); 8956 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 8957 additional_event_id) != 12); 8958 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 8959 8960 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8961 header.iu_type) != 0); 8962 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8963 header.iu_length) != 2); 8964 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8965 request_id) != 8); 8966 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8967 nexus_id) != 10); 8968 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8969 timeout) != 14); 8970 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8971 lun_number) != 16); 8972 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8973 protocol_specific) != 24); 8974 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8975 outbound_queue_id_to_manage) != 26); 8976 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8977 request_id_to_manage) != 28); 8978 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 8979 task_management_function) != 30); 8980 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 8981 8982 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8983 header.iu_type) != 0); 8984 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8985 header.iu_length) != 2); 8986 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8987 request_id) != 8); 8988 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8989 nexus_id) != 10); 8990 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8991 additional_response_info) != 12); 8992 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 8993 response_code) != 15); 8994 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 8995 8996 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8997 configured_logical_drive_count) != 0); 8998 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 8999 configuration_signature) != 1); 9000 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 9001 firmware_version) != 5); 9002 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 9003 extended_logical_unit_count) != 154); 9004 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 9005 firmware_build_number) != 190); 9006 BUILD_BUG_ON(offsetof(struct 
		bmic_identify_controller, controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
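
/*
 * verify_structures() is never called; its BUILD_BUG_ON() checks make the
 * compiler verify, at build time, that the PQI and BMIC structure layouts
 * above (field offsets, structure sizes and queue-element limits) still
 * match what the controller interface expects, so an accidental change to
 * smartpqi.h fails the build instead of producing malformed requests.
 */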