// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.10-025"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		10
#define DRIVER_REVISION		25

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

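/*
 * Lookup table pairing each lockup action with the name accepted by the
 * lockup_action module parameter below.
 */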
static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first,
	"Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep,
	"Hide the virtual SEP for direct attached drives.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

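/*
 * Detect a dead controller: if the controller was marked online but the SIS
 * firmware is no longer running, take it offline.
 */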
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_device_reset = true;
}

static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_device_reset;
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

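/*
 * Flag an orderly controller shutdown.  Other code paths (for example,
 * pqi_device_in_remove() above) test this flag so they can behave
 * differently while the controller is being shut down.
 */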
static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_shutdown = true;
}

static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_work_sync(&ctrl_info->event_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

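/*
 * Build a RAID path request for the given BMIC/CISS command: fill in the
 * IU header, LUN, and CDB, pick the DMA direction implied by the command,
 * and DMA-map the caller's buffer into the first SG descriptor.
 */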
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

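/*
 * Issue a single BMIC/CISS command synchronously over the RAID path:
 * build the request, submit it, wait for completion, then unmap the data
 * buffer.  The helpers that follow wrap this with the most common argument
 * combinations, e.g. pqi_identify_controller() is just
 * pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, buffer,
 * sizeof(*buffer)) aimed at the controller LUN with no timeout.
 */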
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct  pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

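/*
 * Ask the controller to flush its write cache.  The shutdown_event value
 * tells the firmware why the flush is being requested.
 */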
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA	(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

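/*
 * Report the host's local time to the controller through the host wellness
 * buffer.  The time[] field is BCD encoded: hour, minute, second, a zero
 * byte, month, day, century, and year within the century.
 */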
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

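/*
 * pqi_report_phys_logical_luns() above first reads just the report LUN
 * header to learn the list length, then allocates a buffer of that size and
 * re-reads, retrying if the list grew in between.  The wrappers below select
 * the physical or logical variant, and pqi_get_device_lists() fetches both
 * lists and appends an empty entry for the controller itself to the logical
 * list.
 */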
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

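/*
 * Derive the SCSI bus/target/lun nexus from the 8-byte CISS LUN address:
 * the controller itself lands on the HBA bus, logical volumes on the RAID
 * (or external RAID) volume bus, and other physical devices are left for
 * the SAS transport layer to place later.
 */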
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

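/*
 * Fetch the RAID map used for RAID bypass (accelerated I/O).  The map is
 * read into a default-sized buffer first; if the firmware reports a larger
 * structure_size, the buffer is reallocated and the map is read again.  The
 * result is validated before being attached to the device.
 */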
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);

	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

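/*
 * Read standard INQUIRY data to classify the device and cache its vendor
 * and model strings.  Physical device inquiries that come back with
 * PQI_CMD_STATUS_ABORTED are retried a few times; for logical volumes the
 * RAID level, bypass status, and volume status are gathered as well.
 */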
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	if (device->is_expander_smp_device)
		return 0;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

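/*
 * Log a human-readable description of a logical volume's status.  Any
 * status value the driver does not recognize is reported numerically.
 */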
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

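/*
 * Format a one-line description of a device (nexus, address, type, vendor,
 * model, and either RAID/bypass attributes or AIO state and queue depth)
 * and log it with the given action string, e.g. "added" or "removed".
 */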
static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type =
		new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

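/*
 * A device is considered "added" once it has been exposed to the upper
 * layers: expander SMP devices through a SAS port, everything else through
 * a scsi_device.
 */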
static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported;

	if (device->is_expander_smp_device)
		return true;

	is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}

static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	if (!device->is_physical_device)
		return false;

	if (device->is_expander_smp_device)
		return true;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_ENCLOSURE:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}

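/*
 * Rebuild the driver's view of the attached devices: fetch the physical and
 * logical LUN lists from the controller, allocate and fill a pqi_scsi_dev
 * for each reported entry, then hand the resulting array to
 * pqi_update_device_list() to reconcile it with the existing device list.
 */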
get_unaligned_be32(&physdev_list->header.list_length) 1990 / sizeof(physdev_list->lun_entries[0]); 1991 else 1992 num_physicals = 0; 1993 1994 if (logdev_list) 1995 num_logicals = 1996 get_unaligned_be32(&logdev_list->header.list_length) 1997 / sizeof(logdev_list->lun_entries[0]); 1998 else 1999 num_logicals = 0; 2000 2001 if (num_physicals) { 2002 /* 2003 * We need this buffer for calls to pqi_get_physical_disk_info() 2004 * below. We allocate it here instead of inside 2005 * pqi_get_physical_disk_info() because it's a fairly large 2006 * buffer. 2007 */ 2008 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2009 if (!id_phys) { 2010 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2011 out_of_memory_msg); 2012 rc = -ENOMEM; 2013 goto out; 2014 } 2015 2016 if (pqi_hide_vsep) { 2017 for (i = num_physicals - 1; i >= 0; i--) { 2018 phys_lun_ext_entry = 2019 &physdev_list->lun_entries[i]; 2020 if (CISS_GET_DRIVE_NUMBER( 2021 phys_lun_ext_entry->lunid) == 2022 PQI_VSEP_CISS_BTL) { 2023 pqi_mask_device( 2024 phys_lun_ext_entry->lunid); 2025 break; 2026 } 2027 } 2028 } 2029 } 2030 2031 num_new_devices = num_physicals + num_logicals; 2032 2033 new_device_list = kmalloc_array(num_new_devices, 2034 sizeof(*new_device_list), 2035 GFP_KERNEL); 2036 if (!new_device_list) { 2037 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2038 rc = -ENOMEM; 2039 goto out; 2040 } 2041 2042 for (i = 0; i < num_new_devices; i++) { 2043 device = kzalloc(sizeof(*device), GFP_KERNEL); 2044 if (!device) { 2045 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2046 out_of_memory_msg); 2047 rc = -ENOMEM; 2048 goto out; 2049 } 2050 list_add_tail(&device->new_device_list_entry, 2051 &new_device_list_head); 2052 } 2053 2054 device = NULL; 2055 num_valid_devices = 0; 2056 physical_index = 0; 2057 logical_index = 0; 2058 2059 for (i = 0; i < num_new_devices; i++) { 2060 2061 if ((!pqi_expose_ld_first && i < num_physicals) || 2062 (pqi_expose_ld_first && i >= num_logicals)) { 2063 is_physical_device = true; 2064 phys_lun_ext_entry = 2065 &physdev_list->lun_entries[physical_index++]; 2066 log_lun_ext_entry = NULL; 2067 scsi3addr = phys_lun_ext_entry->lunid; 2068 } else { 2069 is_physical_device = false; 2070 phys_lun_ext_entry = NULL; 2071 log_lun_ext_entry = 2072 &logdev_list->lun_entries[logical_index++]; 2073 scsi3addr = log_lun_ext_entry->lunid; 2074 } 2075 2076 if (is_physical_device && pqi_skip_device(scsi3addr)) 2077 continue; 2078 2079 if (device) 2080 device = list_next_entry(device, new_device_list_entry); 2081 else 2082 device = list_first_entry(&new_device_list_head, 2083 struct pqi_scsi_dev, new_device_list_entry); 2084 2085 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2086 device->is_physical_device = is_physical_device; 2087 if (is_physical_device) { 2088 if (phys_lun_ext_entry->device_type == 2089 SA_DEVICE_TYPE_EXPANDER_SMP) 2090 device->is_expander_smp_device = true; 2091 } else { 2092 device->is_external_raid_device = 2093 pqi_is_external_raid_addr(scsi3addr); 2094 } 2095 2096 /* Gather information about the device. 
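 * pqi_get_device_info() below queries the device's identity and related
 * attributes; per the error handling that follows, -ENOMEM aborts the
 * whole scan while any other failure merely skips this one device.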
*/ 2097 rc = pqi_get_device_info(ctrl_info, device); 2098 if (rc == -ENOMEM) { 2099 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2100 out_of_memory_msg); 2101 goto out; 2102 } 2103 if (rc) { 2104 if (device->is_physical_device) 2105 dev_warn(&ctrl_info->pci_dev->dev, 2106 "obtaining device info failed, skipping physical device %016llx\n", 2107 get_unaligned_be64( 2108 &phys_lun_ext_entry->wwid)); 2109 else 2110 dev_warn(&ctrl_info->pci_dev->dev, 2111 "obtaining device info failed, skipping logical device %08x%08x\n", 2112 *((u32 *)&device->scsi3addr), 2113 *((u32 *)&device->scsi3addr[4])); 2114 rc = 0; 2115 continue; 2116 } 2117 2118 if (!pqi_is_supported_device(device)) 2119 continue; 2120 2121 pqi_assign_bus_target_lun(device); 2122 2123 if (device->is_physical_device) { 2124 device->wwid = phys_lun_ext_entry->wwid; 2125 if ((phys_lun_ext_entry->device_flags & 2126 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2127 phys_lun_ext_entry->aio_handle) { 2128 device->aio_enabled = true; 2129 device->aio_handle = 2130 phys_lun_ext_entry->aio_handle; 2131 } 2132 pqi_get_physical_disk_info(ctrl_info, device, id_phys); 2133 } else { 2134 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 2135 sizeof(device->volume_id)); 2136 } 2137 2138 if (pqi_is_device_with_sas_address(device)) 2139 device->sas_address = get_unaligned_be64(&device->wwid); 2140 2141 new_device_list[num_valid_devices++] = device; 2142 } 2143 2144 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2145 2146 out: 2147 list_for_each_entry_safe(device, next, &new_device_list_head, 2148 new_device_list_entry) { 2149 if (device->keep_device) 2150 continue; 2151 list_del(&device->new_device_list_entry); 2152 pqi_free_device(device); 2153 } 2154 2155 kfree(new_device_list); 2156 kfree(physdev_list); 2157 kfree(logdev_list); 2158 kfree(id_phys); 2159 2160 return rc; 2161 } 2162 2163 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2164 { 2165 unsigned long flags; 2166 struct pqi_scsi_dev *device; 2167 2168 while (1) { 2169 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2170 2171 device = list_first_entry_or_null(&ctrl_info->scsi_device_list, 2172 struct pqi_scsi_dev, scsi_device_list_entry); 2173 if (device) 2174 list_del(&device->scsi_device_list_entry); 2175 2176 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 2177 flags); 2178 2179 if (!device) 2180 break; 2181 2182 if (pqi_is_device_added(device)) 2183 pqi_remove_device(ctrl_info, device); 2184 pqi_free_device(device); 2185 } 2186 } 2187 2188 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2189 { 2190 int rc = 0; 2191 2192 if (pqi_ctrl_offline(ctrl_info)) 2193 return -ENXIO; 2194 2195 if (!mutex_trylock(&ctrl_info->scan_mutex)) { 2196 pqi_schedule_rescan_worker_delayed(ctrl_info); 2197 rc = -EINPROGRESS; 2198 } else { 2199 rc = pqi_update_scsi_devices(ctrl_info); 2200 if (rc) 2201 pqi_schedule_rescan_worker_delayed(ctrl_info); 2202 mutex_unlock(&ctrl_info->scan_mutex); 2203 } 2204 2205 return rc; 2206 } 2207 2208 static void pqi_scan_start(struct Scsi_Host *shost) 2209 { 2210 struct pqi_ctrl_info *ctrl_info; 2211 2212 ctrl_info = shost_to_hba(shost); 2213 if (pqi_ctrl_in_ofa(ctrl_info)) 2214 return; 2215 2216 pqi_scan_scsi_devices(ctrl_info); 2217 } 2218 2219 /* Returns TRUE if scan is finished. 
*/ 2220 2221 static int pqi_scan_finished(struct Scsi_Host *shost, 2222 unsigned long elapsed_time) 2223 { 2224 struct pqi_ctrl_info *ctrl_info; 2225 2226 ctrl_info = shost_priv(shost); 2227 2228 return !mutex_is_locked(&ctrl_info->scan_mutex); 2229 } 2230 2231 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2232 { 2233 mutex_lock(&ctrl_info->scan_mutex); 2234 mutex_unlock(&ctrl_info->scan_mutex); 2235 } 2236 2237 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2238 { 2239 mutex_lock(&ctrl_info->lun_reset_mutex); 2240 mutex_unlock(&ctrl_info->lun_reset_mutex); 2241 } 2242 2243 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 2244 { 2245 mutex_lock(&ctrl_info->ofa_mutex); 2246 mutex_unlock(&ctrl_info->ofa_mutex); 2247 } 2248 2249 static inline void pqi_set_encryption_info( 2250 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2251 u64 first_block) 2252 { 2253 u32 volume_blk_size; 2254 2255 /* 2256 * Set the encryption tweak values based on logical block address. 2257 * If the block size is 512, the tweak value is equal to the LBA. 2258 * For other block sizes, tweak value is (LBA * block size) / 512. 2259 */ 2260 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2261 if (volume_blk_size != 512) 2262 first_block = (first_block * volume_blk_size) / 512; 2263 2264 encryption_info->data_encryption_key_index = 2265 get_unaligned_le16(&raid_map->data_encryption_key_index); 2266 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2267 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2268 } 2269 2270 /* 2271 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2272 */ 2273 2274 #define PQI_RAID_BYPASS_INELIGIBLE 1 2275 2276 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2277 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2278 struct pqi_queue_group *queue_group) 2279 { 2280 struct raid_map *raid_map; 2281 bool is_write = false; 2282 u32 map_index; 2283 u64 first_block; 2284 u64 last_block; 2285 u32 block_cnt; 2286 u32 blocks_per_row; 2287 u64 first_row; 2288 u64 last_row; 2289 u32 first_row_offset; 2290 u32 last_row_offset; 2291 u32 first_column; 2292 u32 last_column; 2293 u64 r0_first_row; 2294 u64 r0_last_row; 2295 u32 r5or6_blocks_per_row; 2296 u64 r5or6_first_row; 2297 u64 r5or6_last_row; 2298 u32 r5or6_first_row_offset; 2299 u32 r5or6_last_row_offset; 2300 u32 r5or6_first_column; 2301 u32 r5or6_last_column; 2302 u16 data_disks_per_row; 2303 u32 total_disks_per_row; 2304 u16 layout_map_count; 2305 u32 stripesize; 2306 u16 strip_size; 2307 u32 first_group; 2308 u32 last_group; 2309 u32 current_group; 2310 u32 map_row; 2311 u32 aio_handle; 2312 u64 disk_block; 2313 u32 disk_block_cnt; 2314 u8 cdb[16]; 2315 u8 cdb_length; 2316 int offload_to_mirror; 2317 struct pqi_encryption_info *encryption_info_ptr; 2318 struct pqi_encryption_info encryption_info; 2319 #if BITS_PER_LONG == 32 2320 u64 tmpdiv; 2321 #endif 2322 2323 /* Check for valid opcode, get LBA and block count. 
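 * For reference, the standard SBC CDB layouts matching the parsing below:
 * the 6-byte forms carry a 21-bit LBA and an 8-bit length where 0 means
 * 256 blocks; READ/WRITE(10) carry a 32-bit big-endian LBA in bytes 2-5
 * and a 16-bit length in bytes 7-8; READ/WRITE(12) carry a 32-bit LBA in
 * bytes 2-5 and a 32-bit length in bytes 6-9; READ/WRITE(16) carry a
 * 64-bit LBA in bytes 2-9 and a 32-bit length in bytes 10-13.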
*/ 2324 switch (scmd->cmnd[0]) { 2325 case WRITE_6: 2326 is_write = true; 2327 /* fall through */ 2328 case READ_6: 2329 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2330 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2331 block_cnt = (u32)scmd->cmnd[4]; 2332 if (block_cnt == 0) 2333 block_cnt = 256; 2334 break; 2335 case WRITE_10: 2336 is_write = true; 2337 /* fall through */ 2338 case READ_10: 2339 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2340 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2341 break; 2342 case WRITE_12: 2343 is_write = true; 2344 /* fall through */ 2345 case READ_12: 2346 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2347 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2348 break; 2349 case WRITE_16: 2350 is_write = true; 2351 /* fall through */ 2352 case READ_16: 2353 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2354 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2355 break; 2356 default: 2357 /* Process via normal I/O path. */ 2358 return PQI_RAID_BYPASS_INELIGIBLE; 2359 } 2360 2361 /* Check for write to non-RAID-0. */ 2362 if (is_write && device->raid_level != SA_RAID_0) 2363 return PQI_RAID_BYPASS_INELIGIBLE; 2364 2365 if (unlikely(block_cnt == 0)) 2366 return PQI_RAID_BYPASS_INELIGIBLE; 2367 2368 last_block = first_block + block_cnt - 1; 2369 raid_map = device->raid_map; 2370 2371 /* Check for invalid block or wraparound. */ 2372 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2373 last_block < first_block) 2374 return PQI_RAID_BYPASS_INELIGIBLE; 2375 2376 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2377 strip_size = get_unaligned_le16(&raid_map->strip_size); 2378 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2379 2380 /* Calculate stripe information for the request. */ 2381 blocks_per_row = data_disks_per_row * strip_size; 2382 #if BITS_PER_LONG == 32 2383 tmpdiv = first_block; 2384 do_div(tmpdiv, blocks_per_row); 2385 first_row = tmpdiv; 2386 tmpdiv = last_block; 2387 do_div(tmpdiv, blocks_per_row); 2388 last_row = tmpdiv; 2389 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2390 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2391 tmpdiv = first_row_offset; 2392 do_div(tmpdiv, strip_size); 2393 first_column = tmpdiv; 2394 tmpdiv = last_row_offset; 2395 do_div(tmpdiv, strip_size); 2396 last_column = tmpdiv; 2397 #else 2398 first_row = first_block / blocks_per_row; 2399 last_row = last_block / blocks_per_row; 2400 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2401 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2402 first_column = first_row_offset / strip_size; 2403 last_column = last_row_offset / strip_size; 2404 #endif 2405 2406 /* If this isn't a single row/column then give to the controller. */ 2407 if (first_row != last_row || first_column != last_column) 2408 return PQI_RAID_BYPASS_INELIGIBLE; 2409 2410 /* Proceeding with driver mapping. 
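 * Illustrative example (assumed values, not from a real controller map):
 * with data_disks_per_row = 4 and strip_size = 128, blocks_per_row is 512,
 * so a request starting at first_block = 1000 lands in row 1000 / 512 = 1
 * at row offset 488, i.e. column 488 / 128 = 3, and map_index below
 * selects the fourth data drive of the mapped row.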
*/ 2411 total_disks_per_row = data_disks_per_row + 2412 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2413 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2414 get_unaligned_le16(&raid_map->row_cnt); 2415 map_index = (map_row * total_disks_per_row) + first_column; 2416 2417 /* RAID 1 */ 2418 if (device->raid_level == SA_RAID_1) { 2419 if (device->offload_to_mirror) 2420 map_index += data_disks_per_row; 2421 device->offload_to_mirror = !device->offload_to_mirror; 2422 } else if (device->raid_level == SA_RAID_ADM) { 2423 /* RAID ADM */ 2424 /* 2425 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2426 * divisible by 3. 2427 */ 2428 offload_to_mirror = device->offload_to_mirror; 2429 if (offload_to_mirror == 0) { 2430 /* use physical disk in the first mirrored group. */ 2431 map_index %= data_disks_per_row; 2432 } else { 2433 do { 2434 /* 2435 * Determine mirror group that map_index 2436 * indicates. 2437 */ 2438 current_group = map_index / data_disks_per_row; 2439 2440 if (offload_to_mirror != current_group) { 2441 if (current_group < 2442 layout_map_count - 1) { 2443 /* 2444 * Select raid index from 2445 * next group. 2446 */ 2447 map_index += data_disks_per_row; 2448 current_group++; 2449 } else { 2450 /* 2451 * Select raid index from first 2452 * group. 2453 */ 2454 map_index %= data_disks_per_row; 2455 current_group = 0; 2456 } 2457 } 2458 } while (offload_to_mirror != current_group); 2459 } 2460 2461 /* Set mirror group to use next time. */ 2462 offload_to_mirror = 2463 (offload_to_mirror >= layout_map_count - 1) ? 2464 0 : offload_to_mirror + 1; 2465 WARN_ON(offload_to_mirror >= layout_map_count); 2466 device->offload_to_mirror = offload_to_mirror; 2467 /* 2468 * Avoid direct use of device->offload_to_mirror within this 2469 * function since multiple threads might simultaneously 2470 * increment it beyond the range of device->layout_map_count -1. 
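 * Illustrative: for a 3-way RAID-1(ADM) volume (layout_map_count = 3) the
 * saved value simply cycles 0 -> 1 -> 2 -> 0 across successive bypass
 * requests, spreading reads over the three mirror groups.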
2471 */ 2472 } else if ((device->raid_level == SA_RAID_5 || 2473 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2474 /* RAID 50/60 */ 2475 /* Verify first and last block are in same RAID group */ 2476 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2477 stripesize = r5or6_blocks_per_row * layout_map_count; 2478 #if BITS_PER_LONG == 32 2479 tmpdiv = first_block; 2480 first_group = do_div(tmpdiv, stripesize); 2481 tmpdiv = first_group; 2482 do_div(tmpdiv, r5or6_blocks_per_row); 2483 first_group = tmpdiv; 2484 tmpdiv = last_block; 2485 last_group = do_div(tmpdiv, stripesize); 2486 tmpdiv = last_group; 2487 do_div(tmpdiv, r5or6_blocks_per_row); 2488 last_group = tmpdiv; 2489 #else 2490 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2491 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2492 #endif 2493 if (first_group != last_group) 2494 return PQI_RAID_BYPASS_INELIGIBLE; 2495 2496 /* Verify request is in a single row of RAID 5/6 */ 2497 #if BITS_PER_LONG == 32 2498 tmpdiv = first_block; 2499 do_div(tmpdiv, stripesize); 2500 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2501 tmpdiv = last_block; 2502 do_div(tmpdiv, stripesize); 2503 r5or6_last_row = r0_last_row = tmpdiv; 2504 #else 2505 first_row = r5or6_first_row = r0_first_row = 2506 first_block / stripesize; 2507 r5or6_last_row = r0_last_row = last_block / stripesize; 2508 #endif 2509 if (r5or6_first_row != r5or6_last_row) 2510 return PQI_RAID_BYPASS_INELIGIBLE; 2511 2512 /* Verify request is in a single column */ 2513 #if BITS_PER_LONG == 32 2514 tmpdiv = first_block; 2515 first_row_offset = do_div(tmpdiv, stripesize); 2516 tmpdiv = first_row_offset; 2517 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2518 r5or6_first_row_offset = first_row_offset; 2519 tmpdiv = last_block; 2520 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2521 tmpdiv = r5or6_last_row_offset; 2522 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2523 tmpdiv = r5or6_first_row_offset; 2524 do_div(tmpdiv, strip_size); 2525 first_column = r5or6_first_column = tmpdiv; 2526 tmpdiv = r5or6_last_row_offset; 2527 do_div(tmpdiv, strip_size); 2528 r5or6_last_column = tmpdiv; 2529 #else 2530 first_row_offset = r5or6_first_row_offset = 2531 (u32)((first_block % stripesize) % 2532 r5or6_blocks_per_row); 2533 2534 r5or6_last_row_offset = 2535 (u32)((last_block % stripesize) % 2536 r5or6_blocks_per_row); 2537 2538 first_column = r5or6_first_row_offset / strip_size; 2539 r5or6_first_column = first_column; 2540 r5or6_last_column = r5or6_last_row_offset / strip_size; 2541 #endif 2542 if (r5or6_first_column != r5or6_last_column) 2543 return PQI_RAID_BYPASS_INELIGIBLE; 2544 2545 /* Request is eligible */ 2546 map_row = 2547 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2548 get_unaligned_le16(&raid_map->row_cnt); 2549 2550 map_index = (first_group * 2551 (get_unaligned_le16(&raid_map->row_cnt) * 2552 total_disks_per_row)) + 2553 (map_row * total_disks_per_row) + first_column; 2554 } 2555 2556 aio_handle = raid_map->disk_data[map_index].aio_handle; 2557 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2558 first_row * strip_size + 2559 (first_row_offset - first_column * strip_size); 2560 disk_block_cnt = block_cnt; 2561 2562 /* Handle differing logical/physical block sizes. 
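 * Illustrative: if the volume's logical blocks were eight times the size
 * of the drive's physical blocks (phys_blk_shift = 3), logical block 100
 * would become disk block 800 and the block count would scale by the same
 * factor of 8.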
*/ 2563 if (raid_map->phys_blk_shift) { 2564 disk_block <<= raid_map->phys_blk_shift; 2565 disk_block_cnt <<= raid_map->phys_blk_shift; 2566 } 2567 2568 if (unlikely(disk_block_cnt > 0xffff)) 2569 return PQI_RAID_BYPASS_INELIGIBLE; 2570 2571 /* Build the new CDB for the physical disk I/O. */ 2572 if (disk_block > 0xffffffff) { 2573 cdb[0] = is_write ? WRITE_16 : READ_16; 2574 cdb[1] = 0; 2575 put_unaligned_be64(disk_block, &cdb[2]); 2576 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2577 cdb[14] = 0; 2578 cdb[15] = 0; 2579 cdb_length = 16; 2580 } else { 2581 cdb[0] = is_write ? WRITE_10 : READ_10; 2582 cdb[1] = 0; 2583 put_unaligned_be32((u32)disk_block, &cdb[2]); 2584 cdb[6] = 0; 2585 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2586 cdb[9] = 0; 2587 cdb_length = 10; 2588 } 2589 2590 if (get_unaligned_le16(&raid_map->flags) & 2591 RAID_MAP_ENCRYPTION_ENABLED) { 2592 pqi_set_encryption_info(&encryption_info, raid_map, 2593 first_block); 2594 encryption_info_ptr = &encryption_info; 2595 } else { 2596 encryption_info_ptr = NULL; 2597 } 2598 2599 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2600 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2601 } 2602 2603 #define PQI_STATUS_IDLE 0x0 2604 2605 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2606 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2607 2608 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2609 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2610 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2611 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2612 #define PQI_DEVICE_STATE_ERROR 0x4 2613 2614 #define PQI_MODE_READY_TIMEOUT_SECS 30 2615 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2616 2617 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2618 { 2619 struct pqi_device_registers __iomem *pqi_registers; 2620 unsigned long timeout; 2621 u64 signature; 2622 u8 status; 2623 2624 pqi_registers = ctrl_info->pqi_registers; 2625 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies; 2626 2627 while (1) { 2628 signature = readq(&pqi_registers->signature); 2629 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2630 sizeof(signature)) == 0) 2631 break; 2632 if (time_after(jiffies, timeout)) { 2633 dev_err(&ctrl_info->pci_dev->dev, 2634 "timed out waiting for PQI signature\n"); 2635 return -ETIMEDOUT; 2636 } 2637 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2638 } 2639 2640 while (1) { 2641 status = readb(&pqi_registers->function_and_status_code); 2642 if (status == PQI_STATUS_IDLE) 2643 break; 2644 if (time_after(jiffies, timeout)) { 2645 dev_err(&ctrl_info->pci_dev->dev, 2646 "timed out waiting for PQI IDLE\n"); 2647 return -ETIMEDOUT; 2648 } 2649 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2650 } 2651 2652 while (1) { 2653 if (readl(&pqi_registers->device_status) == 2654 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2655 break; 2656 if (time_after(jiffies, timeout)) { 2657 dev_err(&ctrl_info->pci_dev->dev, 2658 "timed out waiting for PQI all registers ready\n"); 2659 return -ETIMEDOUT; 2660 } 2661 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2662 } 2663 2664 return 0; 2665 } 2666 2667 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2668 { 2669 struct pqi_scsi_dev *device; 2670 2671 device = io_request->scmd->device->hostdata; 2672 device->raid_bypass_enabled = false; 2673 device->aio_enabled = false; 2674 } 2675 2676 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2677 { 2678 struct pqi_ctrl_info *ctrl_info; 2679 struct pqi_scsi_dev *device; 2680 2681 device 
= sdev->hostdata; 2682 if (device->device_offline) 2683 return; 2684 2685 device->device_offline = true; 2686 ctrl_info = shost_to_hba(sdev->host); 2687 pqi_schedule_rescan_worker(ctrl_info); 2688 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 2689 path, ctrl_info->scsi_host->host_no, device->bus, 2690 device->target, device->lun); 2691 } 2692 2693 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2694 { 2695 u8 scsi_status; 2696 u8 host_byte; 2697 struct scsi_cmnd *scmd; 2698 struct pqi_raid_error_info *error_info; 2699 size_t sense_data_length; 2700 int residual_count; 2701 int xfer_count; 2702 struct scsi_sense_hdr sshdr; 2703 2704 scmd = io_request->scmd; 2705 if (!scmd) 2706 return; 2707 2708 error_info = io_request->error_info; 2709 scsi_status = error_info->status; 2710 host_byte = DID_OK; 2711 2712 switch (error_info->data_out_result) { 2713 case PQI_DATA_IN_OUT_GOOD: 2714 break; 2715 case PQI_DATA_IN_OUT_UNDERFLOW: 2716 xfer_count = 2717 get_unaligned_le32(&error_info->data_out_transferred); 2718 residual_count = scsi_bufflen(scmd) - xfer_count; 2719 scsi_set_resid(scmd, residual_count); 2720 if (xfer_count < scmd->underflow) 2721 host_byte = DID_SOFT_ERROR; 2722 break; 2723 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2724 case PQI_DATA_IN_OUT_ABORTED: 2725 host_byte = DID_ABORT; 2726 break; 2727 case PQI_DATA_IN_OUT_TIMEOUT: 2728 host_byte = DID_TIME_OUT; 2729 break; 2730 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2731 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2732 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2733 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2734 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2735 case PQI_DATA_IN_OUT_ERROR: 2736 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2737 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2738 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2739 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2740 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2741 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2742 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2743 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2744 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2745 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2746 default: 2747 host_byte = DID_ERROR; 2748 break; 2749 } 2750 2751 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2752 if (sense_data_length == 0) 2753 sense_data_length = 2754 get_unaligned_le16(&error_info->response_data_length); 2755 if (sense_data_length) { 2756 if (sense_data_length > sizeof(error_info->data)) 2757 sense_data_length = sizeof(error_info->data); 2758 2759 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2760 scsi_normalize_sense(error_info->data, 2761 sense_data_length, &sshdr) && 2762 sshdr.sense_key == HARDWARE_ERROR && 2763 sshdr.asc == 0x3e) { 2764 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 2765 struct pqi_scsi_dev *device = scmd->device->hostdata; 2766 2767 switch (sshdr.ascq) { 2768 case 0x1: /* LOGICAL UNIT FAILURE */ 2769 if (printk_ratelimit()) 2770 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 2771 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 2772 pqi_take_device_offline(scmd->device, "RAID"); 2773 host_byte = DID_NO_CONNECT; 2774 break; 2775 2776 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 2777 if (printk_ratelimit()) 2778 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 2779 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 2780 break; 2781 } 2782 } 2783 2784 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2785 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2786 memcpy(scmd->sense_buffer, error_info->data, 2787 sense_data_length); 2788 } 2789 2790 scmd->result = scsi_status; 2791 set_host_byte(scmd, host_byte); 2792 } 2793 2794 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2795 { 2796 u8 scsi_status; 2797 u8 host_byte; 2798 struct scsi_cmnd *scmd; 2799 struct pqi_aio_error_info *error_info; 2800 size_t sense_data_length; 2801 int residual_count; 2802 int xfer_count; 2803 bool device_offline; 2804 2805 scmd = io_request->scmd; 2806 error_info = io_request->error_info; 2807 host_byte = DID_OK; 2808 sense_data_length = 0; 2809 device_offline = false; 2810 2811 switch (error_info->service_response) { 2812 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2813 scsi_status = error_info->status; 2814 break; 2815 case PQI_AIO_SERV_RESPONSE_FAILURE: 2816 switch (error_info->status) { 2817 case PQI_AIO_STATUS_IO_ABORTED: 2818 scsi_status = SAM_STAT_TASK_ABORTED; 2819 break; 2820 case PQI_AIO_STATUS_UNDERRUN: 2821 scsi_status = SAM_STAT_GOOD; 2822 residual_count = get_unaligned_le32( 2823 &error_info->residual_count); 2824 scsi_set_resid(scmd, residual_count); 2825 xfer_count = scsi_bufflen(scmd) - residual_count; 2826 if (xfer_count < scmd->underflow) 2827 host_byte = DID_SOFT_ERROR; 2828 break; 2829 case PQI_AIO_STATUS_OVERRUN: 2830 scsi_status = SAM_STAT_GOOD; 2831 break; 2832 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2833 pqi_aio_path_disabled(io_request); 2834 scsi_status = SAM_STAT_GOOD; 2835 io_request->status = -EAGAIN; 2836 break; 2837 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2838 case PQI_AIO_STATUS_INVALID_DEVICE: 2839 if (!io_request->raid_bypass) { 2840 device_offline = true; 2841 pqi_take_device_offline(scmd->device, "AIO"); 2842 host_byte = DID_NO_CONNECT; 2843 } 2844 scsi_status = SAM_STAT_CHECK_CONDITION; 2845 break; 2846 case PQI_AIO_STATUS_IO_ERROR: 2847 default: 2848 scsi_status = SAM_STAT_CHECK_CONDITION; 2849 break; 2850 } 2851 break; 2852 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2853 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2854 scsi_status = SAM_STAT_GOOD; 2855 break; 2856 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2857 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2858 default: 2859 scsi_status = SAM_STAT_CHECK_CONDITION; 2860 break; 2861 } 2862 2863 if (error_info->data_present) { 2864 sense_data_length = 2865 get_unaligned_le16(&error_info->data_length); 2866 if (sense_data_length) { 2867 if (sense_data_length > sizeof(error_info->data)) 2868 sense_data_length = sizeof(error_info->data); 2869 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2870 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2871 memcpy(scmd->sense_buffer, error_info->data, 2872 sense_data_length); 2873 } 2874 } 2875 2876 if (device_offline && sense_data_length == 0) 2877 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2878 0x3e, 0x1); 2879 2880 scmd->result = scsi_status; 2881 set_host_byte(scmd, host_byte); 2882 } 2883 2884 static void pqi_process_io_error(unsigned int iu_type, 2885 struct pqi_io_request *io_request) 2886 { 2887 switch (iu_type) { 2888 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2889 pqi_process_raid_io_error(io_request); 2890 break; 2891 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2892 pqi_process_aio_io_error(io_request); 2893 break; 2894 } 2895 } 2896 2897 static int pqi_interpret_task_management_response( 2898 
struct pqi_task_management_response *response) 2899 { 2900 int rc; 2901 2902 switch (response->response_code) { 2903 case SOP_TMF_COMPLETE: 2904 case SOP_TMF_FUNCTION_SUCCEEDED: 2905 rc = 0; 2906 break; 2907 case SOP_TMF_REJECTED: 2908 rc = -EAGAIN; 2909 break; 2910 default: 2911 rc = -EIO; 2912 break; 2913 } 2914 2915 return rc; 2916 } 2917 2918 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2919 struct pqi_queue_group *queue_group) 2920 { 2921 unsigned int num_responses; 2922 pqi_index_t oq_pi; 2923 pqi_index_t oq_ci; 2924 struct pqi_io_request *io_request; 2925 struct pqi_io_response *response; 2926 u16 request_id; 2927 2928 num_responses = 0; 2929 oq_ci = queue_group->oq_ci_copy; 2930 2931 while (1) { 2932 oq_pi = readl(queue_group->oq_pi); 2933 if (oq_pi == oq_ci) 2934 break; 2935 2936 num_responses++; 2937 response = queue_group->oq_element_array + 2938 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2939 2940 request_id = get_unaligned_le16(&response->request_id); 2941 WARN_ON(request_id >= ctrl_info->max_io_slots); 2942 2943 io_request = &ctrl_info->io_request_pool[request_id]; 2944 WARN_ON(atomic_read(&io_request->refcount) == 0); 2945 2946 switch (response->header.iu_type) { 2947 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2948 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2949 if (io_request->scmd) 2950 io_request->scmd->result = 0; 2951 /* fall through */ 2952 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2953 break; 2954 case PQI_RESPONSE_IU_VENDOR_GENERAL: 2955 io_request->status = 2956 get_unaligned_le16( 2957 &((struct pqi_vendor_general_response *) 2958 response)->status); 2959 break; 2960 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2961 io_request->status = 2962 pqi_interpret_task_management_response( 2963 (void *)response); 2964 break; 2965 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2966 pqi_aio_path_disabled(io_request); 2967 io_request->status = -EAGAIN; 2968 break; 2969 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2970 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2971 io_request->error_info = ctrl_info->error_buffer + 2972 (get_unaligned_le16(&response->error_index) * 2973 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2974 pqi_process_io_error(response->header.iu_type, 2975 io_request); 2976 break; 2977 default: 2978 dev_err(&ctrl_info->pci_dev->dev, 2979 "unexpected IU type: 0x%x\n", 2980 response->header.iu_type); 2981 break; 2982 } 2983 2984 io_request->io_complete_callback(io_request, 2985 io_request->context); 2986 2987 /* 2988 * Note that the I/O request structure CANNOT BE TOUCHED after 2989 * returning from the I/O completion callback! 
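 * (The callback typically completes the SCSI command and drops the
 * request's reference, after which the slot may be reused for new I/O.)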
2990 */ 2991 2992 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2993 } 2994 2995 if (num_responses) { 2996 queue_group->oq_ci_copy = oq_ci; 2997 writel(oq_ci, queue_group->oq_ci); 2998 } 2999 3000 return num_responses; 3001 } 3002 3003 static inline unsigned int pqi_num_elements_free(unsigned int pi, 3004 unsigned int ci, unsigned int elements_in_queue) 3005 { 3006 unsigned int num_elements_used; 3007 3008 if (pi >= ci) 3009 num_elements_used = pi - ci; 3010 else 3011 num_elements_used = elements_in_queue - ci + pi; 3012 3013 return elements_in_queue - num_elements_used - 1; 3014 } 3015 3016 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3017 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3018 { 3019 pqi_index_t iq_pi; 3020 pqi_index_t iq_ci; 3021 unsigned long flags; 3022 void *next_element; 3023 struct pqi_queue_group *queue_group; 3024 3025 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3026 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3027 3028 while (1) { 3029 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3030 3031 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3032 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3033 3034 if (pqi_num_elements_free(iq_pi, iq_ci, 3035 ctrl_info->num_elements_per_iq)) 3036 break; 3037 3038 spin_unlock_irqrestore( 3039 &queue_group->submit_lock[RAID_PATH], flags); 3040 3041 if (pqi_ctrl_offline(ctrl_info)) 3042 return; 3043 } 3044 3045 next_element = queue_group->iq_element_array[RAID_PATH] + 3046 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3047 3048 memcpy(next_element, iu, iu_length); 3049 3050 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3051 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3052 3053 /* 3054 * This write notifies the controller that an IU is available to be 3055 * processed. 
3056 */ 3057 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3058 3059 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3060 } 3061 3062 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3063 struct pqi_event *event) 3064 { 3065 struct pqi_event_acknowledge_request request; 3066 3067 memset(&request, 0, sizeof(request)); 3068 3069 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3070 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3071 &request.header.iu_length); 3072 request.event_type = event->event_type; 3073 request.event_id = event->event_id; 3074 request.additional_event_id = event->additional_event_id; 3075 3076 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3077 } 3078 3079 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3080 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3081 3082 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3083 struct pqi_ctrl_info *ctrl_info) 3084 { 3085 unsigned long timeout; 3086 u8 status; 3087 3088 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; 3089 3090 while (1) { 3091 status = pqi_read_soft_reset_status(ctrl_info); 3092 if (status & PQI_SOFT_RESET_INITIATE) 3093 return RESET_INITIATE_DRIVER; 3094 3095 if (status & PQI_SOFT_RESET_ABORT) 3096 return RESET_ABORT; 3097 3098 if (time_after(jiffies, timeout)) { 3099 dev_err(&ctrl_info->pci_dev->dev, 3100 "timed out waiting for soft reset status\n"); 3101 return RESET_TIMEDOUT; 3102 } 3103 3104 if (!sis_is_firmware_running(ctrl_info)) 3105 return RESET_NORESPONSE; 3106 3107 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3108 } 3109 } 3110 3111 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, 3112 enum pqi_soft_reset_status reset_status) 3113 { 3114 int rc; 3115 3116 switch (reset_status) { 3117 case RESET_INITIATE_DRIVER: 3118 /* fall through */ 3119 case RESET_TIMEDOUT: 3120 dev_info(&ctrl_info->pci_dev->dev, 3121 "resetting controller %u\n", ctrl_info->ctrl_id); 3122 sis_soft_reset(ctrl_info); 3123 /* fall through */ 3124 case RESET_INITIATE_FIRMWARE: 3125 rc = pqi_ofa_ctrl_restart(ctrl_info); 3126 pqi_ofa_free_host_buffer(ctrl_info); 3127 dev_info(&ctrl_info->pci_dev->dev, 3128 "Online Firmware Activation for controller %u: %s\n", 3129 ctrl_info->ctrl_id, rc == 0 ? 
"SUCCESS" : "FAILED"); 3130 break; 3131 case RESET_ABORT: 3132 pqi_ofa_ctrl_unquiesce(ctrl_info); 3133 dev_info(&ctrl_info->pci_dev->dev, 3134 "Online Firmware Activation for controller %u: %s\n", 3135 ctrl_info->ctrl_id, "ABORTED"); 3136 break; 3137 case RESET_NORESPONSE: 3138 pqi_ofa_free_host_buffer(ctrl_info); 3139 pqi_take_ctrl_offline(ctrl_info); 3140 break; 3141 } 3142 } 3143 3144 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3145 struct pqi_event *event) 3146 { 3147 u16 event_id; 3148 enum pqi_soft_reset_status status; 3149 3150 event_id = get_unaligned_le16(&event->event_id); 3151 3152 mutex_lock(&ctrl_info->ofa_mutex); 3153 3154 if (event_id == PQI_EVENT_OFA_QUIESCE) { 3155 dev_info(&ctrl_info->pci_dev->dev, 3156 "Received Online Firmware Activation quiesce event for controller %u\n", 3157 ctrl_info->ctrl_id); 3158 pqi_ofa_ctrl_quiesce(ctrl_info); 3159 pqi_acknowledge_event(ctrl_info, event); 3160 if (ctrl_info->soft_reset_handshake_supported) { 3161 status = pqi_poll_for_soft_reset_status(ctrl_info); 3162 pqi_process_soft_reset(ctrl_info, status); 3163 } else { 3164 pqi_process_soft_reset(ctrl_info, 3165 RESET_INITIATE_FIRMWARE); 3166 } 3167 3168 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3169 pqi_acknowledge_event(ctrl_info, event); 3170 pqi_ofa_setup_host_buffer(ctrl_info, 3171 le32_to_cpu(event->ofa_bytes_requested)); 3172 pqi_ofa_host_memory_update(ctrl_info); 3173 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3174 pqi_ofa_free_host_buffer(ctrl_info); 3175 pqi_acknowledge_event(ctrl_info, event); 3176 dev_info(&ctrl_info->pci_dev->dev, 3177 "Online Firmware Activation(%u) cancel reason : %u\n", 3178 ctrl_info->ctrl_id, event->ofa_cancel_reason); 3179 } 3180 3181 mutex_unlock(&ctrl_info->ofa_mutex); 3182 } 3183 3184 static void pqi_event_worker(struct work_struct *work) 3185 { 3186 unsigned int i; 3187 struct pqi_ctrl_info *ctrl_info; 3188 struct pqi_event *event; 3189 3190 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3191 3192 pqi_ctrl_busy(ctrl_info); 3193 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 3194 if (pqi_ctrl_offline(ctrl_info)) 3195 goto out; 3196 3197 pqi_schedule_rescan_worker_delayed(ctrl_info); 3198 3199 event = ctrl_info->events; 3200 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3201 if (event->pending) { 3202 event->pending = false; 3203 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3204 pqi_ctrl_unbusy(ctrl_info); 3205 pqi_ofa_process_event(ctrl_info, event); 3206 return; 3207 } 3208 pqi_acknowledge_event(ctrl_info, event); 3209 } 3210 event++; 3211 } 3212 3213 out: 3214 pqi_ctrl_unbusy(ctrl_info); 3215 } 3216 3217 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ) 3218 3219 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3220 { 3221 int num_interrupts; 3222 u32 heartbeat_count; 3223 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 3224 heartbeat_timer); 3225 3226 pqi_check_ctrl_health(ctrl_info); 3227 if (pqi_ctrl_offline(ctrl_info)) 3228 return; 3229 3230 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3231 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3232 3233 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3234 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3235 dev_err(&ctrl_info->pci_dev->dev, 3236 "no heartbeat detected - last heartbeat count: %u\n", 3237 heartbeat_count); 3238 pqi_take_ctrl_offline(ctrl_info); 3239 return; 3240 } 3241 } else { 3242 ctrl_info->previous_num_interrupts = num_interrupts; 3243 } 
3244 3245 ctrl_info->previous_heartbeat_count = heartbeat_count; 3246 mod_timer(&ctrl_info->heartbeat_timer, 3247 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3248 } 3249 3250 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3251 { 3252 if (!ctrl_info->heartbeat_counter) 3253 return; 3254 3255 ctrl_info->previous_num_interrupts = 3256 atomic_read(&ctrl_info->num_interrupts); 3257 ctrl_info->previous_heartbeat_count = 3258 pqi_read_heartbeat_counter(ctrl_info); 3259 3260 ctrl_info->heartbeat_timer.expires = 3261 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3262 add_timer(&ctrl_info->heartbeat_timer); 3263 } 3264 3265 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3266 { 3267 del_timer_sync(&ctrl_info->heartbeat_timer); 3268 } 3269 3270 static inline int pqi_event_type_to_event_index(unsigned int event_type) 3271 { 3272 int index; 3273 3274 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 3275 if (event_type == pqi_supported_event_types[index]) 3276 return index; 3277 3278 return -1; 3279 } 3280 3281 static inline bool pqi_is_supported_event(unsigned int event_type) 3282 { 3283 return pqi_event_type_to_event_index(event_type) != -1; 3284 } 3285 3286 static void pqi_ofa_capture_event_payload(struct pqi_event *event, 3287 struct pqi_event_response *response) 3288 { 3289 u16 event_id; 3290 3291 event_id = get_unaligned_le16(&event->event_id); 3292 3293 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3294 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3295 event->ofa_bytes_requested = 3296 response->data.ofa_memory_allocation.bytes_requested; 3297 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3298 event->ofa_cancel_reason = 3299 response->data.ofa_cancelled.reason; 3300 } 3301 } 3302 } 3303 3304 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3305 { 3306 unsigned int num_events; 3307 pqi_index_t oq_pi; 3308 pqi_index_t oq_ci; 3309 struct pqi_event_queue *event_queue; 3310 struct pqi_event_response *response; 3311 struct pqi_event *event; 3312 int event_index; 3313 3314 event_queue = &ctrl_info->event_queue; 3315 num_events = 0; 3316 oq_ci = event_queue->oq_ci_copy; 3317 3318 while (1) { 3319 oq_pi = readl(event_queue->oq_pi); 3320 if (oq_pi == oq_ci) 3321 break; 3322 3323 num_events++; 3324 response = event_queue->oq_element_array + 3325 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3326 3327 event_index = 3328 pqi_event_type_to_event_index(response->event_type); 3329 3330 if (event_index >= 0) { 3331 if (response->request_acknowlege) { 3332 event = &ctrl_info->events[event_index]; 3333 event->pending = true; 3334 event->event_type = response->event_type; 3335 event->event_id = response->event_id; 3336 event->additional_event_id = 3337 response->additional_event_id; 3338 pqi_ofa_capture_event_payload(event, response); 3339 } 3340 } 3341 3342 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3343 } 3344 3345 if (num_events) { 3346 event_queue->oq_ci_copy = oq_ci; 3347 writel(oq_ci, event_queue->oq_ci); 3348 schedule_work(&ctrl_info->event_work); 3349 } 3350 3351 return num_events; 3352 } 3353 3354 #define PQI_LEGACY_INTX_MASK 0x1 3355 3356 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 3357 bool enable_intx) 3358 { 3359 u32 intx_mask; 3360 struct pqi_device_registers __iomem *pqi_registers; 3361 volatile void __iomem *register_addr; 3362 3363 pqi_registers = ctrl_info->pqi_registers; 3364 3365 if (enable_intx) 3366 register_addr = &pqi_registers->legacy_intx_mask_clear; 
3367 else 3368 register_addr = &pqi_registers->legacy_intx_mask_set; 3369 3370 intx_mask = readl(register_addr); 3371 intx_mask |= PQI_LEGACY_INTX_MASK; 3372 writel(intx_mask, register_addr); 3373 } 3374 3375 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3376 enum pqi_irq_mode new_mode) 3377 { 3378 switch (ctrl_info->irq_mode) { 3379 case IRQ_MODE_MSIX: 3380 switch (new_mode) { 3381 case IRQ_MODE_MSIX: 3382 break; 3383 case IRQ_MODE_INTX: 3384 pqi_configure_legacy_intx(ctrl_info, true); 3385 sis_enable_intx(ctrl_info); 3386 break; 3387 case IRQ_MODE_NONE: 3388 break; 3389 } 3390 break; 3391 case IRQ_MODE_INTX: 3392 switch (new_mode) { 3393 case IRQ_MODE_MSIX: 3394 pqi_configure_legacy_intx(ctrl_info, false); 3395 sis_enable_msix(ctrl_info); 3396 break; 3397 case IRQ_MODE_INTX: 3398 break; 3399 case IRQ_MODE_NONE: 3400 pqi_configure_legacy_intx(ctrl_info, false); 3401 break; 3402 } 3403 break; 3404 case IRQ_MODE_NONE: 3405 switch (new_mode) { 3406 case IRQ_MODE_MSIX: 3407 sis_enable_msix(ctrl_info); 3408 break; 3409 case IRQ_MODE_INTX: 3410 pqi_configure_legacy_intx(ctrl_info, true); 3411 sis_enable_intx(ctrl_info); 3412 break; 3413 case IRQ_MODE_NONE: 3414 break; 3415 } 3416 break; 3417 } 3418 3419 ctrl_info->irq_mode = new_mode; 3420 } 3421 3422 #define PQI_LEGACY_INTX_PENDING 0x1 3423 3424 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3425 { 3426 bool valid_irq; 3427 u32 intx_status; 3428 3429 switch (ctrl_info->irq_mode) { 3430 case IRQ_MODE_MSIX: 3431 valid_irq = true; 3432 break; 3433 case IRQ_MODE_INTX: 3434 intx_status = 3435 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3436 if (intx_status & PQI_LEGACY_INTX_PENDING) 3437 valid_irq = true; 3438 else 3439 valid_irq = false; 3440 break; 3441 case IRQ_MODE_NONE: 3442 default: 3443 valid_irq = false; 3444 break; 3445 } 3446 3447 return valid_irq; 3448 } 3449 3450 static irqreturn_t pqi_irq_handler(int irq, void *data) 3451 { 3452 struct pqi_ctrl_info *ctrl_info; 3453 struct pqi_queue_group *queue_group; 3454 unsigned int num_responses_handled; 3455 3456 queue_group = data; 3457 ctrl_info = queue_group->ctrl_info; 3458 3459 if (!pqi_is_valid_irq(ctrl_info)) 3460 return IRQ_NONE; 3461 3462 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3463 3464 if (irq == ctrl_info->event_irq) 3465 num_responses_handled += pqi_process_event_intr(ctrl_info); 3466 3467 if (num_responses_handled) 3468 atomic_inc(&ctrl_info->num_interrupts); 3469 3470 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3471 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3472 3473 return IRQ_HANDLED; 3474 } 3475 3476 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3477 { 3478 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3479 int i; 3480 int rc; 3481 3482 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3483 3484 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3485 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3486 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3487 if (rc) { 3488 dev_err(&pci_dev->dev, 3489 "irq %u init failed with error %d\n", 3490 pci_irq_vector(pci_dev, i), rc); 3491 return rc; 3492 } 3493 ctrl_info->num_msix_vectors_initialized++; 3494 } 3495 3496 return 0; 3497 } 3498 3499 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3500 { 3501 int i; 3502 3503 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3504 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3505 &ctrl_info->queue_groups[i]); 3506 3507 
ctrl_info->num_msix_vectors_initialized = 0; 3508 } 3509 3510 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3511 { 3512 int num_vectors_enabled; 3513 3514 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3515 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3516 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3517 if (num_vectors_enabled < 0) { 3518 dev_err(&ctrl_info->pci_dev->dev, 3519 "MSI-X init failed with error %d\n", 3520 num_vectors_enabled); 3521 return num_vectors_enabled; 3522 } 3523 3524 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3525 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3526 return 0; 3527 } 3528 3529 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3530 { 3531 if (ctrl_info->num_msix_vectors_enabled) { 3532 pci_free_irq_vectors(ctrl_info->pci_dev); 3533 ctrl_info->num_msix_vectors_enabled = 0; 3534 } 3535 } 3536 3537 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3538 { 3539 unsigned int i; 3540 size_t alloc_length; 3541 size_t element_array_length_per_iq; 3542 size_t element_array_length_per_oq; 3543 void *element_array; 3544 void __iomem *next_queue_index; 3545 void *aligned_pointer; 3546 unsigned int num_inbound_queues; 3547 unsigned int num_outbound_queues; 3548 unsigned int num_queue_indexes; 3549 struct pqi_queue_group *queue_group; 3550 3551 element_array_length_per_iq = 3552 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3553 ctrl_info->num_elements_per_iq; 3554 element_array_length_per_oq = 3555 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3556 ctrl_info->num_elements_per_oq; 3557 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3558 num_outbound_queues = ctrl_info->num_queue_groups; 3559 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3560 3561 aligned_pointer = NULL; 3562 3563 for (i = 0; i < num_inbound_queues; i++) { 3564 aligned_pointer = PTR_ALIGN(aligned_pointer, 3565 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3566 aligned_pointer += element_array_length_per_iq; 3567 } 3568 3569 for (i = 0; i < num_outbound_queues; i++) { 3570 aligned_pointer = PTR_ALIGN(aligned_pointer, 3571 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3572 aligned_pointer += element_array_length_per_oq; 3573 } 3574 3575 aligned_pointer = PTR_ALIGN(aligned_pointer, 3576 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3577 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3578 PQI_EVENT_OQ_ELEMENT_LENGTH; 3579 3580 for (i = 0; i < num_queue_indexes; i++) { 3581 aligned_pointer = PTR_ALIGN(aligned_pointer, 3582 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3583 aligned_pointer += sizeof(pqi_index_t); 3584 } 3585 3586 alloc_length = (size_t)aligned_pointer + 3587 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3588 3589 alloc_length += PQI_EXTRA_SGL_MEMORY; 3590 3591 ctrl_info->queue_memory_base = 3592 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3593 &ctrl_info->queue_memory_base_dma_handle, 3594 GFP_KERNEL); 3595 3596 if (!ctrl_info->queue_memory_base) 3597 return -ENOMEM; 3598 3599 ctrl_info->queue_memory_length = alloc_length; 3600 3601 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3602 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3603 3604 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3605 queue_group = &ctrl_info->queue_groups[i]; 3606 queue_group->iq_element_array[RAID_PATH] = element_array; 3607 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3608 ctrl_info->queue_memory_base_dma_handle + 3609 (element_array - ctrl_info->queue_memory_base); 3610 element_array += element_array_length_per_iq; 3611 element_array = 
PTR_ALIGN(element_array, 3612 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3613 queue_group->iq_element_array[AIO_PATH] = element_array; 3614 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3615 ctrl_info->queue_memory_base_dma_handle + 3616 (element_array - ctrl_info->queue_memory_base); 3617 element_array += element_array_length_per_iq; 3618 element_array = PTR_ALIGN(element_array, 3619 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3620 } 3621 3622 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3623 queue_group = &ctrl_info->queue_groups[i]; 3624 queue_group->oq_element_array = element_array; 3625 queue_group->oq_element_array_bus_addr = 3626 ctrl_info->queue_memory_base_dma_handle + 3627 (element_array - ctrl_info->queue_memory_base); 3628 element_array += element_array_length_per_oq; 3629 element_array = PTR_ALIGN(element_array, 3630 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3631 } 3632 3633 ctrl_info->event_queue.oq_element_array = element_array; 3634 ctrl_info->event_queue.oq_element_array_bus_addr = 3635 ctrl_info->queue_memory_base_dma_handle + 3636 (element_array - ctrl_info->queue_memory_base); 3637 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3638 PQI_EVENT_OQ_ELEMENT_LENGTH; 3639 3640 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 3641 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3642 3643 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3644 queue_group = &ctrl_info->queue_groups[i]; 3645 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3646 queue_group->iq_ci_bus_addr[RAID_PATH] = 3647 ctrl_info->queue_memory_base_dma_handle + 3648 (next_queue_index - 3649 (void __iomem *)ctrl_info->queue_memory_base); 3650 next_queue_index += sizeof(pqi_index_t); 3651 next_queue_index = PTR_ALIGN(next_queue_index, 3652 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3653 queue_group->iq_ci[AIO_PATH] = next_queue_index; 3654 queue_group->iq_ci_bus_addr[AIO_PATH] = 3655 ctrl_info->queue_memory_base_dma_handle + 3656 (next_queue_index - 3657 (void __iomem *)ctrl_info->queue_memory_base); 3658 next_queue_index += sizeof(pqi_index_t); 3659 next_queue_index = PTR_ALIGN(next_queue_index, 3660 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3661 queue_group->oq_pi = next_queue_index; 3662 queue_group->oq_pi_bus_addr = 3663 ctrl_info->queue_memory_base_dma_handle + 3664 (next_queue_index - 3665 (void __iomem *)ctrl_info->queue_memory_base); 3666 next_queue_index += sizeof(pqi_index_t); 3667 next_queue_index = PTR_ALIGN(next_queue_index, 3668 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3669 } 3670 3671 ctrl_info->event_queue.oq_pi = next_queue_index; 3672 ctrl_info->event_queue.oq_pi_bus_addr = 3673 ctrl_info->queue_memory_base_dma_handle + 3674 (next_queue_index - 3675 (void __iomem *)ctrl_info->queue_memory_base); 3676 3677 return 0; 3678 } 3679 3680 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3681 { 3682 unsigned int i; 3683 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3684 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3685 3686 /* 3687 * Initialize the backpointers to the controller structure in 3688 * each operational queue group structure. 3689 */ 3690 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3691 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3692 3693 /* 3694 * Assign IDs to all operational queues. Note that the IDs 3695 * assigned to operational IQs are independent of the IDs 3696 * assigned to operational OQs. 
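 * For example, with two queue groups (and assuming
 * PQI_MIN_OPERATIONAL_QUEUE_ID is 1), the IQ IDs come out as 1-4 (one RAID
 * and one AIO IQ per group) while the OQ IDs come out as 1-3 (the event
 * queue plus one OQ per group).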
3697 */ 3698 ctrl_info->event_queue.oq_id = next_oq_id++; 3699 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3700 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3701 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3702 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3703 } 3704 3705 /* 3706 * Assign MSI-X table entry indexes to all queues. Note that the 3707 * interrupt for the event queue is shared with the first queue group. 3708 */ 3709 ctrl_info->event_queue.int_msg_num = 0; 3710 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3711 ctrl_info->queue_groups[i].int_msg_num = i; 3712 3713 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3714 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3715 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3716 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3717 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3718 } 3719 } 3720 3721 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3722 { 3723 size_t alloc_length; 3724 struct pqi_admin_queues_aligned *admin_queues_aligned; 3725 struct pqi_admin_queues *admin_queues; 3726 3727 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3728 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3729 3730 ctrl_info->admin_queue_memory_base = 3731 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3732 &ctrl_info->admin_queue_memory_base_dma_handle, 3733 GFP_KERNEL); 3734 3735 if (!ctrl_info->admin_queue_memory_base) 3736 return -ENOMEM; 3737 3738 ctrl_info->admin_queue_memory_length = alloc_length; 3739 3740 admin_queues = &ctrl_info->admin_queues; 3741 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3742 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3743 admin_queues->iq_element_array = 3744 &admin_queues_aligned->iq_element_array; 3745 admin_queues->oq_element_array = 3746 &admin_queues_aligned->oq_element_array; 3747 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3748 admin_queues->oq_pi = 3749 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 3750 3751 admin_queues->iq_element_array_bus_addr = 3752 ctrl_info->admin_queue_memory_base_dma_handle + 3753 (admin_queues->iq_element_array - 3754 ctrl_info->admin_queue_memory_base); 3755 admin_queues->oq_element_array_bus_addr = 3756 ctrl_info->admin_queue_memory_base_dma_handle + 3757 (admin_queues->oq_element_array - 3758 ctrl_info->admin_queue_memory_base); 3759 admin_queues->iq_ci_bus_addr = 3760 ctrl_info->admin_queue_memory_base_dma_handle + 3761 ((void *)admin_queues->iq_ci - 3762 ctrl_info->admin_queue_memory_base); 3763 admin_queues->oq_pi_bus_addr = 3764 ctrl_info->admin_queue_memory_base_dma_handle + 3765 ((void __iomem *)admin_queues->oq_pi - 3766 (void __iomem *)ctrl_info->admin_queue_memory_base); 3767 3768 return 0; 3769 } 3770 3771 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ 3772 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3773 3774 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3775 { 3776 struct pqi_device_registers __iomem *pqi_registers; 3777 struct pqi_admin_queues *admin_queues; 3778 unsigned long timeout; 3779 u8 status; 3780 u32 reg; 3781 3782 pqi_registers = ctrl_info->pqi_registers; 3783 admin_queues = &ctrl_info->admin_queues; 3784 3785 writeq((u64)admin_queues->iq_element_array_bus_addr, 3786 &pqi_registers->admin_iq_element_array_addr); 3787 writeq((u64)admin_queues->oq_element_array_bus_addr, 3788 &pqi_registers->admin_oq_element_array_addr); 3789 
writeq((u64)admin_queues->iq_ci_bus_addr, 3790 &pqi_registers->admin_iq_ci_addr); 3791 writeq((u64)admin_queues->oq_pi_bus_addr, 3792 &pqi_registers->admin_oq_pi_addr); 3793 3794 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3795 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 3796 (admin_queues->int_msg_num << 16); 3797 writel(reg, &pqi_registers->admin_iq_num_elements); 3798 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3799 &pqi_registers->function_and_status_code); 3800 3801 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 3802 while (1) { 3803 status = readb(&pqi_registers->function_and_status_code); 3804 if (status == PQI_STATUS_IDLE) 3805 break; 3806 if (time_after(jiffies, timeout)) 3807 return -ETIMEDOUT; 3808 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3809 } 3810 3811 /* 3812 * The offset registers are not initialized to the correct 3813 * offsets until *after* the create admin queue pair command 3814 * completes successfully. 3815 */ 3816 admin_queues->iq_pi = ctrl_info->iomem_base + 3817 PQI_DEVICE_REGISTERS_OFFSET + 3818 readq(&pqi_registers->admin_iq_pi_offset); 3819 admin_queues->oq_ci = ctrl_info->iomem_base + 3820 PQI_DEVICE_REGISTERS_OFFSET + 3821 readq(&pqi_registers->admin_oq_ci_offset); 3822 3823 return 0; 3824 } 3825 3826 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3827 struct pqi_general_admin_request *request) 3828 { 3829 struct pqi_admin_queues *admin_queues; 3830 void *next_element; 3831 pqi_index_t iq_pi; 3832 3833 admin_queues = &ctrl_info->admin_queues; 3834 iq_pi = admin_queues->iq_pi_copy; 3835 3836 next_element = admin_queues->iq_element_array + 3837 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3838 3839 memcpy(next_element, request, sizeof(*request)); 3840 3841 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3842 admin_queues->iq_pi_copy = iq_pi; 3843 3844 /* 3845 * This write notifies the controller that an IU is available to be 3846 * processed. 
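 * (The driver does not wait for an interrupt to learn that an admin
 * request finished; the response is retrieved by polling, see
 * pqi_poll_for_admin_response() below.)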
3847 */ 3848 writel(iq_pi, admin_queues->iq_pi); 3849 } 3850 3851 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3852 3853 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3854 struct pqi_general_admin_response *response) 3855 { 3856 struct pqi_admin_queues *admin_queues; 3857 pqi_index_t oq_pi; 3858 pqi_index_t oq_ci; 3859 unsigned long timeout; 3860 3861 admin_queues = &ctrl_info->admin_queues; 3862 oq_ci = admin_queues->oq_ci_copy; 3863 3864 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies; 3865 3866 while (1) { 3867 oq_pi = readl(admin_queues->oq_pi); 3868 if (oq_pi != oq_ci) 3869 break; 3870 if (time_after(jiffies, timeout)) { 3871 dev_err(&ctrl_info->pci_dev->dev, 3872 "timed out waiting for admin response\n"); 3873 return -ETIMEDOUT; 3874 } 3875 if (!sis_is_firmware_running(ctrl_info)) 3876 return -ENXIO; 3877 usleep_range(1000, 2000); 3878 } 3879 3880 memcpy(response, admin_queues->oq_element_array + 3881 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3882 3883 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3884 admin_queues->oq_ci_copy = oq_ci; 3885 writel(oq_ci, admin_queues->oq_ci); 3886 3887 return 0; 3888 } 3889 3890 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3891 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3892 struct pqi_io_request *io_request) 3893 { 3894 struct pqi_io_request *next; 3895 void *next_element; 3896 pqi_index_t iq_pi; 3897 pqi_index_t iq_ci; 3898 size_t iu_length; 3899 unsigned long flags; 3900 unsigned int num_elements_needed; 3901 unsigned int num_elements_to_end_of_queue; 3902 size_t copy_count; 3903 struct pqi_iu_header *request; 3904 3905 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3906 3907 if (io_request) { 3908 io_request->queue_group = queue_group; 3909 list_add_tail(&io_request->request_list_entry, 3910 &queue_group->request_list[path]); 3911 } 3912 3913 iq_pi = queue_group->iq_pi_copy[path]; 3914 3915 list_for_each_entry_safe(io_request, next, 3916 &queue_group->request_list[path], request_list_entry) { 3917 3918 request = io_request->iu; 3919 3920 iu_length = get_unaligned_le16(&request->iu_length) + 3921 PQI_REQUEST_HEADER_LENGTH; 3922 num_elements_needed = 3923 DIV_ROUND_UP(iu_length, 3924 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3925 3926 iq_ci = readl(queue_group->iq_ci[path]); 3927 3928 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3929 ctrl_info->num_elements_per_iq)) 3930 break; 3931 3932 put_unaligned_le16(queue_group->oq_id, 3933 &request->response_queue_id); 3934 3935 next_element = queue_group->iq_element_array[path] + 3936 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3937 3938 num_elements_to_end_of_queue = 3939 ctrl_info->num_elements_per_iq - iq_pi; 3940 3941 if (num_elements_needed <= num_elements_to_end_of_queue) { 3942 memcpy(next_element, request, iu_length); 3943 } else { 3944 copy_count = num_elements_to_end_of_queue * 3945 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3946 memcpy(next_element, request, copy_count); 3947 memcpy(queue_group->iq_element_array[path], 3948 (u8 *)request + copy_count, 3949 iu_length - copy_count); 3950 } 3951 3952 iq_pi = (iq_pi + num_elements_needed) % 3953 ctrl_info->num_elements_per_iq; 3954 3955 list_del(&io_request->request_list_entry); 3956 } 3957 3958 if (iq_pi != queue_group->iq_pi_copy[path]) { 3959 queue_group->iq_pi_copy[path] = iq_pi; 3960 /* 3961 * This write notifies the controller that one or more IUs are 3962 * available to be processed. 
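 * The doorbell is rung only when iq_pi actually advanced; a pass that
 * could not fit any queued request leaves the register untouched.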
3963 */ 3964 writel(iq_pi, queue_group->iq_pi[path]); 3965 } 3966 3967 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3968 } 3969 3970 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3971 3972 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3973 struct completion *wait) 3974 { 3975 int rc; 3976 3977 while (1) { 3978 if (wait_for_completion_io_timeout(wait, 3979 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) { 3980 rc = 0; 3981 break; 3982 } 3983 3984 pqi_check_ctrl_health(ctrl_info); 3985 if (pqi_ctrl_offline(ctrl_info)) { 3986 rc = -ENXIO; 3987 break; 3988 } 3989 } 3990 3991 return rc; 3992 } 3993 3994 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3995 void *context) 3996 { 3997 struct completion *waiting = context; 3998 3999 complete(waiting); 4000 } 4001 4002 static int pqi_process_raid_io_error_synchronous( 4003 struct pqi_raid_error_info *error_info) 4004 { 4005 int rc = -EIO; 4006 4007 switch (error_info->data_out_result) { 4008 case PQI_DATA_IN_OUT_GOOD: 4009 if (error_info->status == SAM_STAT_GOOD) 4010 rc = 0; 4011 break; 4012 case PQI_DATA_IN_OUT_UNDERFLOW: 4013 if (error_info->status == SAM_STAT_GOOD || 4014 error_info->status == SAM_STAT_CHECK_CONDITION) 4015 rc = 0; 4016 break; 4017 case PQI_DATA_IN_OUT_ABORTED: 4018 rc = PQI_CMD_STATUS_ABORTED; 4019 break; 4020 } 4021 4022 return rc; 4023 } 4024 4025 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4026 struct pqi_iu_header *request, unsigned int flags, 4027 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 4028 { 4029 int rc = 0; 4030 struct pqi_io_request *io_request; 4031 unsigned long start_jiffies; 4032 unsigned long msecs_blocked; 4033 size_t iu_length; 4034 DECLARE_COMPLETION_ONSTACK(wait); 4035 4036 /* 4037 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 4038 * are mutually exclusive. 
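 * When PQI_SYNC_FLAGS_INTERRUPTABLE is set, the semaphore below is taken
 * with down_interruptible() and no timeout is applied to that wait;
 * timed callers rely on down_timeout() instead.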
4039 */ 4040 4041 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4042 if (down_interruptible(&ctrl_info->sync_request_sem)) 4043 return -ERESTARTSYS; 4044 } else { 4045 if (timeout_msecs == NO_TIMEOUT) { 4046 down(&ctrl_info->sync_request_sem); 4047 } else { 4048 start_jiffies = jiffies; 4049 if (down_timeout(&ctrl_info->sync_request_sem, 4050 msecs_to_jiffies(timeout_msecs))) 4051 return -ETIMEDOUT; 4052 msecs_blocked = 4053 jiffies_to_msecs(jiffies - start_jiffies); 4054 if (msecs_blocked >= timeout_msecs) { 4055 rc = -ETIMEDOUT; 4056 goto out; 4057 } 4058 timeout_msecs -= msecs_blocked; 4059 } 4060 } 4061 4062 pqi_ctrl_busy(ctrl_info); 4063 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 4064 if (timeout_msecs == 0) { 4065 pqi_ctrl_unbusy(ctrl_info); 4066 rc = -ETIMEDOUT; 4067 goto out; 4068 } 4069 4070 if (pqi_ctrl_offline(ctrl_info)) { 4071 pqi_ctrl_unbusy(ctrl_info); 4072 rc = -ENXIO; 4073 goto out; 4074 } 4075 4076 atomic_inc(&ctrl_info->sync_cmds_outstanding); 4077 4078 io_request = pqi_alloc_io_request(ctrl_info); 4079 4080 put_unaligned_le16(io_request->index, 4081 &(((struct pqi_raid_path_request *)request)->request_id)); 4082 4083 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4084 ((struct pqi_raid_path_request *)request)->error_index = 4085 ((struct pqi_raid_path_request *)request)->request_id; 4086 4087 iu_length = get_unaligned_le16(&request->iu_length) + 4088 PQI_REQUEST_HEADER_LENGTH; 4089 memcpy(io_request->iu, request, iu_length); 4090 4091 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4092 io_request->context = &wait; 4093 4094 pqi_start_io(ctrl_info, 4095 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4096 io_request); 4097 4098 pqi_ctrl_unbusy(ctrl_info); 4099 4100 if (timeout_msecs == NO_TIMEOUT) { 4101 pqi_wait_for_completion_io(ctrl_info, &wait); 4102 } else { 4103 if (!wait_for_completion_io_timeout(&wait, 4104 msecs_to_jiffies(timeout_msecs))) { 4105 dev_warn(&ctrl_info->pci_dev->dev, 4106 "command timed out\n"); 4107 rc = -ETIMEDOUT; 4108 } 4109 } 4110 4111 if (error_info) { 4112 if (io_request->error_info) 4113 memcpy(error_info, io_request->error_info, 4114 sizeof(*error_info)); 4115 else 4116 memset(error_info, 0, sizeof(*error_info)); 4117 } else if (rc == 0 && io_request->error_info) { 4118 rc = pqi_process_raid_io_error_synchronous( 4119 io_request->error_info); 4120 } 4121 4122 pqi_free_io_request(io_request); 4123 4124 atomic_dec(&ctrl_info->sync_cmds_outstanding); 4125 out: 4126 up(&ctrl_info->sync_request_sem); 4127 4128 return rc; 4129 } 4130 4131 static int pqi_validate_admin_response( 4132 struct pqi_general_admin_response *response, u8 expected_function_code) 4133 { 4134 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4135 return -EINVAL; 4136 4137 if (get_unaligned_le16(&response->header.iu_length) != 4138 PQI_GENERAL_ADMIN_IU_LENGTH) 4139 return -EINVAL; 4140 4141 if (response->function_code != expected_function_code) 4142 return -EINVAL; 4143 4144 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4145 return -EINVAL; 4146 4147 return 0; 4148 } 4149 4150 static int pqi_submit_admin_request_synchronous( 4151 struct pqi_ctrl_info *ctrl_info, 4152 struct pqi_general_admin_request *request, 4153 struct pqi_general_admin_response *response) 4154 { 4155 int rc; 4156 4157 pqi_submit_admin_request(ctrl_info, request); 4158 4159 rc = pqi_poll_for_admin_response(ctrl_info, response); 4160 4161 if (rc == 0) 4162 rc = pqi_validate_admin_response(response, 4163 
request->function_code); 4164 4165 return rc; 4166 } 4167 4168 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4169 { 4170 int rc; 4171 struct pqi_general_admin_request request; 4172 struct pqi_general_admin_response response; 4173 struct pqi_device_capability *capability; 4174 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4175 4176 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4177 if (!capability) 4178 return -ENOMEM; 4179 4180 memset(&request, 0, sizeof(request)); 4181 4182 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4183 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4184 &request.header.iu_length); 4185 request.function_code = 4186 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4187 put_unaligned_le32(sizeof(*capability), 4188 &request.data.report_device_capability.buffer_length); 4189 4190 rc = pqi_map_single(ctrl_info->pci_dev, 4191 &request.data.report_device_capability.sg_descriptor, 4192 capability, sizeof(*capability), 4193 DMA_FROM_DEVICE); 4194 if (rc) 4195 goto out; 4196 4197 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4198 &response); 4199 4200 pqi_pci_unmap(ctrl_info->pci_dev, 4201 &request.data.report_device_capability.sg_descriptor, 1, 4202 DMA_FROM_DEVICE); 4203 4204 if (rc) 4205 goto out; 4206 4207 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4208 rc = -EIO; 4209 goto out; 4210 } 4211 4212 ctrl_info->max_inbound_queues = 4213 get_unaligned_le16(&capability->max_inbound_queues); 4214 ctrl_info->max_elements_per_iq = 4215 get_unaligned_le16(&capability->max_elements_per_iq); 4216 ctrl_info->max_iq_element_length = 4217 get_unaligned_le16(&capability->max_iq_element_length) 4218 * 16; 4219 ctrl_info->max_outbound_queues = 4220 get_unaligned_le16(&capability->max_outbound_queues); 4221 ctrl_info->max_elements_per_oq = 4222 get_unaligned_le16(&capability->max_elements_per_oq); 4223 ctrl_info->max_oq_element_length = 4224 get_unaligned_le16(&capability->max_oq_element_length) 4225 * 16; 4226 4227 sop_iu_layer_descriptor = 4228 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4229 4230 ctrl_info->max_inbound_iu_length_per_firmware = 4231 get_unaligned_le16( 4232 &sop_iu_layer_descriptor->max_inbound_iu_length); 4233 ctrl_info->inbound_spanning_supported = 4234 sop_iu_layer_descriptor->inbound_spanning_supported; 4235 ctrl_info->outbound_spanning_supported = 4236 sop_iu_layer_descriptor->outbound_spanning_supported; 4237 4238 out: 4239 kfree(capability); 4240 4241 return rc; 4242 } 4243 4244 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4245 { 4246 if (ctrl_info->max_iq_element_length < 4247 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4248 dev_err(&ctrl_info->pci_dev->dev, 4249 "max. inbound queue element length of %d is less than the required length of %d\n", 4250 ctrl_info->max_iq_element_length, 4251 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4252 return -EINVAL; 4253 } 4254 4255 if (ctrl_info->max_oq_element_length < 4256 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4257 dev_err(&ctrl_info->pci_dev->dev, 4258 "max. outbound queue element length of %d is less than the required length of %d\n", 4259 ctrl_info->max_oq_element_length, 4260 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4261 return -EINVAL; 4262 } 4263 4264 if (ctrl_info->max_inbound_iu_length_per_firmware < 4265 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4266 dev_err(&ctrl_info->pci_dev->dev, 4267 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 4268 ctrl_info->max_inbound_iu_length_per_firmware, 4269 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4270 return -EINVAL; 4271 } 4272 4273 if (!ctrl_info->inbound_spanning_supported) { 4274 dev_err(&ctrl_info->pci_dev->dev, 4275 "the controller does not support inbound spanning\n"); 4276 return -EINVAL; 4277 } 4278 4279 if (ctrl_info->outbound_spanning_supported) { 4280 dev_err(&ctrl_info->pci_dev->dev, 4281 "the controller supports outbound spanning but this driver does not\n"); 4282 return -EINVAL; 4283 } 4284 4285 return 0; 4286 } 4287 4288 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4289 { 4290 int rc; 4291 struct pqi_event_queue *event_queue; 4292 struct pqi_general_admin_request request; 4293 struct pqi_general_admin_response response; 4294 4295 event_queue = &ctrl_info->event_queue; 4296 4297 /* 4298 * Create OQ (Outbound Queue - device to host queue) to dedicate 4299 * to events. 4300 */ 4301 memset(&request, 0, sizeof(request)); 4302 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4303 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4304 &request.header.iu_length); 4305 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4306 put_unaligned_le16(event_queue->oq_id, 4307 &request.data.create_operational_oq.queue_id); 4308 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4309 &request.data.create_operational_oq.element_array_addr); 4310 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4311 &request.data.create_operational_oq.pi_addr); 4312 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4313 &request.data.create_operational_oq.num_elements); 4314 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4315 &request.data.create_operational_oq.element_length); 4316 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4317 put_unaligned_le16(event_queue->int_msg_num, 4318 &request.data.create_operational_oq.int_msg_num); 4319 4320 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4321 &response); 4322 if (rc) 4323 return rc; 4324 4325 event_queue->oq_ci = ctrl_info->iomem_base + 4326 PQI_DEVICE_REGISTERS_OFFSET + 4327 get_unaligned_le64( 4328 &response.data.create_operational_oq.oq_ci_offset); 4329 4330 return 0; 4331 } 4332 4333 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4334 unsigned int group_number) 4335 { 4336 int rc; 4337 struct pqi_queue_group *queue_group; 4338 struct pqi_general_admin_request request; 4339 struct pqi_general_admin_response response; 4340 4341 queue_group = &ctrl_info->queue_groups[group_number]; 4342 4343 /* 4344 * Create IQ (Inbound Queue - host to device queue) for 4345 * RAID path. 
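 * Queue element lengths are exchanged with the firmware in units of
 * 16 bytes, which is why the element_length fields below are divided by
 * 16 (and the capability values read earlier were multiplied by 16).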
4346 */ 4347 memset(&request, 0, sizeof(request)); 4348 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4349 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4350 &request.header.iu_length); 4351 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4352 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4353 &request.data.create_operational_iq.queue_id); 4354 put_unaligned_le64( 4355 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4356 &request.data.create_operational_iq.element_array_addr); 4357 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4358 &request.data.create_operational_iq.ci_addr); 4359 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4360 &request.data.create_operational_iq.num_elements); 4361 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4362 &request.data.create_operational_iq.element_length); 4363 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4364 4365 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4366 &response); 4367 if (rc) { 4368 dev_err(&ctrl_info->pci_dev->dev, 4369 "error creating inbound RAID queue\n"); 4370 return rc; 4371 } 4372 4373 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4374 PQI_DEVICE_REGISTERS_OFFSET + 4375 get_unaligned_le64( 4376 &response.data.create_operational_iq.iq_pi_offset); 4377 4378 /* 4379 * Create IQ (Inbound Queue - host to device queue) for 4380 * Advanced I/O (AIO) path. 4381 */ 4382 memset(&request, 0, sizeof(request)); 4383 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4384 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4385 &request.header.iu_length); 4386 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4387 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4388 &request.data.create_operational_iq.queue_id); 4389 put_unaligned_le64((u64)queue_group-> 4390 iq_element_array_bus_addr[AIO_PATH], 4391 &request.data.create_operational_iq.element_array_addr); 4392 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4393 &request.data.create_operational_iq.ci_addr); 4394 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4395 &request.data.create_operational_iq.num_elements); 4396 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4397 &request.data.create_operational_iq.element_length); 4398 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4399 4400 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4401 &response); 4402 if (rc) { 4403 dev_err(&ctrl_info->pci_dev->dev, 4404 "error creating inbound AIO queue\n"); 4405 return rc; 4406 } 4407 4408 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4409 PQI_DEVICE_REGISTERS_OFFSET + 4410 get_unaligned_le64( 4411 &response.data.create_operational_iq.iq_pi_offset); 4412 4413 /* 4414 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4415 * assumed to be for RAID path I/O unless we change the queue's 4416 * property. 
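 * The change is made with a CHANGE IQ PROPERTY admin request that writes
 * the vendor-specific PQI_IQ_PROPERTY_IS_AIO_QUEUE property on the AIO
 * inbound queue created above.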
4417 */ 4418 memset(&request, 0, sizeof(request)); 4419 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4420 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4421 &request.header.iu_length); 4422 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4423 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4424 &request.data.change_operational_iq_properties.queue_id); 4425 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4426 &request.data.change_operational_iq_properties.vendor_specific); 4427 4428 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4429 &response); 4430 if (rc) { 4431 dev_err(&ctrl_info->pci_dev->dev, 4432 "error changing queue property\n"); 4433 return rc; 4434 } 4435 4436 /* 4437 * Create OQ (Outbound Queue - device to host queue). 4438 */ 4439 memset(&request, 0, sizeof(request)); 4440 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4441 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4442 &request.header.iu_length); 4443 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4444 put_unaligned_le16(queue_group->oq_id, 4445 &request.data.create_operational_oq.queue_id); 4446 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4447 &request.data.create_operational_oq.element_array_addr); 4448 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4449 &request.data.create_operational_oq.pi_addr); 4450 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4451 &request.data.create_operational_oq.num_elements); 4452 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4453 &request.data.create_operational_oq.element_length); 4454 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4455 put_unaligned_le16(queue_group->int_msg_num, 4456 &request.data.create_operational_oq.int_msg_num); 4457 4458 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4459 &response); 4460 if (rc) { 4461 dev_err(&ctrl_info->pci_dev->dev, 4462 "error creating outbound queue\n"); 4463 return rc; 4464 } 4465 4466 queue_group->oq_ci = ctrl_info->iomem_base + 4467 PQI_DEVICE_REGISTERS_OFFSET + 4468 get_unaligned_le64( 4469 &response.data.create_operational_oq.oq_ci_offset); 4470 4471 return 0; 4472 } 4473 4474 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4475 { 4476 int rc; 4477 unsigned int i; 4478 4479 rc = pqi_create_event_queue(ctrl_info); 4480 if (rc) { 4481 dev_err(&ctrl_info->pci_dev->dev, 4482 "error creating event queue\n"); 4483 return rc; 4484 } 4485 4486 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4487 rc = pqi_create_queue_group(ctrl_info, i); 4488 if (rc) { 4489 dev_err(&ctrl_info->pci_dev->dev, 4490 "error creating queue group number %u/%u\n", 4491 i, ctrl_info->num_queue_groups); 4492 return rc; 4493 } 4494 } 4495 4496 return 0; 4497 } 4498 4499 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4500 (offsetof(struct pqi_event_config, descriptors) + \ 4501 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4502 4503 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4504 bool enable_events) 4505 { 4506 int rc; 4507 unsigned int i; 4508 struct pqi_event_config *event_config; 4509 struct pqi_event_descriptor *event_descriptor; 4510 struct pqi_general_management_request request; 4511 4512 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4513 GFP_KERNEL); 4514 if (!event_config) 4515 return -ENOMEM; 4516 4517 memset(&request, 0, sizeof(request)); 4518 4519 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4520 
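/*
 * Events are configured in two steps: read the current event
 * configuration from the controller, point each supported descriptor at
 * (or detach it from) the event queue, then write the modified table
 * back with a SET request.  The IU length covers everything after the
 * IU header up to and including the single SG descriptor that maps the
 * config buffer.
 */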
put_unaligned_le16(offsetof(struct pqi_general_management_request, 4521 data.report_event_configuration.sg_descriptors[1]) - 4522 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4523 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4524 &request.data.report_event_configuration.buffer_length); 4525 4526 rc = pqi_map_single(ctrl_info->pci_dev, 4527 request.data.report_event_configuration.sg_descriptors, 4528 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4529 DMA_FROM_DEVICE); 4530 if (rc) 4531 goto out; 4532 4533 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4534 0, NULL, NO_TIMEOUT); 4535 4536 pqi_pci_unmap(ctrl_info->pci_dev, 4537 request.data.report_event_configuration.sg_descriptors, 1, 4538 DMA_FROM_DEVICE); 4539 4540 if (rc) 4541 goto out; 4542 4543 for (i = 0; i < event_config->num_event_descriptors; i++) { 4544 event_descriptor = &event_config->descriptors[i]; 4545 if (enable_events && 4546 pqi_is_supported_event(event_descriptor->event_type)) 4547 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4548 &event_descriptor->oq_id); 4549 else 4550 put_unaligned_le16(0, &event_descriptor->oq_id); 4551 } 4552 4553 memset(&request, 0, sizeof(request)); 4554 4555 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4556 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4557 data.report_event_configuration.sg_descriptors[1]) - 4558 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4559 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4560 &request.data.report_event_configuration.buffer_length); 4561 4562 rc = pqi_map_single(ctrl_info->pci_dev, 4563 request.data.report_event_configuration.sg_descriptors, 4564 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4565 DMA_TO_DEVICE); 4566 if (rc) 4567 goto out; 4568 4569 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4570 NULL, NO_TIMEOUT); 4571 4572 pqi_pci_unmap(ctrl_info->pci_dev, 4573 request.data.report_event_configuration.sg_descriptors, 1, 4574 DMA_TO_DEVICE); 4575 4576 out: 4577 kfree(event_config); 4578 4579 return rc; 4580 } 4581 4582 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4583 { 4584 return pqi_configure_events(ctrl_info, true); 4585 } 4586 4587 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4588 { 4589 return pqi_configure_events(ctrl_info, false); 4590 } 4591 4592 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4593 { 4594 unsigned int i; 4595 struct device *dev; 4596 size_t sg_chain_buffer_length; 4597 struct pqi_io_request *io_request; 4598 4599 if (!ctrl_info->io_request_pool) 4600 return; 4601 4602 dev = &ctrl_info->pci_dev->dev; 4603 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4604 io_request = ctrl_info->io_request_pool; 4605 4606 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4607 kfree(io_request->iu); 4608 if (!io_request->sg_chain_buffer) 4609 break; 4610 dma_free_coherent(dev, sg_chain_buffer_length, 4611 io_request->sg_chain_buffer, 4612 io_request->sg_chain_buffer_dma_handle); 4613 io_request++; 4614 } 4615 4616 kfree(ctrl_info->io_request_pool); 4617 ctrl_info->io_request_pool = NULL; 4618 } 4619 4620 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4621 { 4622 4623 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 4624 ctrl_info->error_buffer_length, 4625 &ctrl_info->error_buffer_dma_handle, 4626 GFP_KERNEL); 4627 if (!ctrl_info->error_buffer) 4628 return -ENOMEM; 4629 
4630 return 0; 4631 } 4632 4633 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4634 { 4635 unsigned int i; 4636 void *sg_chain_buffer; 4637 size_t sg_chain_buffer_length; 4638 dma_addr_t sg_chain_buffer_dma_handle; 4639 struct device *dev; 4640 struct pqi_io_request *io_request; 4641 4642 ctrl_info->io_request_pool = 4643 kcalloc(ctrl_info->max_io_slots, 4644 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4645 4646 if (!ctrl_info->io_request_pool) { 4647 dev_err(&ctrl_info->pci_dev->dev, 4648 "failed to allocate I/O request pool\n"); 4649 goto error; 4650 } 4651 4652 dev = &ctrl_info->pci_dev->dev; 4653 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4654 io_request = ctrl_info->io_request_pool; 4655 4656 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4657 io_request->iu = 4658 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4659 4660 if (!io_request->iu) { 4661 dev_err(&ctrl_info->pci_dev->dev, 4662 "failed to allocate IU buffers\n"); 4663 goto error; 4664 } 4665 4666 sg_chain_buffer = dma_alloc_coherent(dev, 4667 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4668 GFP_KERNEL); 4669 4670 if (!sg_chain_buffer) { 4671 dev_err(&ctrl_info->pci_dev->dev, 4672 "failed to allocate PQI scatter-gather chain buffers\n"); 4673 goto error; 4674 } 4675 4676 io_request->index = i; 4677 io_request->sg_chain_buffer = sg_chain_buffer; 4678 io_request->sg_chain_buffer_dma_handle = 4679 sg_chain_buffer_dma_handle; 4680 io_request++; 4681 } 4682 4683 return 0; 4684 4685 error: 4686 pqi_free_all_io_requests(ctrl_info); 4687 4688 return -ENOMEM; 4689 } 4690 4691 /* 4692 * Calculate required resources that are sized based on max. outstanding 4693 * requests and max. transfer size. 4694 */ 4695 4696 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4697 { 4698 u32 max_transfer_size; 4699 u32 max_sg_entries; 4700 4701 ctrl_info->scsi_ml_can_queue = 4702 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4703 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4704 4705 ctrl_info->error_buffer_length = 4706 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4707 4708 if (reset_devices) 4709 max_transfer_size = min(ctrl_info->max_transfer_size, 4710 PQI_MAX_TRANSFER_SIZE_KDUMP); 4711 else 4712 max_transfer_size = min(ctrl_info->max_transfer_size, 4713 PQI_MAX_TRANSFER_SIZE); 4714 4715 max_sg_entries = max_transfer_size / PAGE_SIZE; 4716 4717 /* +1 to cover when the buffer is not page-aligned. 
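 * (A transfer of max_transfer_size bytes that starts in the middle of a
 * page spans one extra page and therefore needs one extra SG entry.)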
*/ 4718 max_sg_entries++; 4719 4720 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4721 4722 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4723 4724 ctrl_info->sg_chain_buffer_length = 4725 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4726 PQI_EXTRA_SGL_MEMORY; 4727 ctrl_info->sg_tablesize = max_sg_entries; 4728 ctrl_info->max_sectors = max_transfer_size / 512; 4729 } 4730 4731 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4732 { 4733 int num_queue_groups; 4734 u16 num_elements_per_iq; 4735 u16 num_elements_per_oq; 4736 4737 if (reset_devices) { 4738 num_queue_groups = 1; 4739 } else { 4740 int num_cpus; 4741 int max_queue_groups; 4742 4743 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4744 ctrl_info->max_outbound_queues - 1); 4745 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4746 4747 num_cpus = num_online_cpus(); 4748 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4749 num_queue_groups = min(num_queue_groups, max_queue_groups); 4750 } 4751 4752 ctrl_info->num_queue_groups = num_queue_groups; 4753 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4754 4755 /* 4756 * Make sure that the max. inbound IU length is an even multiple 4757 * of our inbound element length. 4758 */ 4759 ctrl_info->max_inbound_iu_length = 4760 (ctrl_info->max_inbound_iu_length_per_firmware / 4761 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4762 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4763 4764 num_elements_per_iq = 4765 (ctrl_info->max_inbound_iu_length / 4766 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4767 4768 /* Add one because one element in each queue is unusable. */ 4769 num_elements_per_iq++; 4770 4771 num_elements_per_iq = min(num_elements_per_iq, 4772 ctrl_info->max_elements_per_iq); 4773 4774 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4775 num_elements_per_oq = min(num_elements_per_oq, 4776 ctrl_info->max_elements_per_oq); 4777 4778 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4779 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4780 4781 ctrl_info->max_sg_per_iu = 4782 ((ctrl_info->max_inbound_iu_length - 4783 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4784 sizeof(struct pqi_sg_descriptor)) + 4785 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4786 } 4787 4788 static inline void pqi_set_sg_descriptor( 4789 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4790 { 4791 u64 address = (u64)sg_dma_address(sg); 4792 unsigned int length = sg_dma_len(sg); 4793 4794 put_unaligned_le64(address, &sg_descriptor->address); 4795 put_unaligned_le32(length, &sg_descriptor->length); 4796 put_unaligned_le32(0, &sg_descriptor->flags); 4797 } 4798 4799 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4800 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4801 struct pqi_io_request *io_request) 4802 { 4803 int i; 4804 u16 iu_length; 4805 int sg_count; 4806 bool chained; 4807 unsigned int num_sg_in_iu; 4808 unsigned int max_sg_per_iu; 4809 struct scatterlist *sg; 4810 struct pqi_sg_descriptor *sg_descriptor; 4811 4812 sg_count = scsi_dma_map(scmd); 4813 if (sg_count < 0) 4814 return sg_count; 4815 4816 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4817 PQI_REQUEST_HEADER_LENGTH; 4818 4819 if (sg_count == 0) 4820 goto out; 4821 4822 sg = scsi_sglist(scmd); 4823 sg_descriptor = request->sg_descriptors; 4824 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4825 chained = false; 4826 num_sg_in_iu = 0; 4827 i = 0; 4828 4829 while (1) { 4830 
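/*
 * Fill the in-IU descriptors first.  At most max_sg_per_iu data entries
 * fit in the request itself; the slot reserved by the "- 1" above is
 * turned into a CISS_SG_CHAIN descriptor pointing at the preallocated
 * sg_chain_buffer, and the rest of the list continues there.
 * num_sg_in_iu counts only descriptors that occupy space in the IU.
 */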
pqi_set_sg_descriptor(sg_descriptor, sg); 4831 if (!chained) 4832 num_sg_in_iu++; 4833 i++; 4834 if (i == sg_count) 4835 break; 4836 sg_descriptor++; 4837 if (i == max_sg_per_iu) { 4838 put_unaligned_le64( 4839 (u64)io_request->sg_chain_buffer_dma_handle, 4840 &sg_descriptor->address); 4841 put_unaligned_le32((sg_count - num_sg_in_iu) 4842 * sizeof(*sg_descriptor), 4843 &sg_descriptor->length); 4844 put_unaligned_le32(CISS_SG_CHAIN, 4845 &sg_descriptor->flags); 4846 chained = true; 4847 num_sg_in_iu++; 4848 sg_descriptor = io_request->sg_chain_buffer; 4849 } 4850 sg = sg_next(sg); 4851 } 4852 4853 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4854 request->partial = chained; 4855 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4856 4857 out: 4858 put_unaligned_le16(iu_length, &request->header.iu_length); 4859 4860 return 0; 4861 } 4862 4863 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4864 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4865 struct pqi_io_request *io_request) 4866 { 4867 int i; 4868 u16 iu_length; 4869 int sg_count; 4870 bool chained; 4871 unsigned int num_sg_in_iu; 4872 unsigned int max_sg_per_iu; 4873 struct scatterlist *sg; 4874 struct pqi_sg_descriptor *sg_descriptor; 4875 4876 sg_count = scsi_dma_map(scmd); 4877 if (sg_count < 0) 4878 return sg_count; 4879 4880 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4881 PQI_REQUEST_HEADER_LENGTH; 4882 num_sg_in_iu = 0; 4883 4884 if (sg_count == 0) 4885 goto out; 4886 4887 sg = scsi_sglist(scmd); 4888 sg_descriptor = request->sg_descriptors; 4889 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4890 chained = false; 4891 i = 0; 4892 4893 while (1) { 4894 pqi_set_sg_descriptor(sg_descriptor, sg); 4895 if (!chained) 4896 num_sg_in_iu++; 4897 i++; 4898 if (i == sg_count) 4899 break; 4900 sg_descriptor++; 4901 if (i == max_sg_per_iu) { 4902 put_unaligned_le64( 4903 (u64)io_request->sg_chain_buffer_dma_handle, 4904 &sg_descriptor->address); 4905 put_unaligned_le32((sg_count - num_sg_in_iu) 4906 * sizeof(*sg_descriptor), 4907 &sg_descriptor->length); 4908 put_unaligned_le32(CISS_SG_CHAIN, 4909 &sg_descriptor->flags); 4910 chained = true; 4911 num_sg_in_iu++; 4912 sg_descriptor = io_request->sg_chain_buffer; 4913 } 4914 sg = sg_next(sg); 4915 } 4916 4917 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4918 request->partial = chained; 4919 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4920 4921 out: 4922 put_unaligned_le16(iu_length, &request->header.iu_length); 4923 request->num_sg_descriptors = num_sg_in_iu; 4924 4925 return 0; 4926 } 4927 4928 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4929 void *context) 4930 { 4931 struct scsi_cmnd *scmd; 4932 4933 scmd = io_request->scmd; 4934 pqi_free_io_request(io_request); 4935 scsi_dma_unmap(scmd); 4936 pqi_scsi_done(scmd); 4937 } 4938 4939 static int pqi_raid_submit_scsi_cmd_with_io_request( 4940 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4941 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4942 struct pqi_queue_group *queue_group) 4943 { 4944 int rc; 4945 size_t cdb_length; 4946 struct pqi_raid_path_request *request; 4947 4948 io_request->io_complete_callback = pqi_raid_io_complete; 4949 io_request->scmd = scmd; 4950 4951 request = io_request->iu; 4952 memset(request, 0, 4953 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4954 4955 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4956 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4957 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4958 put_unaligned_le16(io_request->index, &request->request_id); 4959 request->error_index = request->request_id; 4960 memcpy(request->lun_number, device->scsi3addr, 4961 sizeof(request->lun_number)); 4962 4963 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4964 memcpy(request->cdb, scmd->cmnd, cdb_length); 4965 4966 switch (cdb_length) { 4967 case 6: 4968 case 10: 4969 case 12: 4970 case 16: 4971 /* No bytes in the Additional CDB bytes field */ 4972 request->additional_cdb_bytes_usage = 4973 SOP_ADDITIONAL_CDB_BYTES_0; 4974 break; 4975 case 20: 4976 /* 4 bytes in the Additional cdb field */ 4977 request->additional_cdb_bytes_usage = 4978 SOP_ADDITIONAL_CDB_BYTES_4; 4979 break; 4980 case 24: 4981 /* 8 bytes in the Additional cdb field */ 4982 request->additional_cdb_bytes_usage = 4983 SOP_ADDITIONAL_CDB_BYTES_8; 4984 break; 4985 case 28: 4986 /* 12 bytes in the Additional cdb field */ 4987 request->additional_cdb_bytes_usage = 4988 SOP_ADDITIONAL_CDB_BYTES_12; 4989 break; 4990 case 32: 4991 default: 4992 /* 16 bytes in the Additional cdb field */ 4993 request->additional_cdb_bytes_usage = 4994 SOP_ADDITIONAL_CDB_BYTES_16; 4995 break; 4996 } 4997 4998 switch (scmd->sc_data_direction) { 4999 case DMA_TO_DEVICE: 5000 request->data_direction = SOP_READ_FLAG; 5001 break; 5002 case DMA_FROM_DEVICE: 5003 request->data_direction = SOP_WRITE_FLAG; 5004 break; 5005 case DMA_NONE: 5006 request->data_direction = SOP_NO_DIRECTION_FLAG; 5007 break; 5008 case DMA_BIDIRECTIONAL: 5009 request->data_direction = SOP_BIDIRECTIONAL; 5010 break; 5011 default: 5012 dev_err(&ctrl_info->pci_dev->dev, 5013 "unknown data direction: %d\n", 5014 scmd->sc_data_direction); 5015 break; 5016 } 5017 5018 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5019 if (rc) { 5020 pqi_free_io_request(io_request); 5021 return SCSI_MLQUEUE_HOST_BUSY; 5022 } 5023 5024 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5025 5026 return 0; 5027 } 5028 5029 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5030 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5031 struct pqi_queue_group *queue_group) 5032 { 5033 struct pqi_io_request *io_request; 5034 5035 io_request = pqi_alloc_io_request(ctrl_info); 5036 5037 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5038 device, scmd, queue_group); 5039 } 5040 5041 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 5042 { 5043 if (!pqi_ctrl_blocked(ctrl_info)) 5044 schedule_work(&ctrl_info->raid_bypass_retry_work); 5045 } 5046 5047 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5048 { 5049 struct scsi_cmnd *scmd; 5050 struct pqi_scsi_dev *device; 5051 struct pqi_ctrl_info *ctrl_info; 5052 5053 if (!io_request->raid_bypass) 5054 return false; 5055 5056 scmd = io_request->scmd; 5057 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5058 return false; 5059 if (host_byte(scmd->result) == DID_NO_CONNECT) 5060 return false; 5061 5062 device = scmd->device->hostdata; 5063 if (pqi_device_offline(device)) 5064 return false; 5065 5066 ctrl_info = shost_to_hba(scmd->device->host); 5067 if (pqi_ctrl_offline(ctrl_info)) 5068 return false; 5069 5070 return true; 5071 } 5072 5073 static inline void pqi_add_to_raid_bypass_retry_list( 5074 struct pqi_ctrl_info *ctrl_info, 5075 struct pqi_io_request *io_request, bool at_head) 5076 { 5077 unsigned long flags; 5078 5079 
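/*
 * at_head is used when re-queuing a request whose resubmission just
 * failed, so it keeps its place; newly failed bypass requests are
 * appended at the tail.
 */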
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5080 if (at_head) 5081 list_add(&io_request->request_list_entry, 5082 &ctrl_info->raid_bypass_retry_list); 5083 else 5084 list_add_tail(&io_request->request_list_entry, 5085 &ctrl_info->raid_bypass_retry_list); 5086 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5087 } 5088 5089 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 5090 void *context) 5091 { 5092 struct scsi_cmnd *scmd; 5093 5094 scmd = io_request->scmd; 5095 pqi_free_io_request(io_request); 5096 pqi_scsi_done(scmd); 5097 } 5098 5099 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 5100 { 5101 struct scsi_cmnd *scmd; 5102 struct pqi_ctrl_info *ctrl_info; 5103 5104 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 5105 scmd = io_request->scmd; 5106 scmd->result = 0; 5107 ctrl_info = shost_to_hba(scmd->device->host); 5108 5109 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 5110 pqi_schedule_bypass_retry(ctrl_info); 5111 } 5112 5113 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 5114 { 5115 struct scsi_cmnd *scmd; 5116 struct pqi_scsi_dev *device; 5117 struct pqi_ctrl_info *ctrl_info; 5118 struct pqi_queue_group *queue_group; 5119 5120 scmd = io_request->scmd; 5121 device = scmd->device->hostdata; 5122 if (pqi_device_in_reset(device)) { 5123 pqi_free_io_request(io_request); 5124 set_host_byte(scmd, DID_RESET); 5125 pqi_scsi_done(scmd); 5126 return 0; 5127 } 5128 5129 ctrl_info = shost_to_hba(scmd->device->host); 5130 queue_group = io_request->queue_group; 5131 5132 pqi_reinit_io_request(io_request); 5133 5134 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5135 device, scmd, queue_group); 5136 } 5137 5138 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 5139 struct pqi_ctrl_info *ctrl_info) 5140 { 5141 unsigned long flags; 5142 struct pqi_io_request *io_request; 5143 5144 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5145 io_request = list_first_entry_or_null( 5146 &ctrl_info->raid_bypass_retry_list, 5147 struct pqi_io_request, request_list_entry); 5148 if (io_request) 5149 list_del(&io_request->request_list_entry); 5150 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5151 5152 return io_request; 5153 } 5154 5155 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 5156 { 5157 int rc; 5158 struct pqi_io_request *io_request; 5159 5160 pqi_ctrl_busy(ctrl_info); 5161 5162 while (1) { 5163 if (pqi_ctrl_blocked(ctrl_info)) 5164 break; 5165 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 5166 if (!io_request) 5167 break; 5168 rc = pqi_retry_raid_bypass(io_request); 5169 if (rc) { 5170 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 5171 true); 5172 pqi_schedule_bypass_retry(ctrl_info); 5173 break; 5174 } 5175 } 5176 5177 pqi_ctrl_unbusy(ctrl_info); 5178 } 5179 5180 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 5181 { 5182 struct pqi_ctrl_info *ctrl_info; 5183 5184 ctrl_info = container_of(work, struct pqi_ctrl_info, 5185 raid_bypass_retry_work); 5186 pqi_retry_raid_bypass_requests(ctrl_info); 5187 } 5188 5189 static void pqi_clear_all_queued_raid_bypass_retries( 5190 struct pqi_ctrl_info *ctrl_info) 5191 { 5192 unsigned long flags; 5193 5194 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5195 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 5196 
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5197 } 5198 5199 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5200 void *context) 5201 { 5202 struct scsi_cmnd *scmd; 5203 5204 scmd = io_request->scmd; 5205 scsi_dma_unmap(scmd); 5206 if (io_request->status == -EAGAIN) 5207 set_host_byte(scmd, DID_IMM_RETRY); 5208 else if (pqi_raid_bypass_retry_needed(io_request)) { 5209 pqi_queue_raid_bypass_retry(io_request); 5210 return; 5211 } 5212 pqi_free_io_request(io_request); 5213 pqi_scsi_done(scmd); 5214 } 5215 5216 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5217 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5218 struct pqi_queue_group *queue_group) 5219 { 5220 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5221 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 5222 } 5223 5224 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5225 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5226 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5227 struct pqi_encryption_info *encryption_info, bool raid_bypass) 5228 { 5229 int rc; 5230 struct pqi_io_request *io_request; 5231 struct pqi_aio_path_request *request; 5232 5233 io_request = pqi_alloc_io_request(ctrl_info); 5234 io_request->io_complete_callback = pqi_aio_io_complete; 5235 io_request->scmd = scmd; 5236 io_request->raid_bypass = raid_bypass; 5237 5238 request = io_request->iu; 5239 memset(request, 0, 5240 offsetof(struct pqi_raid_path_request, sg_descriptors)); 5241 5242 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5243 put_unaligned_le32(aio_handle, &request->nexus_id); 5244 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5245 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5246 put_unaligned_le16(io_request->index, &request->request_id); 5247 request->error_index = request->request_id; 5248 if (cdb_length > sizeof(request->cdb)) 5249 cdb_length = sizeof(request->cdb); 5250 request->cdb_length = cdb_length; 5251 memcpy(request->cdb, cdb, cdb_length); 5252 5253 switch (scmd->sc_data_direction) { 5254 case DMA_TO_DEVICE: 5255 request->data_direction = SOP_READ_FLAG; 5256 break; 5257 case DMA_FROM_DEVICE: 5258 request->data_direction = SOP_WRITE_FLAG; 5259 break; 5260 case DMA_NONE: 5261 request->data_direction = SOP_NO_DIRECTION_FLAG; 5262 break; 5263 case DMA_BIDIRECTIONAL: 5264 request->data_direction = SOP_BIDIRECTIONAL; 5265 break; 5266 default: 5267 dev_err(&ctrl_info->pci_dev->dev, 5268 "unknown data direction: %d\n", 5269 scmd->sc_data_direction); 5270 break; 5271 } 5272 5273 if (encryption_info) { 5274 request->encryption_enable = true; 5275 put_unaligned_le16(encryption_info->data_encryption_key_index, 5276 &request->data_encryption_key_index); 5277 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5278 &request->encrypt_tweak_lower); 5279 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5280 &request->encrypt_tweak_upper); 5281 } 5282 5283 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5284 if (rc) { 5285 pqi_free_io_request(io_request); 5286 return SCSI_MLQUEUE_HOST_BUSY; 5287 } 5288 5289 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5290 5291 return 0; 5292 } 5293 5294 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5295 struct scsi_cmnd *scmd) 5296 { 5297 u16 hw_queue; 5298 5299 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 5300 if (hw_queue > ctrl_info->max_hw_queue_index) 5301 hw_queue = 
0; 5302 5303 return hw_queue; 5304 } 5305 5306 /* 5307 * This function gets called just before we hand the completed SCSI request 5308 * back to the SML. 5309 */ 5310 5311 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5312 { 5313 struct pqi_scsi_dev *device; 5314 5315 if (!scmd->device) { 5316 set_host_byte(scmd, DID_NO_CONNECT); 5317 return; 5318 } 5319 5320 device = scmd->device->hostdata; 5321 if (!device) { 5322 set_host_byte(scmd, DID_NO_CONNECT); 5323 return; 5324 } 5325 5326 atomic_dec(&device->scsi_cmds_outstanding); 5327 } 5328 5329 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 5330 struct scsi_cmnd *scmd) 5331 { 5332 int rc; 5333 struct pqi_ctrl_info *ctrl_info; 5334 struct pqi_scsi_dev *device; 5335 u16 hw_queue; 5336 struct pqi_queue_group *queue_group; 5337 bool raid_bypassed; 5338 5339 device = scmd->device->hostdata; 5340 ctrl_info = shost_to_hba(shost); 5341 5342 if (!device) { 5343 set_host_byte(scmd, DID_NO_CONNECT); 5344 pqi_scsi_done(scmd); 5345 return 0; 5346 } 5347 5348 atomic_inc(&device->scsi_cmds_outstanding); 5349 5350 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info, 5351 device)) { 5352 set_host_byte(scmd, DID_NO_CONNECT); 5353 pqi_scsi_done(scmd); 5354 return 0; 5355 } 5356 5357 pqi_ctrl_busy(ctrl_info); 5358 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || 5359 pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) { 5360 rc = SCSI_MLQUEUE_HOST_BUSY; 5361 goto out; 5362 } 5363 5364 /* 5365 * This is necessary because the SML doesn't zero out this field during 5366 * error recovery. 5367 */ 5368 scmd->result = 0; 5369 5370 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 5371 queue_group = &ctrl_info->queue_groups[hw_queue]; 5372 5373 if (pqi_is_logical_device(device)) { 5374 raid_bypassed = false; 5375 if (device->raid_bypass_enabled && 5376 !blk_rq_is_passthrough(scmd->request)) { 5377 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 5378 scmd, queue_group); 5379 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 5380 raid_bypassed = true; 5381 } 5382 if (!raid_bypassed) 5383 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5384 queue_group); 5385 } else { 5386 if (device->aio_enabled) 5387 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 5388 queue_group); 5389 else 5390 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5391 queue_group); 5392 } 5393 5394 out: 5395 pqi_ctrl_unbusy(ctrl_info); 5396 if (rc) 5397 atomic_dec(&device->scsi_cmds_outstanding); 5398 5399 return rc; 5400 } 5401 5402 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5403 struct pqi_queue_group *queue_group) 5404 { 5405 unsigned int path; 5406 unsigned long flags; 5407 bool list_is_empty; 5408 5409 for (path = 0; path < 2; path++) { 5410 while (1) { 5411 spin_lock_irqsave( 5412 &queue_group->submit_lock[path], flags); 5413 list_is_empty = 5414 list_empty(&queue_group->request_list[path]); 5415 spin_unlock_irqrestore( 5416 &queue_group->submit_lock[path], flags); 5417 if (list_is_empty) 5418 break; 5419 pqi_check_ctrl_health(ctrl_info); 5420 if (pqi_ctrl_offline(ctrl_info)) 5421 return -ENXIO; 5422 usleep_range(1000, 2000); 5423 } 5424 } 5425 5426 return 0; 5427 } 5428 5429 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5430 { 5431 int rc; 5432 unsigned int i; 5433 unsigned int path; 5434 struct pqi_queue_group *queue_group; 5435 pqi_index_t iq_pi; 5436 pqi_index_t iq_ci; 5437 5438 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5439 queue_group = 
&ctrl_info->queue_groups[i]; 5440 5441 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5442 if (rc) 5443 return rc; 5444 5445 for (path = 0; path < 2; path++) { 5446 iq_pi = queue_group->iq_pi_copy[path]; 5447 5448 while (1) { 5449 iq_ci = readl(queue_group->iq_ci[path]); 5450 if (iq_ci == iq_pi) 5451 break; 5452 pqi_check_ctrl_health(ctrl_info); 5453 if (pqi_ctrl_offline(ctrl_info)) 5454 return -ENXIO; 5455 usleep_range(1000, 2000); 5456 } 5457 } 5458 } 5459 5460 return 0; 5461 } 5462 5463 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5464 struct pqi_scsi_dev *device) 5465 { 5466 unsigned int i; 5467 unsigned int path; 5468 struct pqi_queue_group *queue_group; 5469 unsigned long flags; 5470 struct pqi_io_request *io_request; 5471 struct pqi_io_request *next; 5472 struct scsi_cmnd *scmd; 5473 struct pqi_scsi_dev *scsi_device; 5474 5475 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5476 queue_group = &ctrl_info->queue_groups[i]; 5477 5478 for (path = 0; path < 2; path++) { 5479 spin_lock_irqsave( 5480 &queue_group->submit_lock[path], flags); 5481 5482 list_for_each_entry_safe(io_request, next, 5483 &queue_group->request_list[path], 5484 request_list_entry) { 5485 scmd = io_request->scmd; 5486 if (!scmd) 5487 continue; 5488 5489 scsi_device = scmd->device->hostdata; 5490 if (scsi_device != device) 5491 continue; 5492 5493 list_del(&io_request->request_list_entry); 5494 set_host_byte(scmd, DID_RESET); 5495 pqi_scsi_done(scmd); 5496 } 5497 5498 spin_unlock_irqrestore( 5499 &queue_group->submit_lock[path], flags); 5500 } 5501 } 5502 } 5503 5504 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) 5505 { 5506 unsigned int i; 5507 unsigned int path; 5508 struct pqi_queue_group *queue_group; 5509 unsigned long flags; 5510 struct pqi_io_request *io_request; 5511 struct pqi_io_request *next; 5512 struct scsi_cmnd *scmd; 5513 5514 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5515 queue_group = &ctrl_info->queue_groups[i]; 5516 5517 for (path = 0; path < 2; path++) { 5518 spin_lock_irqsave(&queue_group->submit_lock[path], 5519 flags); 5520 5521 list_for_each_entry_safe(io_request, next, 5522 &queue_group->request_list[path], 5523 request_list_entry) { 5524 5525 scmd = io_request->scmd; 5526 if (!scmd) 5527 continue; 5528 5529 list_del(&io_request->request_list_entry); 5530 set_host_byte(scmd, DID_RESET); 5531 pqi_scsi_done(scmd); 5532 } 5533 5534 spin_unlock_irqrestore( 5535 &queue_group->submit_lock[path], flags); 5536 } 5537 } 5538 } 5539 5540 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5541 struct pqi_scsi_dev *device, unsigned long timeout_secs) 5542 { 5543 unsigned long timeout; 5544 5545 timeout = (timeout_secs * PQI_HZ) + jiffies; 5546 5547 while (atomic_read(&device->scsi_cmds_outstanding)) { 5548 pqi_check_ctrl_health(ctrl_info); 5549 if (pqi_ctrl_offline(ctrl_info)) 5550 return -ENXIO; 5551 if (timeout_secs != NO_TIMEOUT) { 5552 if (time_after(jiffies, timeout)) { 5553 dev_err(&ctrl_info->pci_dev->dev, 5554 "timed out waiting for pending IO\n"); 5555 return -ETIMEDOUT; 5556 } 5557 } 5558 usleep_range(1000, 2000); 5559 } 5560 5561 return 0; 5562 } 5563 5564 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5565 unsigned long timeout_secs) 5566 { 5567 bool io_pending; 5568 unsigned long flags; 5569 unsigned long timeout; 5570 struct pqi_scsi_dev *device; 5571 5572 timeout = (timeout_secs * PQI_HZ) + jiffies; 5573 while (1) { 5574 io_pending = false; 5575 
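/*
 * One pass over the device list under the lock; a single device with
 * outstanding commands is enough to keep waiting.
 */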
5576 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5577 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5578 scsi_device_list_entry) { 5579 if (atomic_read(&device->scsi_cmds_outstanding)) { 5580 io_pending = true; 5581 break; 5582 } 5583 } 5584 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5585 flags); 5586 5587 if (!io_pending) 5588 break; 5589 5590 pqi_check_ctrl_health(ctrl_info); 5591 if (pqi_ctrl_offline(ctrl_info)) 5592 return -ENXIO; 5593 5594 if (timeout_secs != NO_TIMEOUT) { 5595 if (time_after(jiffies, timeout)) { 5596 dev_err(&ctrl_info->pci_dev->dev, 5597 "timed out waiting for pending IO\n"); 5598 return -ETIMEDOUT; 5599 } 5600 } 5601 usleep_range(1000, 2000); 5602 } 5603 5604 return 0; 5605 } 5606 5607 static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info) 5608 { 5609 while (atomic_read(&ctrl_info->sync_cmds_outstanding)) { 5610 pqi_check_ctrl_health(ctrl_info); 5611 if (pqi_ctrl_offline(ctrl_info)) 5612 return -ENXIO; 5613 usleep_range(1000, 2000); 5614 } 5615 5616 return 0; 5617 } 5618 5619 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5620 void *context) 5621 { 5622 struct completion *waiting = context; 5623 5624 complete(waiting); 5625 } 5626 5627 #define PQI_LUN_RESET_TIMEOUT_SECS 30 5628 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 5629 5630 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5631 struct pqi_scsi_dev *device, struct completion *wait) 5632 { 5633 int rc; 5634 5635 while (1) { 5636 if (wait_for_completion_io_timeout(wait, 5637 PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) { 5638 rc = 0; 5639 break; 5640 } 5641 5642 pqi_check_ctrl_health(ctrl_info); 5643 if (pqi_ctrl_offline(ctrl_info)) { 5644 rc = -ENXIO; 5645 break; 5646 } 5647 } 5648 5649 return rc; 5650 } 5651 5652 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5653 struct pqi_scsi_dev *device) 5654 { 5655 int rc; 5656 struct pqi_io_request *io_request; 5657 DECLARE_COMPLETION_ONSTACK(wait); 5658 struct pqi_task_management_request *request; 5659 5660 io_request = pqi_alloc_io_request(ctrl_info); 5661 io_request->io_complete_callback = pqi_lun_reset_complete; 5662 io_request->context = &wait; 5663 5664 request = io_request->iu; 5665 memset(request, 0, sizeof(*request)); 5666 5667 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 5668 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5669 &request->header.iu_length); 5670 put_unaligned_le16(io_request->index, &request->request_id); 5671 memcpy(request->lun_number, device->scsi3addr, 5672 sizeof(request->lun_number)); 5673 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5674 if (ctrl_info->tmf_iu_timeout_supported) 5675 put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS, 5676 &request->timeout); 5677 5678 pqi_start_io(ctrl_info, 5679 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5680 io_request); 5681 5682 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5683 if (rc == 0) 5684 rc = io_request->status; 5685 5686 pqi_free_io_request(io_request); 5687 5688 return rc; 5689 } 5690 5691 /* Performs a reset at the LUN level. 
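 * The reset is retried up to PQI_LUN_RESET_RETRIES times, spaced
 * PQI_LUN_RESET_RETRY_INTERVAL_MSECS apart, and then outstanding I/O to
 * the device is drained (capped at PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS
 * when the reset itself did not succeed).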
*/ 5692 5693 #define PQI_LUN_RESET_RETRIES 3 5694 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 5695 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS 120 5696 5697 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5698 struct pqi_scsi_dev *device) 5699 { 5700 int rc; 5701 unsigned int retries; 5702 unsigned long timeout_secs; 5703 5704 for (retries = 0;;) { 5705 rc = pqi_lun_reset(ctrl_info, device); 5706 if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES) 5707 break; 5708 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 5709 } 5710 5711 timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT; 5712 5713 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); 5714 5715 return rc == 0 ? SUCCESS : FAILED; 5716 } 5717 5718 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5719 struct pqi_scsi_dev *device) 5720 { 5721 int rc; 5722 5723 mutex_lock(&ctrl_info->lun_reset_mutex); 5724 5725 pqi_ctrl_block_requests(ctrl_info); 5726 pqi_ctrl_wait_until_quiesced(ctrl_info); 5727 pqi_fail_io_queued_for_device(ctrl_info, device); 5728 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5729 pqi_device_reset_start(device); 5730 pqi_ctrl_unblock_requests(ctrl_info); 5731 5732 if (rc) 5733 rc = FAILED; 5734 else 5735 rc = _pqi_device_reset(ctrl_info, device); 5736 5737 pqi_device_reset_done(device); 5738 5739 mutex_unlock(&ctrl_info->lun_reset_mutex); 5740 5741 return rc; 5742 } 5743 5744 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5745 { 5746 int rc; 5747 struct Scsi_Host *shost; 5748 struct pqi_ctrl_info *ctrl_info; 5749 struct pqi_scsi_dev *device; 5750 5751 shost = scmd->device->host; 5752 ctrl_info = shost_to_hba(shost); 5753 device = scmd->device->hostdata; 5754 5755 dev_err(&ctrl_info->pci_dev->dev, 5756 "resetting scsi %d:%d:%d:%d\n", 5757 shost->host_no, device->bus, device->target, device->lun); 5758 5759 pqi_check_ctrl_health(ctrl_info); 5760 if (pqi_ctrl_offline(ctrl_info) || 5761 pqi_device_reset_blocked(ctrl_info)) { 5762 rc = FAILED; 5763 goto out; 5764 } 5765 5766 pqi_wait_until_ofa_finished(ctrl_info); 5767 5768 atomic_inc(&ctrl_info->sync_cmds_outstanding); 5769 rc = pqi_device_reset(ctrl_info, device); 5770 atomic_dec(&ctrl_info->sync_cmds_outstanding); 5771 5772 out: 5773 dev_err(&ctrl_info->pci_dev->dev, 5774 "reset of scsi %d:%d:%d:%d: %s\n", 5775 shost->host_no, device->bus, device->target, device->lun, 5776 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5777 5778 return rc; 5779 } 5780 5781 static int pqi_slave_alloc(struct scsi_device *sdev) 5782 { 5783 struct pqi_scsi_dev *device; 5784 unsigned long flags; 5785 struct pqi_ctrl_info *ctrl_info; 5786 struct scsi_target *starget; 5787 struct sas_rphy *rphy; 5788 5789 ctrl_info = shost_to_hba(sdev->host); 5790 5791 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5792 5793 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5794 starget = scsi_target(sdev); 5795 rphy = target_to_rphy(starget); 5796 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5797 if (device) { 5798 device->target = sdev_id(sdev); 5799 device->lun = sdev->lun; 5800 device->target_lun_valid = true; 5801 } 5802 } else { 5803 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5804 sdev_id(sdev), sdev->lun); 5805 } 5806 5807 if (device) { 5808 sdev->hostdata = device; 5809 device->sdev = sdev; 5810 if (device->queue_depth) { 5811 device->advertised_queue_depth = device->queue_depth; 5812 scsi_change_queue_depth(sdev, 5813 device->advertised_queue_depth); 5814 } 5815 if (pqi_is_logical_device(device)) 5816 pqi_disable_write_same(sdev); 5817 else 5818 sdev->allow_restart = 1; 5819 } 5820 5821 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5822 5823 return 0; 5824 } 5825 5826 static int pqi_map_queues(struct Scsi_Host *shost) 5827 { 5828 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5829 5830 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 5831 ctrl_info->pci_dev, 0); 5832 } 5833 5834 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5835 void __user *arg) 5836 { 5837 struct pci_dev *pci_dev; 5838 u32 subsystem_vendor; 5839 u32 subsystem_device; 5840 cciss_pci_info_struct pciinfo; 5841 5842 if (!arg) 5843 return -EINVAL; 5844 5845 pci_dev = ctrl_info->pci_dev; 5846 5847 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5848 pciinfo.bus = pci_dev->bus->number; 5849 pciinfo.dev_fn = pci_dev->devfn; 5850 subsystem_vendor = pci_dev->subsystem_vendor; 5851 subsystem_device = pci_dev->subsystem_device; 5852 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5853 subsystem_vendor; 5854 5855 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5856 return -EFAULT; 5857 5858 return 0; 5859 } 5860 5861 static int pqi_getdrivver_ioctl(void __user *arg) 5862 { 5863 u32 version; 5864 5865 if (!arg) 5866 return -EINVAL; 5867 5868 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5869 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5870 5871 if (copy_to_user(arg, &version, sizeof(version))) 5872 return -EFAULT; 5873 5874 return 0; 5875 } 5876 5877 struct ciss_error_info { 5878 u8 scsi_status; 5879 int command_status; 5880 size_t sense_data_length; 5881 }; 5882 5883 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5884 struct ciss_error_info *ciss_error_info) 5885 { 5886 int ciss_cmd_status; 5887 size_t sense_data_length; 5888 5889 switch (pqi_error_info->data_out_result) { 5890 case PQI_DATA_IN_OUT_GOOD: 5891 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5892 break; 5893 case PQI_DATA_IN_OUT_UNDERFLOW: 5894 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5895 break; 5896 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5897 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5898 break; 5899 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5900 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5901 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5902 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5903 case PQI_DATA_IN_OUT_ERROR: 5904 
ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5905 break; 5906 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5907 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 5908 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5909 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5910 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5911 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5912 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5913 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5914 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5915 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5916 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5917 break; 5918 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5919 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5920 break; 5921 case PQI_DATA_IN_OUT_ABORTED: 5922 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5923 break; 5924 case PQI_DATA_IN_OUT_TIMEOUT: 5925 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5926 break; 5927 default: 5928 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5929 break; 5930 } 5931 5932 sense_data_length = 5933 get_unaligned_le16(&pqi_error_info->sense_data_length); 5934 if (sense_data_length == 0) 5935 sense_data_length = 5936 get_unaligned_le16(&pqi_error_info->response_data_length); 5937 if (sense_data_length) 5938 if (sense_data_length > sizeof(pqi_error_info->data)) 5939 sense_data_length = sizeof(pqi_error_info->data); 5940 5941 ciss_error_info->scsi_status = pqi_error_info->status; 5942 ciss_error_info->command_status = ciss_cmd_status; 5943 ciss_error_info->sense_data_length = sense_data_length; 5944 } 5945 5946 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5947 { 5948 int rc; 5949 char *kernel_buffer = NULL; 5950 u16 iu_length; 5951 size_t sense_data_length; 5952 IOCTL_Command_struct iocommand; 5953 struct pqi_raid_path_request request; 5954 struct pqi_raid_error_info pqi_error_info; 5955 struct ciss_error_info ciss_error_info; 5956 5957 if (pqi_ctrl_offline(ctrl_info)) 5958 return -ENXIO; 5959 if (!arg) 5960 return -EINVAL; 5961 if (!capable(CAP_SYS_RAWIO)) 5962 return -EPERM; 5963 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5964 return -EFAULT; 5965 if (iocommand.buf_size < 1 && 5966 iocommand.Request.Type.Direction != XFER_NONE) 5967 return -EINVAL; 5968 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5969 return -EINVAL; 5970 if (iocommand.Request.Type.Type != TYPE_CMD) 5971 return -EINVAL; 5972 5973 switch (iocommand.Request.Type.Direction) { 5974 case XFER_NONE: 5975 case XFER_WRITE: 5976 case XFER_READ: 5977 case XFER_READ | XFER_WRITE: 5978 break; 5979 default: 5980 return -EINVAL; 5981 } 5982 5983 if (iocommand.buf_size > 0) { 5984 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5985 if (!kernel_buffer) 5986 return -ENOMEM; 5987 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5988 if (copy_from_user(kernel_buffer, iocommand.buf, 5989 iocommand.buf_size)) { 5990 rc = -EFAULT; 5991 goto out; 5992 } 5993 } else { 5994 memset(kernel_buffer, 0, iocommand.buf_size); 5995 } 5996 } 5997 5998 memset(&request, 0, sizeof(request)); 5999 6000 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6001 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 6002 PQI_REQUEST_HEADER_LENGTH; 6003 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6004 sizeof(request.lun_number)); 6005 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6006 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6007 6008 switch 
(iocommand.Request.Type.Direction) { 6009 case XFER_NONE: 6010 request.data_direction = SOP_NO_DIRECTION_FLAG; 6011 break; 6012 case XFER_WRITE: 6013 request.data_direction = SOP_WRITE_FLAG; 6014 break; 6015 case XFER_READ: 6016 request.data_direction = SOP_READ_FLAG; 6017 break; 6018 case XFER_READ | XFER_WRITE: 6019 request.data_direction = SOP_BIDIRECTIONAL; 6020 break; 6021 } 6022 6023 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6024 6025 if (iocommand.buf_size > 0) { 6026 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6027 6028 rc = pqi_map_single(ctrl_info->pci_dev, 6029 &request.sg_descriptors[0], kernel_buffer, 6030 iocommand.buf_size, DMA_BIDIRECTIONAL); 6031 if (rc) 6032 goto out; 6033 6034 iu_length += sizeof(request.sg_descriptors[0]); 6035 } 6036 6037 put_unaligned_le16(iu_length, &request.header.iu_length); 6038 6039 if (ctrl_info->raid_iu_timeout_supported) 6040 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6041 6042 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6043 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 6044 6045 if (iocommand.buf_size > 0) 6046 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6047 DMA_BIDIRECTIONAL); 6048 6049 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6050 6051 if (rc == 0) { 6052 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6053 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6054 iocommand.error_info.CommandStatus = 6055 ciss_error_info.command_status; 6056 sense_data_length = ciss_error_info.sense_data_length; 6057 if (sense_data_length) { 6058 if (sense_data_length > 6059 sizeof(iocommand.error_info.SenseInfo)) 6060 sense_data_length = 6061 sizeof(iocommand.error_info.SenseInfo); 6062 memcpy(iocommand.error_info.SenseInfo, 6063 pqi_error_info.data, sense_data_length); 6064 iocommand.error_info.SenseLen = sense_data_length; 6065 } 6066 } 6067 6068 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6069 rc = -EFAULT; 6070 goto out; 6071 } 6072 6073 if (rc == 0 && iocommand.buf_size > 0 && 6074 (iocommand.Request.Type.Direction & XFER_READ)) { 6075 if (copy_to_user(iocommand.buf, kernel_buffer, 6076 iocommand.buf_size)) { 6077 rc = -EFAULT; 6078 } 6079 } 6080 6081 out: 6082 kfree(kernel_buffer); 6083 6084 return rc; 6085 } 6086 6087 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6088 void __user *arg) 6089 { 6090 int rc; 6091 struct pqi_ctrl_info *ctrl_info; 6092 6093 ctrl_info = shost_to_hba(sdev->host); 6094 6095 if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) 6096 return -EBUSY; 6097 6098 switch (cmd) { 6099 case CCISS_DEREGDISK: 6100 case CCISS_REGNEWDISK: 6101 case CCISS_REGNEWD: 6102 rc = pqi_scan_scsi_devices(ctrl_info); 6103 break; 6104 case CCISS_GETPCIINFO: 6105 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6106 break; 6107 case CCISS_GETDRIVVER: 6108 rc = pqi_getdrivver_ioctl(arg); 6109 break; 6110 case CCISS_PASSTHRU: 6111 rc = pqi_passthru_ioctl(ctrl_info, arg); 6112 break; 6113 default: 6114 rc = -EINVAL; 6115 break; 6116 } 6117 6118 return rc; 6119 } 6120 6121 static ssize_t pqi_firmware_version_show(struct device *dev, 6122 struct device_attribute *attr, char *buffer) 6123 { 6124 struct Scsi_Host *shost; 6125 struct pqi_ctrl_info *ctrl_info; 6126 6127 shost = class_to_shost(dev); 6128 ctrl_info = shost_to_hba(shost); 6129 6130 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6131 } 6132 6133 static ssize_t 
pqi_driver_version_show(struct device *dev, 6134 struct device_attribute *attr, char *buffer) 6135 { 6136 return snprintf(buffer, PAGE_SIZE, "%s\n", 6137 DRIVER_VERSION BUILD_TIMESTAMP); 6138 } 6139 6140 static ssize_t pqi_serial_number_show(struct device *dev, 6141 struct device_attribute *attr, char *buffer) 6142 { 6143 struct Scsi_Host *shost; 6144 struct pqi_ctrl_info *ctrl_info; 6145 6146 shost = class_to_shost(dev); 6147 ctrl_info = shost_to_hba(shost); 6148 6149 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6150 } 6151 6152 static ssize_t pqi_model_show(struct device *dev, 6153 struct device_attribute *attr, char *buffer) 6154 { 6155 struct Scsi_Host *shost; 6156 struct pqi_ctrl_info *ctrl_info; 6157 6158 shost = class_to_shost(dev); 6159 ctrl_info = shost_to_hba(shost); 6160 6161 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6162 } 6163 6164 static ssize_t pqi_vendor_show(struct device *dev, 6165 struct device_attribute *attr, char *buffer) 6166 { 6167 struct Scsi_Host *shost; 6168 struct pqi_ctrl_info *ctrl_info; 6169 6170 shost = class_to_shost(dev); 6171 ctrl_info = shost_to_hba(shost); 6172 6173 return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6174 } 6175 6176 static ssize_t pqi_host_rescan_store(struct device *dev, 6177 struct device_attribute *attr, const char *buffer, size_t count) 6178 { 6179 struct Scsi_Host *shost = class_to_shost(dev); 6180 6181 pqi_scan_start(shost); 6182 6183 return count; 6184 } 6185 6186 static ssize_t pqi_lockup_action_show(struct device *dev, 6187 struct device_attribute *attr, char *buffer) 6188 { 6189 int count = 0; 6190 unsigned int i; 6191 6192 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6193 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6194 count += snprintf(buffer + count, PAGE_SIZE - count, 6195 "[%s] ", pqi_lockup_actions[i].name); 6196 else 6197 count += snprintf(buffer + count, PAGE_SIZE - count, 6198 "%s ", pqi_lockup_actions[i].name); 6199 } 6200 6201 count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); 6202 6203 return count; 6204 } 6205 6206 static ssize_t pqi_lockup_action_store(struct device *dev, 6207 struct device_attribute *attr, const char *buffer, size_t count) 6208 { 6209 unsigned int i; 6210 char *action_name; 6211 char action_name_buffer[32]; 6212 6213 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6214 action_name = strstrip(action_name_buffer); 6215 6216 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6217 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 6218 pqi_lockup_action = pqi_lockup_actions[i].action; 6219 return count; 6220 } 6221 } 6222 6223 return -EINVAL; 6224 } 6225 6226 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); 6227 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 6228 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 6229 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 6230 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 6231 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 6232 static DEVICE_ATTR(lockup_action, 0644, 6233 pqi_lockup_action_show, pqi_lockup_action_store); 6234 6235 static struct device_attribute *pqi_shost_attrs[] = { 6236 &dev_attr_driver_version, 6237 &dev_attr_firmware_version, 6238 &dev_attr_model, 6239 &dev_attr_serial_number, 6240 &dev_attr_vendor, 6241 &dev_attr_rescan, 6242 &dev_attr_lockup_action, 6243 NULL 6244 }; 6245 6246 static ssize_t 
pqi_unique_id_show(struct device *dev, 6247 struct device_attribute *attr, char *buffer) 6248 { 6249 struct pqi_ctrl_info *ctrl_info; 6250 struct scsi_device *sdev; 6251 struct pqi_scsi_dev *device; 6252 unsigned long flags; 6253 u8 unique_id[16]; 6254 6255 sdev = to_scsi_device(dev); 6256 ctrl_info = shost_to_hba(sdev->host); 6257 6258 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6259 6260 device = sdev->hostdata; 6261 if (!device) { 6262 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6263 flags); 6264 return -ENODEV; 6265 } 6266 6267 if (device->is_physical_device) { 6268 memset(unique_id, 0, 8); 6269 memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); 6270 } else { 6271 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 6272 } 6273 6274 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6275 6276 return snprintf(buffer, PAGE_SIZE, 6277 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", 6278 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 6279 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 6280 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 6281 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 6282 } 6283 6284 static ssize_t pqi_lunid_show(struct device *dev, 6285 struct device_attribute *attr, char *buffer) 6286 { 6287 struct pqi_ctrl_info *ctrl_info; 6288 struct scsi_device *sdev; 6289 struct pqi_scsi_dev *device; 6290 unsigned long flags; 6291 u8 lunid[8]; 6292 6293 sdev = to_scsi_device(dev); 6294 ctrl_info = shost_to_hba(sdev->host); 6295 6296 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6297 6298 device = sdev->hostdata; 6299 if (!device) { 6300 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6301 flags); 6302 return -ENODEV; 6303 } 6304 6305 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 6306 6307 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6308 6309 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 6310 } 6311 6312 #define MAX_PATHS 8 6313 6314 static ssize_t pqi_path_info_show(struct device *dev, 6315 struct device_attribute *attr, char *buf) 6316 { 6317 struct pqi_ctrl_info *ctrl_info; 6318 struct scsi_device *sdev; 6319 struct pqi_scsi_dev *device; 6320 unsigned long flags; 6321 int i; 6322 int output_len = 0; 6323 u8 box; 6324 u8 bay; 6325 u8 path_map_index; 6326 char *active; 6327 u8 phys_connector[2]; 6328 6329 sdev = to_scsi_device(dev); 6330 ctrl_info = shost_to_hba(sdev->host); 6331 6332 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6333 6334 device = sdev->hostdata; 6335 if (!device) { 6336 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6337 flags); 6338 return -ENODEV; 6339 } 6340 6341 bay = device->bay; 6342 for (i = 0; i < MAX_PATHS; i++) { 6343 path_map_index = 1 << i; 6344 if (i == device->active_path_index) 6345 active = "Active"; 6346 else if (device->path_map & path_map_index) 6347 active = "Inactive"; 6348 else 6349 continue; 6350 6351 output_len += scnprintf(buf + output_len, 6352 PAGE_SIZE - output_len, 6353 "[%d:%d:%d:%d] %20.20s ", 6354 ctrl_info->scsi_host->host_no, 6355 device->bus, device->target, 6356 device->lun, 6357 scsi_device_type(device->devtype)); 6358 6359 if (device->devtype == TYPE_RAID || 6360 pqi_is_logical_device(device)) 6361 goto end_buffer; 6362 6363 memcpy(&phys_connector, &device->phys_connector[i], 6364 sizeof(phys_connector)); 6365 if (phys_connector[0] < '0') 6366 phys_connector[0] = '0'; 6367 if (phys_connector[1] < '0') 6368 
phys_connector[1] = '0'; 6369 6370 output_len += scnprintf(buf + output_len, 6371 PAGE_SIZE - output_len, 6372 "PORT: %.2s ", phys_connector); 6373 6374 box = device->box[i]; 6375 if (box != 0 && box != 0xFF) 6376 output_len += scnprintf(buf + output_len, 6377 PAGE_SIZE - output_len, 6378 "BOX: %hhu ", box); 6379 6380 if ((device->devtype == TYPE_DISK || 6381 device->devtype == TYPE_ZBC) && 6382 pqi_expose_device(device)) 6383 output_len += scnprintf(buf + output_len, 6384 PAGE_SIZE - output_len, 6385 "BAY: %hhu ", bay); 6386 6387 end_buffer: 6388 output_len += scnprintf(buf + output_len, 6389 PAGE_SIZE - output_len, 6390 "%s\n", active); 6391 } 6392 6393 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6394 6395 return output_len; 6396 } 6397 6398 static ssize_t pqi_sas_address_show(struct device *dev, 6399 struct device_attribute *attr, char *buffer) 6400 { 6401 struct pqi_ctrl_info *ctrl_info; 6402 struct scsi_device *sdev; 6403 struct pqi_scsi_dev *device; 6404 unsigned long flags; 6405 u64 sas_address; 6406 6407 sdev = to_scsi_device(dev); 6408 ctrl_info = shost_to_hba(sdev->host); 6409 6410 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6411 6412 device = sdev->hostdata; 6413 if (pqi_is_logical_device(device)) { 6414 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6415 flags); 6416 return -ENODEV; 6417 } 6418 6419 sas_address = device->sas_address; 6420 6421 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6422 6423 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 6424 } 6425 6426 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 6427 struct device_attribute *attr, char *buffer) 6428 { 6429 struct pqi_ctrl_info *ctrl_info; 6430 struct scsi_device *sdev; 6431 struct pqi_scsi_dev *device; 6432 unsigned long flags; 6433 6434 sdev = to_scsi_device(dev); 6435 ctrl_info = shost_to_hba(sdev->host); 6436 6437 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6438 6439 device = sdev->hostdata; 6440 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 6441 buffer[1] = '\n'; 6442 buffer[2] = '\0'; 6443 6444 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6445 6446 return 2; 6447 } 6448 6449 static ssize_t pqi_raid_level_show(struct device *dev, 6450 struct device_attribute *attr, char *buffer) 6451 { 6452 struct pqi_ctrl_info *ctrl_info; 6453 struct scsi_device *sdev; 6454 struct pqi_scsi_dev *device; 6455 unsigned long flags; 6456 char *raid_level; 6457 6458 sdev = to_scsi_device(dev); 6459 ctrl_info = shost_to_hba(sdev->host); 6460 6461 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6462 6463 device = sdev->hostdata; 6464 6465 if (pqi_is_logical_device(device)) 6466 raid_level = pqi_raid_level_to_string(device->raid_level); 6467 else 6468 raid_level = "N/A"; 6469 6470 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6471 6472 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 6473 } 6474 6475 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 6476 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 6477 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 6478 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 6479 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 6480 pqi_ssd_smart_path_enabled_show, NULL); 6481 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 6482 6483 static struct device_attribute *pqi_sdev_attrs[] = { 6484 &dev_attr_lunid, 6485 &dev_attr_unique_id, 6486 &dev_attr_path_info, 6487 &dev_attr_sas_address, 6488 &dev_attr_ssd_smart_path_enabled, 6489 &dev_attr_raid_level, 6490 NULL 6491 }; 6492 6493 static struct scsi_host_template pqi_driver_template = { 6494 .module = THIS_MODULE, 6495 .name = DRIVER_NAME_SHORT, 6496 .proc_name = DRIVER_NAME_SHORT, 6497 .queuecommand = pqi_scsi_queue_command, 6498 .scan_start = pqi_scan_start, 6499 .scan_finished = pqi_scan_finished, 6500 .this_id = -1, 6501 .eh_device_reset_handler = pqi_eh_device_reset_handler, 6502 .ioctl = pqi_ioctl, 6503 .slave_alloc = pqi_slave_alloc, 6504 .map_queues = pqi_map_queues, 6505 .sdev_attrs = pqi_sdev_attrs, 6506 .shost_attrs = pqi_shost_attrs, 6507 }; 6508 6509 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 6510 { 6511 int rc; 6512 struct Scsi_Host *shost; 6513 6514 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 6515 if (!shost) { 6516 dev_err(&ctrl_info->pci_dev->dev, 6517 "scsi_host_alloc failed for controller %u\n", 6518 ctrl_info->ctrl_id); 6519 return -ENOMEM; 6520 } 6521 6522 shost->io_port = 0; 6523 shost->n_io_port = 0; 6524 shost->this_id = -1; 6525 shost->max_channel = PQI_MAX_BUS; 6526 shost->max_cmd_len = MAX_COMMAND_SIZE; 6527 shost->max_lun = ~0; 6528 shost->max_id = ~0; 6529 shost->max_sectors = ctrl_info->max_sectors; 6530 shost->can_queue = ctrl_info->scsi_ml_can_queue; 6531 shost->cmd_per_lun = shost->can_queue; 6532 shost->sg_tablesize = ctrl_info->sg_tablesize; 6533 shost->transportt = pqi_sas_transport_template; 6534 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 6535 shost->unique_id = shost->irq; 6536 shost->nr_hw_queues = ctrl_info->num_queue_groups; 6537 shost->hostdata[0] = (unsigned long)ctrl_info; 6538 6539 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 6540 if (rc) { 6541 dev_err(&ctrl_info->pci_dev->dev, 6542 "scsi_add_host failed for controller %u\n", 6543 ctrl_info->ctrl_id); 6544 goto free_host; 6545 } 6546 6547 rc = pqi_add_sas_host(shost, ctrl_info); 6548 if (rc) { 6549 dev_err(&ctrl_info->pci_dev->dev, 6550 "add SAS host failed for controller %u\n", 
6551 ctrl_info->ctrl_id); 6552 goto remove_host; 6553 } 6554 6555 ctrl_info->scsi_host = shost; 6556 6557 return 0; 6558 6559 remove_host: 6560 scsi_remove_host(shost); 6561 free_host: 6562 scsi_host_put(shost); 6563 6564 return rc; 6565 } 6566 6567 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 6568 { 6569 struct Scsi_Host *shost; 6570 6571 pqi_delete_sas_host(ctrl_info); 6572 6573 shost = ctrl_info->scsi_host; 6574 if (!shost) 6575 return; 6576 6577 scsi_remove_host(shost); 6578 scsi_host_put(shost); 6579 } 6580 6581 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 6582 { 6583 int rc = 0; 6584 struct pqi_device_registers __iomem *pqi_registers; 6585 unsigned long timeout; 6586 unsigned int timeout_msecs; 6587 union pqi_reset_register reset_reg; 6588 6589 pqi_registers = ctrl_info->pqi_registers; 6590 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 6591 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 6592 6593 while (1) { 6594 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 6595 reset_reg.all_bits = readl(&pqi_registers->device_reset); 6596 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 6597 break; 6598 pqi_check_ctrl_health(ctrl_info); 6599 if (pqi_ctrl_offline(ctrl_info)) { 6600 rc = -ENXIO; 6601 break; 6602 } 6603 if (time_after(jiffies, timeout)) { 6604 rc = -ETIMEDOUT; 6605 break; 6606 } 6607 } 6608 6609 return rc; 6610 } 6611 6612 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 6613 { 6614 int rc; 6615 union pqi_reset_register reset_reg; 6616 6617 if (ctrl_info->pqi_reset_quiesce_supported) { 6618 rc = sis_pqi_reset_quiesce(ctrl_info); 6619 if (rc) { 6620 dev_err(&ctrl_info->pci_dev->dev, 6621 "PQI reset failed during quiesce with error %d\n", 6622 rc); 6623 return rc; 6624 } 6625 } 6626 6627 reset_reg.all_bits = 0; 6628 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 6629 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 6630 6631 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 6632 6633 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 6634 if (rc) 6635 dev_err(&ctrl_info->pci_dev->dev, 6636 "PQI reset failed with error %d\n", rc); 6637 6638 return rc; 6639 } 6640 6641 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 6642 { 6643 int rc; 6644 struct bmic_sense_subsystem_info *sense_info; 6645 6646 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); 6647 if (!sense_info) 6648 return -ENOMEM; 6649 6650 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 6651 if (rc) 6652 goto out; 6653 6654 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 6655 sizeof(sense_info->ctrl_serial_number)); 6656 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 6657 6658 out: 6659 kfree(sense_info); 6660 6661 return rc; 6662 } 6663 6664 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 6665 { 6666 int rc; 6667 struct bmic_identify_controller *identify; 6668 6669 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 6670 if (!identify) 6671 return -ENOMEM; 6672 6673 rc = pqi_identify_controller(ctrl_info, identify); 6674 if (rc) 6675 goto out; 6676 6677 memcpy(ctrl_info->firmware_version, identify->firmware_version, 6678 sizeof(identify->firmware_version)); 6679 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 6680 snprintf(ctrl_info->firmware_version + 6681 strlen(ctrl_info->firmware_version), 6682 sizeof(ctrl_info->firmware_version), 6683 "-%u", 
get_unaligned_le16(&identify->firmware_build_number)); 6684 6685 memcpy(ctrl_info->model, identify->product_id, 6686 sizeof(identify->product_id)); 6687 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 6688 6689 memcpy(ctrl_info->vendor, identify->vendor_id, 6690 sizeof(identify->vendor_id)); 6691 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 6692 6693 out: 6694 kfree(identify); 6695 6696 return rc; 6697 } 6698 6699 struct pqi_config_table_section_info { 6700 struct pqi_ctrl_info *ctrl_info; 6701 void *section; 6702 u32 section_offset; 6703 void __iomem *section_iomem_addr; 6704 }; 6705 6706 static inline bool pqi_is_firmware_feature_supported( 6707 struct pqi_config_table_firmware_features *firmware_features, 6708 unsigned int bit_position) 6709 { 6710 unsigned int byte_index; 6711 6712 byte_index = bit_position / BITS_PER_BYTE; 6713 6714 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 6715 return false; 6716 6717 return firmware_features->features_supported[byte_index] & 6718 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 6719 } 6720 6721 static inline bool pqi_is_firmware_feature_enabled( 6722 struct pqi_config_table_firmware_features *firmware_features, 6723 void __iomem *firmware_features_iomem_addr, 6724 unsigned int bit_position) 6725 { 6726 unsigned int byte_index; 6727 u8 __iomem *features_enabled_iomem_addr; 6728 6729 byte_index = (bit_position / BITS_PER_BYTE) + 6730 (le16_to_cpu(firmware_features->num_elements) * 2); 6731 6732 features_enabled_iomem_addr = firmware_features_iomem_addr + 6733 offsetof(struct pqi_config_table_firmware_features, 6734 features_supported) + byte_index; 6735 6736 return *((__force u8 *)features_enabled_iomem_addr) & 6737 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 6738 } 6739 6740 static inline void pqi_request_firmware_feature( 6741 struct pqi_config_table_firmware_features *firmware_features, 6742 unsigned int bit_position) 6743 { 6744 unsigned int byte_index; 6745 6746 byte_index = (bit_position / BITS_PER_BYTE) + 6747 le16_to_cpu(firmware_features->num_elements); 6748 6749 firmware_features->features_supported[byte_index] |= 6750 (1 << (bit_position % BITS_PER_BYTE)); 6751 } 6752 6753 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 6754 u16 first_section, u16 last_section) 6755 { 6756 struct pqi_vendor_general_request request; 6757 6758 memset(&request, 0, sizeof(request)); 6759 6760 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 6761 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 6762 &request.header.iu_length); 6763 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 6764 &request.function_code); 6765 put_unaligned_le16(first_section, 6766 &request.data.config_table_update.first_section); 6767 put_unaligned_le16(last_section, 6768 &request.data.config_table_update.last_section); 6769 6770 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6771 0, NULL, NO_TIMEOUT); 6772 } 6773 6774 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 6775 struct pqi_config_table_firmware_features *firmware_features, 6776 void __iomem *firmware_features_iomem_addr) 6777 { 6778 void *features_requested; 6779 void __iomem *features_requested_iomem_addr; 6780 6781 features_requested = firmware_features->features_supported + 6782 le16_to_cpu(firmware_features->num_elements); 6783 6784 features_requested_iomem_addr = firmware_features_iomem_addr + 6785 (features_requested - (void *)firmware_features); 6786 6787 
memcpy_toio(features_requested_iomem_addr, features_requested, 6788 le16_to_cpu(firmware_features->num_elements)); 6789 6790 return pqi_config_table_update(ctrl_info, 6791 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 6792 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 6793 } 6794 6795 struct pqi_firmware_feature { 6796 char *feature_name; 6797 unsigned int feature_bit; 6798 bool supported; 6799 bool enabled; 6800 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 6801 struct pqi_firmware_feature *firmware_feature); 6802 }; 6803 6804 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 6805 struct pqi_firmware_feature *firmware_feature) 6806 { 6807 if (!firmware_feature->supported) { 6808 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 6809 firmware_feature->feature_name); 6810 return; 6811 } 6812 6813 if (firmware_feature->enabled) { 6814 dev_info(&ctrl_info->pci_dev->dev, 6815 "%s enabled\n", firmware_feature->feature_name); 6816 return; 6817 } 6818 6819 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 6820 firmware_feature->feature_name); 6821 } 6822 6823 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 6824 struct pqi_firmware_feature *firmware_feature) 6825 { 6826 switch (firmware_feature->feature_bit) { 6827 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 6828 ctrl_info->soft_reset_handshake_supported = 6829 firmware_feature->enabled; 6830 break; 6831 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 6832 ctrl_info->raid_iu_timeout_supported = 6833 firmware_feature->enabled; 6834 break; 6835 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 6836 ctrl_info->tmf_iu_timeout_supported = 6837 firmware_feature->enabled; 6838 break; 6839 } 6840 6841 pqi_firmware_feature_status(ctrl_info, firmware_feature); 6842 } 6843 6844 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 6845 struct pqi_firmware_feature *firmware_feature) 6846 { 6847 if (firmware_feature->feature_status) 6848 firmware_feature->feature_status(ctrl_info, firmware_feature); 6849 } 6850 6851 static DEFINE_MUTEX(pqi_firmware_features_mutex); 6852 6853 static struct pqi_firmware_feature pqi_firmware_features[] = { 6854 { 6855 .feature_name = "Online Firmware Activation", 6856 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 6857 .feature_status = pqi_firmware_feature_status, 6858 }, 6859 { 6860 .feature_name = "Serial Management Protocol", 6861 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 6862 .feature_status = pqi_firmware_feature_status, 6863 }, 6864 { 6865 .feature_name = "New Soft Reset Handshake", 6866 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 6867 .feature_status = pqi_ctrl_update_feature_flags, 6868 }, 6869 { 6870 .feature_name = "RAID IU Timeout", 6871 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 6872 .feature_status = pqi_ctrl_update_feature_flags, 6873 }, 6874 { 6875 .feature_name = "TMF IU Timeout", 6876 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 6877 .feature_status = pqi_ctrl_update_feature_flags, 6878 }, 6879 }; 6880 6881 static void pqi_process_firmware_features( 6882 struct pqi_config_table_section_info *section_info) 6883 { 6884 int rc; 6885 struct pqi_ctrl_info *ctrl_info; 6886 struct pqi_config_table_firmware_features *firmware_features; 6887 void __iomem *firmware_features_iomem_addr; 6888 unsigned int i; 6889 unsigned int num_features_supported; 6890 6891 ctrl_info = section_info->ctrl_info; 6892 firmware_features = section_info->section; 6893 firmware_features_iomem_addr = 
section_info->section_iomem_addr; 6894 6895 for (i = 0, num_features_supported = 0; 6896 i < ARRAY_SIZE(pqi_firmware_features); i++) { 6897 if (pqi_is_firmware_feature_supported(firmware_features, 6898 pqi_firmware_features[i].feature_bit)) { 6899 pqi_firmware_features[i].supported = true; 6900 num_features_supported++; 6901 } else { 6902 pqi_firmware_feature_update(ctrl_info, 6903 &pqi_firmware_features[i]); 6904 } 6905 } 6906 6907 if (num_features_supported == 0) 6908 return; 6909 6910 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6911 if (!pqi_firmware_features[i].supported) 6912 continue; 6913 pqi_request_firmware_feature(firmware_features, 6914 pqi_firmware_features[i].feature_bit); 6915 } 6916 6917 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 6918 firmware_features_iomem_addr); 6919 if (rc) { 6920 dev_err(&ctrl_info->pci_dev->dev, 6921 "failed to enable firmware features in PQI configuration table\n"); 6922 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6923 if (!pqi_firmware_features[i].supported) 6924 continue; 6925 pqi_firmware_feature_update(ctrl_info, 6926 &pqi_firmware_features[i]); 6927 } 6928 return; 6929 } 6930 6931 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6932 if (!pqi_firmware_features[i].supported) 6933 continue; 6934 if (pqi_is_firmware_feature_enabled(firmware_features, 6935 firmware_features_iomem_addr, 6936 pqi_firmware_features[i].feature_bit)) { 6937 pqi_firmware_features[i].enabled = true; 6938 } 6939 pqi_firmware_feature_update(ctrl_info, 6940 &pqi_firmware_features[i]); 6941 } 6942 } 6943 6944 static void pqi_init_firmware_features(void) 6945 { 6946 unsigned int i; 6947 6948 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6949 pqi_firmware_features[i].supported = false; 6950 pqi_firmware_features[i].enabled = false; 6951 } 6952 } 6953 6954 static void pqi_process_firmware_features_section( 6955 struct pqi_config_table_section_info *section_info) 6956 { 6957 mutex_lock(&pqi_firmware_features_mutex); 6958 pqi_init_firmware_features(); 6959 pqi_process_firmware_features(section_info); 6960 mutex_unlock(&pqi_firmware_features_mutex); 6961 } 6962 6963 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 6964 { 6965 u32 table_length; 6966 u32 section_offset; 6967 void __iomem *table_iomem_addr; 6968 struct pqi_config_table *config_table; 6969 struct pqi_config_table_section_header *section; 6970 struct pqi_config_table_section_info section_info; 6971 6972 table_length = ctrl_info->config_table_length; 6973 if (table_length == 0) 6974 return 0; 6975 6976 config_table = kmalloc(table_length, GFP_KERNEL); 6977 if (!config_table) { 6978 dev_err(&ctrl_info->pci_dev->dev, 6979 "failed to allocate memory for PQI configuration table\n"); 6980 return -ENOMEM; 6981 } 6982 6983 /* 6984 * Copy the config table contents from I/O memory space into the 6985 * temporary buffer. 
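 * (via memcpy_fromio) so that the section headers can be parsed with
 * ordinary memory accesses.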
6986 */ 6987 table_iomem_addr = ctrl_info->iomem_base + 6988 ctrl_info->config_table_offset; 6989 memcpy_fromio(config_table, table_iomem_addr, table_length); 6990
6991 section_info.ctrl_info = ctrl_info; 6992 section_offset = 6993 get_unaligned_le32(&config_table->first_section_offset); 6994
6995 while (section_offset) { 6996 section = (void *)config_table + section_offset; 6997 6998 section_info.section = section; 6999 section_info.section_offset = section_offset; 7000 section_info.section_iomem_addr = 7001 table_iomem_addr + section_offset; 7002
7003 switch (get_unaligned_le16(&section->section_id)) { 7004 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 7005 pqi_process_firmware_features_section(&section_info); 7006 break; 7007 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 7008 if (pqi_disable_heartbeat) 7009 dev_warn(&ctrl_info->pci_dev->dev, 7010 "heartbeat disabled by module parameter\n"); 7011 else 7012 ctrl_info->heartbeat_counter = 7013 table_iomem_addr + 7014 section_offset + 7015 offsetof( 7016 struct pqi_config_table_heartbeat, 7017 heartbeat_counter); 7018 break; 7019 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 7020 ctrl_info->soft_reset_status = 7021 table_iomem_addr + 7022 section_offset + 7023 offsetof(struct pqi_config_table_soft_reset, 7024 soft_reset_status); 7025 break; 7026 } 7027
7028 section_offset = 7029 get_unaligned_le16(&section->next_section_offset); 7030 } 7031
7032 kfree(config_table); 7033 7034 return 0; 7035 } 7036
7037 /* Switches the controller from PQI mode back into SIS mode. */ 7038
7039 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 7040 { 7041 int rc; 7042 7043 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 7044 rc = pqi_reset(ctrl_info); 7045 if (rc) 7046 return rc; 7047 rc = sis_reenable_sis_mode(ctrl_info); 7048 if (rc) { 7049 dev_err(&ctrl_info->pci_dev->dev, 7050 "re-enabling SIS mode failed with error %d\n", rc); 7051 return rc; 7052 } 7053 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7054 7055 return 0; 7056 } 7057
7058 /* 7059 * If the controller isn't already in SIS mode, this function forces it into 7060 * SIS mode. 7061 */ 7062
7063 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 7064 { 7065 if (!sis_is_firmware_running(ctrl_info)) 7066 return -ENXIO; 7067 7068 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 7069 return 0; 7070 7071 if (sis_is_kernel_up(ctrl_info)) { 7072 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7073 return 0; 7074 } 7075 7076 return pqi_revert_to_sis_mode(ctrl_info); 7077 } 7078
7079 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 7080
7081 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 7082 { 7083 int rc; 7084 7085 if (reset_devices) { 7086 sis_soft_reset(ctrl_info); 7087 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); 7088 } else { 7089 rc = pqi_force_sis_mode(ctrl_info); 7090 if (rc) 7091 return rc; 7092 } 7093
7094 /* 7095 * Wait until the controller is ready to start accepting SIS 7096 * commands. 7097 */ 7098 rc = sis_wait_for_ctrl_ready(ctrl_info); 7099 if (rc) 7100 return rc; 7101
7102 /* 7103 * Get the controller properties. This allows us to determine 7104 * whether or not it supports PQI mode.
7105 */ 7106 rc = sis_get_ctrl_properties(ctrl_info); 7107 if (rc) { 7108 dev_err(&ctrl_info->pci_dev->dev, 7109 "error obtaining controller properties\n"); 7110 return rc; 7111 } 7112 7113 rc = sis_get_pqi_capabilities(ctrl_info); 7114 if (rc) { 7115 dev_err(&ctrl_info->pci_dev->dev, 7116 "error obtaining controller capabilities\n"); 7117 return rc; 7118 } 7119 7120 if (reset_devices) { 7121 if (ctrl_info->max_outstanding_requests > 7122 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 7123 ctrl_info->max_outstanding_requests = 7124 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 7125 } else { 7126 if (ctrl_info->max_outstanding_requests > 7127 PQI_MAX_OUTSTANDING_REQUESTS) 7128 ctrl_info->max_outstanding_requests = 7129 PQI_MAX_OUTSTANDING_REQUESTS; 7130 } 7131 7132 pqi_calculate_io_resources(ctrl_info); 7133 7134 rc = pqi_alloc_error_buffer(ctrl_info); 7135 if (rc) { 7136 dev_err(&ctrl_info->pci_dev->dev, 7137 "failed to allocate PQI error buffer\n"); 7138 return rc; 7139 } 7140 7141 /* 7142 * If the function we are about to call succeeds, the 7143 * controller will transition from legacy SIS mode 7144 * into PQI mode. 7145 */ 7146 rc = sis_init_base_struct_addr(ctrl_info); 7147 if (rc) { 7148 dev_err(&ctrl_info->pci_dev->dev, 7149 "error initializing PQI mode\n"); 7150 return rc; 7151 } 7152 7153 /* Wait for the controller to complete the SIS -> PQI transition. */ 7154 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 7155 if (rc) { 7156 dev_err(&ctrl_info->pci_dev->dev, 7157 "transition to PQI mode failed\n"); 7158 return rc; 7159 } 7160 7161 /* From here on, we are running in PQI mode. */ 7162 ctrl_info->pqi_mode_enabled = true; 7163 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7164 7165 rc = pqi_alloc_admin_queues(ctrl_info); 7166 if (rc) { 7167 dev_err(&ctrl_info->pci_dev->dev, 7168 "failed to allocate admin queues\n"); 7169 return rc; 7170 } 7171 7172 rc = pqi_create_admin_queues(ctrl_info); 7173 if (rc) { 7174 dev_err(&ctrl_info->pci_dev->dev, 7175 "error creating admin queues\n"); 7176 return rc; 7177 } 7178 7179 rc = pqi_report_device_capability(ctrl_info); 7180 if (rc) { 7181 dev_err(&ctrl_info->pci_dev->dev, 7182 "obtaining device capability failed\n"); 7183 return rc; 7184 } 7185 7186 rc = pqi_validate_device_capability(ctrl_info); 7187 if (rc) 7188 return rc; 7189 7190 pqi_calculate_queue_resources(ctrl_info); 7191 7192 rc = pqi_enable_msix_interrupts(ctrl_info); 7193 if (rc) 7194 return rc; 7195 7196 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 7197 ctrl_info->max_msix_vectors = 7198 ctrl_info->num_msix_vectors_enabled; 7199 pqi_calculate_queue_resources(ctrl_info); 7200 } 7201 7202 rc = pqi_alloc_io_resources(ctrl_info); 7203 if (rc) 7204 return rc; 7205 7206 rc = pqi_alloc_operational_queues(ctrl_info); 7207 if (rc) { 7208 dev_err(&ctrl_info->pci_dev->dev, 7209 "failed to allocate operational queues\n"); 7210 return rc; 7211 } 7212 7213 pqi_init_operational_queues(ctrl_info); 7214 7215 rc = pqi_request_irqs(ctrl_info); 7216 if (rc) 7217 return rc; 7218 7219 rc = pqi_create_queues(ctrl_info); 7220 if (rc) 7221 return rc; 7222 7223 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7224 7225 ctrl_info->controller_online = true; 7226 7227 rc = pqi_process_config_table(ctrl_info); 7228 if (rc) 7229 return rc; 7230 7231 pqi_start_heartbeat_timer(ctrl_info); 7232 7233 rc = pqi_enable_events(ctrl_info); 7234 if (rc) { 7235 dev_err(&ctrl_info->pci_dev->dev, 7236 "error enabling events\n"); 7237 return rc; 7238 } 7239 7240 /* Register with the SCSI subsystem. 
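 * pqi_register_scsi() allocates the Scsi_Host, calls scsi_add_host(), and
 * attaches the SAS transport via pqi_add_sas_host().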
*/ 7241 rc = pqi_register_scsi(ctrl_info); 7242 if (rc) 7243 return rc; 7244 7245 rc = pqi_get_ctrl_product_details(ctrl_info); 7246 if (rc) { 7247 dev_err(&ctrl_info->pci_dev->dev, 7248 "error obtaining product details\n"); 7249 return rc; 7250 } 7251 7252 rc = pqi_get_ctrl_serial_number(ctrl_info); 7253 if (rc) { 7254 dev_err(&ctrl_info->pci_dev->dev, 7255 "error obtaining ctrl serial number\n"); 7256 return rc; 7257 } 7258 7259 rc = pqi_set_diag_rescan(ctrl_info); 7260 if (rc) { 7261 dev_err(&ctrl_info->pci_dev->dev, 7262 "error enabling multi-lun rescan\n"); 7263 return rc; 7264 } 7265 7266 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7267 if (rc) { 7268 dev_err(&ctrl_info->pci_dev->dev, 7269 "error updating host wellness\n"); 7270 return rc; 7271 } 7272 7273 pqi_schedule_update_time_worker(ctrl_info); 7274 7275 pqi_scan_scsi_devices(ctrl_info); 7276 7277 return 0; 7278 } 7279 7280 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 7281 { 7282 unsigned int i; 7283 struct pqi_admin_queues *admin_queues; 7284 struct pqi_event_queue *event_queue; 7285 7286 admin_queues = &ctrl_info->admin_queues; 7287 admin_queues->iq_pi_copy = 0; 7288 admin_queues->oq_ci_copy = 0; 7289 writel(0, admin_queues->oq_pi); 7290 7291 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 7292 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 7293 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 7294 ctrl_info->queue_groups[i].oq_ci_copy = 0; 7295 7296 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 7297 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 7298 writel(0, ctrl_info->queue_groups[i].oq_pi); 7299 } 7300 7301 event_queue = &ctrl_info->event_queue; 7302 writel(0, event_queue->oq_pi); 7303 event_queue->oq_ci_copy = 0; 7304 } 7305 7306 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 7307 { 7308 int rc; 7309 7310 rc = pqi_force_sis_mode(ctrl_info); 7311 if (rc) 7312 return rc; 7313 7314 /* 7315 * Wait until the controller is ready to start accepting SIS 7316 * commands. 7317 */ 7318 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 7319 if (rc) 7320 return rc; 7321 7322 /* 7323 * Get the controller properties. This allows us to determine 7324 * whether or not it supports PQI mode. 7325 */ 7326 rc = sis_get_ctrl_properties(ctrl_info); 7327 if (rc) { 7328 dev_err(&ctrl_info->pci_dev->dev, 7329 "error obtaining controller properties\n"); 7330 return rc; 7331 } 7332 7333 rc = sis_get_pqi_capabilities(ctrl_info); 7334 if (rc) { 7335 dev_err(&ctrl_info->pci_dev->dev, 7336 "error obtaining controller capabilities\n"); 7337 return rc; 7338 } 7339 7340 /* 7341 * If the function we are about to call succeeds, the 7342 * controller will transition from legacy SIS mode 7343 * into PQI mode. 7344 */ 7345 rc = sis_init_base_struct_addr(ctrl_info); 7346 if (rc) { 7347 dev_err(&ctrl_info->pci_dev->dev, 7348 "error initializing PQI mode\n"); 7349 return rc; 7350 } 7351 7352 /* Wait for the controller to complete the SIS -> PQI transition. */ 7353 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 7354 if (rc) { 7355 dev_err(&ctrl_info->pci_dev->dev, 7356 "transition to PQI mode failed\n"); 7357 return rc; 7358 } 7359 7360 /* From here on, we are running in PQI mode. 
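 * pqi_mode_enabled also tells pqi_remove_ctrl() that the controller must
 * be reverted to SIS mode at teardown.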
*/ 7361 ctrl_info->pqi_mode_enabled = true; 7362 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7363 7364 pqi_reinit_queues(ctrl_info); 7365 7366 rc = pqi_create_admin_queues(ctrl_info); 7367 if (rc) { 7368 dev_err(&ctrl_info->pci_dev->dev, 7369 "error creating admin queues\n"); 7370 return rc; 7371 } 7372 7373 rc = pqi_create_queues(ctrl_info); 7374 if (rc) 7375 return rc; 7376 7377 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7378 7379 ctrl_info->controller_online = true; 7380 pqi_ctrl_unblock_requests(ctrl_info); 7381 7382 rc = pqi_process_config_table(ctrl_info); 7383 if (rc) 7384 return rc; 7385 7386 pqi_start_heartbeat_timer(ctrl_info); 7387 7388 rc = pqi_enable_events(ctrl_info); 7389 if (rc) { 7390 dev_err(&ctrl_info->pci_dev->dev, 7391 "error enabling events\n"); 7392 return rc; 7393 } 7394 7395 rc = pqi_get_ctrl_product_details(ctrl_info); 7396 if (rc) { 7397 dev_err(&ctrl_info->pci_dev->dev, 7398 "error obtaining product details\n"); 7399 return rc; 7400 } 7401 7402 rc = pqi_set_diag_rescan(ctrl_info); 7403 if (rc) { 7404 dev_err(&ctrl_info->pci_dev->dev, 7405 "error enabling multi-lun rescan\n"); 7406 return rc; 7407 } 7408 7409 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7410 if (rc) { 7411 dev_err(&ctrl_info->pci_dev->dev, 7412 "error updating host wellness\n"); 7413 return rc; 7414 } 7415 7416 pqi_schedule_update_time_worker(ctrl_info); 7417 7418 pqi_scan_scsi_devices(ctrl_info); 7419 7420 return 0; 7421 } 7422 7423 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 7424 u16 timeout) 7425 { 7426 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 7427 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 7428 } 7429 7430 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 7431 { 7432 int rc; 7433 u64 mask; 7434 7435 rc = pci_enable_device(ctrl_info->pci_dev); 7436 if (rc) { 7437 dev_err(&ctrl_info->pci_dev->dev, 7438 "failed to enable PCI device\n"); 7439 return rc; 7440 } 7441 7442 if (sizeof(dma_addr_t) > 4) 7443 mask = DMA_BIT_MASK(64); 7444 else 7445 mask = DMA_BIT_MASK(32); 7446 7447 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 7448 if (rc) { 7449 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 7450 goto disable_device; 7451 } 7452 7453 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 7454 if (rc) { 7455 dev_err(&ctrl_info->pci_dev->dev, 7456 "failed to obtain PCI resources\n"); 7457 goto disable_device; 7458 } 7459 7460 ctrl_info->iomem_base = ioremap(pci_resource_start( 7461 ctrl_info->pci_dev, 0), 7462 sizeof(struct pqi_ctrl_registers)); 7463 if (!ctrl_info->iomem_base) { 7464 dev_err(&ctrl_info->pci_dev->dev, 7465 "failed to map memory for controller registers\n"); 7466 rc = -ENOMEM; 7467 goto release_regions; 7468 } 7469 7470 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 7471 7472 /* Increase the PCIe completion timeout. */ 7473 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 7474 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 7475 if (rc) { 7476 dev_err(&ctrl_info->pci_dev->dev, 7477 "failed to set PCIe completion timeout\n"); 7478 goto release_regions; 7479 } 7480 7481 /* Enable bus mastering. 
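 * Bus mastering must be enabled before the controller can DMA request and
 * response data to and from host memory.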
*/ 7482 pci_set_master(ctrl_info->pci_dev); 7483 7484 ctrl_info->registers = ctrl_info->iomem_base; 7485 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 7486 7487 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 7488 7489 return 0; 7490 7491 release_regions: 7492 pci_release_regions(ctrl_info->pci_dev); 7493 disable_device: 7494 pci_disable_device(ctrl_info->pci_dev); 7495 7496 return rc; 7497 } 7498 7499 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 7500 { 7501 iounmap(ctrl_info->iomem_base); 7502 pci_release_regions(ctrl_info->pci_dev); 7503 if (pci_is_enabled(ctrl_info->pci_dev)) 7504 pci_disable_device(ctrl_info->pci_dev); 7505 pci_set_drvdata(ctrl_info->pci_dev, NULL); 7506 } 7507 7508 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 7509 { 7510 struct pqi_ctrl_info *ctrl_info; 7511 7512 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 7513 GFP_KERNEL, numa_node); 7514 if (!ctrl_info) 7515 return NULL; 7516 7517 mutex_init(&ctrl_info->scan_mutex); 7518 mutex_init(&ctrl_info->lun_reset_mutex); 7519 mutex_init(&ctrl_info->ofa_mutex); 7520 7521 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 7522 spin_lock_init(&ctrl_info->scsi_device_list_lock); 7523 7524 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 7525 atomic_set(&ctrl_info->num_interrupts, 0); 7526 atomic_set(&ctrl_info->sync_cmds_outstanding, 0); 7527 7528 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 7529 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 7530 7531 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 7532 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 7533 7534 sema_init(&ctrl_info->sync_request_sem, 7535 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 7536 init_waitqueue_head(&ctrl_info->block_requests_wait); 7537 7538 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 7539 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 7540 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 7541 pqi_raid_bypass_retry_worker); 7542 7543 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 7544 ctrl_info->irq_mode = IRQ_MODE_NONE; 7545 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 7546 7547 return ctrl_info; 7548 } 7549 7550 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 7551 { 7552 kfree(ctrl_info); 7553 } 7554 7555 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 7556 { 7557 pqi_free_irqs(ctrl_info); 7558 pqi_disable_msix_interrupts(ctrl_info); 7559 } 7560 7561 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 7562 { 7563 pqi_stop_heartbeat_timer(ctrl_info); 7564 pqi_free_interrupts(ctrl_info); 7565 if (ctrl_info->queue_memory_base) 7566 dma_free_coherent(&ctrl_info->pci_dev->dev, 7567 ctrl_info->queue_memory_length, 7568 ctrl_info->queue_memory_base, 7569 ctrl_info->queue_memory_base_dma_handle); 7570 if (ctrl_info->admin_queue_memory_base) 7571 dma_free_coherent(&ctrl_info->pci_dev->dev, 7572 ctrl_info->admin_queue_memory_length, 7573 ctrl_info->admin_queue_memory_base, 7574 ctrl_info->admin_queue_memory_base_dma_handle); 7575 pqi_free_all_io_requests(ctrl_info); 7576 if (ctrl_info->error_buffer) 7577 dma_free_coherent(&ctrl_info->pci_dev->dev, 7578 ctrl_info->error_buffer_length, 7579 ctrl_info->error_buffer, 7580 ctrl_info->error_buffer_dma_handle); 7581 if (ctrl_info->iomem_base) 7582 pqi_cleanup_pci_init(ctrl_info); 7583 pqi_free_ctrl_info(ctrl_info); 7584 } 7585 7586 static void pqi_remove_ctrl(struct 
pqi_ctrl_info *ctrl_info) 7587 { 7588 pqi_cancel_rescan_worker(ctrl_info); 7589 pqi_cancel_update_time_worker(ctrl_info); 7590 pqi_remove_all_scsi_devices(ctrl_info); 7591 pqi_unregister_scsi(ctrl_info); 7592 if (ctrl_info->pqi_mode_enabled) 7593 pqi_revert_to_sis_mode(ctrl_info); 7594 pqi_free_ctrl_resources(ctrl_info); 7595 } 7596 7597 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 7598 { 7599 pqi_cancel_update_time_worker(ctrl_info); 7600 pqi_cancel_rescan_worker(ctrl_info); 7601 pqi_wait_until_lun_reset_finished(ctrl_info); 7602 pqi_wait_until_scan_finished(ctrl_info); 7603 pqi_ctrl_ofa_start(ctrl_info); 7604 pqi_ctrl_block_requests(ctrl_info); 7605 pqi_ctrl_wait_until_quiesced(ctrl_info); 7606 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); 7607 pqi_fail_io_queued_for_all_devices(ctrl_info); 7608 pqi_wait_until_inbound_queues_empty(ctrl_info); 7609 pqi_stop_heartbeat_timer(ctrl_info); 7610 ctrl_info->pqi_mode_enabled = false; 7611 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7612 } 7613 7614 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 7615 { 7616 pqi_ofa_free_host_buffer(ctrl_info); 7617 ctrl_info->pqi_mode_enabled = true; 7618 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7619 ctrl_info->controller_online = true; 7620 pqi_ctrl_unblock_requests(ctrl_info); 7621 pqi_start_heartbeat_timer(ctrl_info); 7622 pqi_schedule_update_time_worker(ctrl_info); 7623 pqi_clear_soft_reset_status(ctrl_info, 7624 PQI_SOFT_RESET_ABORT); 7625 pqi_scan_scsi_devices(ctrl_info); 7626 } 7627 7628 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, 7629 u32 total_size, u32 chunk_size) 7630 { 7631 u32 sg_count; 7632 u32 size; 7633 int i; 7634 struct pqi_sg_descriptor *mem_descriptor = NULL; 7635 struct device *dev; 7636 struct pqi_ofa_memory *ofap; 7637 7638 dev = &ctrl_info->pci_dev->dev; 7639 7640 sg_count = (total_size + chunk_size - 1); 7641 sg_count /= chunk_size; 7642 7643 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7644 7645 if (sg_count*chunk_size < total_size) 7646 goto out; 7647 7648 ctrl_info->pqi_ofa_chunk_virt_addr = 7649 kcalloc(sg_count, sizeof(void *), GFP_KERNEL); 7650 if (!ctrl_info->pqi_ofa_chunk_virt_addr) 7651 goto out; 7652 7653 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) { 7654 dma_addr_t dma_handle; 7655 7656 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7657 dma_alloc_coherent(dev, chunk_size, &dma_handle, 7658 GFP_KERNEL); 7659 7660 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7661 break; 7662 7663 mem_descriptor = &ofap->sg_descriptor[i]; 7664 put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address); 7665 put_unaligned_le32 (chunk_size, &mem_descriptor->length); 7666 } 7667 7668 if (!size || size < total_size) 7669 goto out_free_chunks; 7670 7671 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 7672 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); 7673 put_unaligned_le32(size, &ofap->bytes_allocated); 7674 7675 return 0; 7676 7677 out_free_chunks: 7678 while (--i >= 0) { 7679 mem_descriptor = &ofap->sg_descriptor[i]; 7680 dma_free_coherent(dev, chunk_size, 7681 ctrl_info->pqi_ofa_chunk_virt_addr[i], 7682 get_unaligned_le64(&mem_descriptor->address)); 7683 } 7684 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 7685 7686 out: 7687 put_unaligned_le32 (0, &ofap->bytes_allocated); 7688 return -ENOMEM; 7689 } 7690 7691 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) 7692 { 7693 u32 total_size; 7694 u32 min_chunk_size; 7695 u32 chunk_sz; 7696 7697 total_size = 
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 min_chunk_size;
	u32 chunk_sz;

	total_size = le32_to_cpu(
		ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
	min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;

	for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
		if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
			return 0;

	return -ENOMEM;
}

static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested)
{
	struct pqi_ofa_memory *pqi_ofa_memory;
	struct device *dev;

	dev = &ctrl_info->pci_dev->dev;
	pqi_ofa_memory = dma_alloc_coherent(dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
		&ctrl_info->pqi_ofa_mem_dma_handle,
		GFP_KERNEL);

	if (!pqi_ofa_memory)
		return;

	put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
	memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
		sizeof(pqi_ofa_memory->signature));
	pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);

	ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev, "Failed to allocate host buffer of size = %u",
			bytes_requested);
	}
}

static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	struct pqi_sg_descriptor *mem_descriptor;
	struct pqi_ofa_memory *ofap;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (!ofap)
		return;

	if (!ofap->bytes_allocated)
		goto out;

	mem_descriptor = ofap->sg_descriptor;

	for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
		i++) {
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(&ctrl_info->pci_dev->dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}

static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_vendor_general_request request;
	size_t size;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	if (ofap) {
		size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(size,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);
}

static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	return pqi_ctrl_init_resume(ctrl_info);
}
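
/*
 * Apply the action selected via the lockup_action module parameter when
 * the controller is taken offline: panic, reboot, or nothing (the default).
 */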
static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}

static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};

static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}

static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}

static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microsemi Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
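
/*
 * PCI probe entry point.  Optionally reject wildcard device-ID matches,
 * pick a usable NUMA node for the device, allocate the controller info
 * block, and bring the controller up via pqi_pci_init()/pqi_ctrl_init().
 */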
static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node, cp_node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		cp_node = cpu_to_node(0);
		if (cp_node == NUMA_NO_NODE)
			cp_node = 0;
		set_dev_node(&pci_dev->dev, cp_node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}

static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	ctrl_info->in_shutdown = true;

	pqi_remove_ctrl(ctrl_info);
}

static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */
	}
}
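
/*
 * PCI shutdown entry point.  Quiesce the controller, wait for outstanding
 * I/O and LUN resets to finish, flush the controller's battery-backed
 * write cache, and finally reset the controller.
 */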
static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_disable_events(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_event_worker(ctrl_info);

	pqi_ctrl_shutdown_start(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending I/O failed\n");
		return;
	}

	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_ctrl_block_requests(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending sync cmds failed\n");
		return;
	}

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}

static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}

static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_disable_events(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_stop_heartbeat_timer(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	return pqi_ctrl_init_resume(ctrl_info);
}

/* Define the PCI IDs for the controllers that we support. */
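/*
 * The final PCI_ANY_ID/PCI_ANY_ID entry is a catch-all for any 0x028f
 * function; such wildcard matches are reported by pqi_pci_probe() and can
 * be refused entirely via the disable_device_id_wildcards module parameter.
 */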
static const struct pci_device_id pqi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x105b, 0x1211) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x105b, 0x1321) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a22) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a23) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a24) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a36) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a37) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x1104) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x1105) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x1106) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x1107) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xc460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xc461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xf460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xf461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0045) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0046) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0047) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0048) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x004a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x004b) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x004c) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x004f) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd227) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd228) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd229) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd22a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd22b) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd22c) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0808) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0809) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1d8d, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1d8d, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1d8d, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1d8d, 0x0916) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_GIGABYTE, 0x1000) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
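
/*
 * Compile-time sanity checks: verify that the on-the-wire structure
 * layouts (register maps, IU headers, requests, responses, and BMIC
 * buffers) match the offsets and sizes assumed by the firmware
 * interface.  The function is never called; every check is a
 * BUILD_BUG_ON().
 */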
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}