/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.1.4-115"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		115

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

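/*
 * Request-blocking helpers: pqi_ctrl_block_requests() stops new commands at
 * the SCSI midlayer and sets block_requests so that driver-internal
 * submitters stall in pqi_wait_if_ctrl_blocked() until
 * pqi_ctrl_unblock_requests() wakes block_requests_wait.
 */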
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

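/*
 * Claim a free element from the I/O request pool.  A slot is owned by the
 * caller once its refcount transitions 0 -> 1; next_io_request_slot is only
 * a starting hint, so the unlocked accesses to it are benign races.
 */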
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
		sizeof(*flush_cache), 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(flush_cache);

	return rc;
}

static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

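	/*
	 * The wellness time field is eight BCD-coded bytes, in the order
	 * hour, minute, second, reserved (0), month, day, century, year.
	 */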
	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
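	/*
	 * Standard INQUIRY data: the peripheral device type is in byte 0,
	 * the vendor ID occupies bytes 8-15 and the product ID bytes 16-31,
	 * which the copies into device->vendor/model below rely on.
	 */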
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		scsi_device_type(device->devtype),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */
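/*
 * Note that ownership of new_device->raid_map is transferred to
 * existing_device here; the pointer is cleared in new_device so the map is
 * not freed when the temporary device structure is released.
 */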

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
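	/*
	 * If exposing a device fails, pqi_fixup_botched_add() pulls it back
	 * off the internal list so the driver's view stays consistent with
	 * the SCSI midlayer's.
	 */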
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!device->sdev) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
		sizeof(*new_device_list),
		GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;

	for (i = 0; i < num_new_devices; i++) {

		if (i < num_physicals) {
			is_physical_device = true;
			phys_lun_ext_entry = &physdev_list->lun_entries[i];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[i - num_physicals];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (!is_physical_device)
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx\n",
					get_unaligned_be64(
						&phys_lun_ext_entry->wwid));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle)
				device->aio_enabled = true;
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		switch (device->devtype) {
		case TYPE_DISK:
		case TYPE_ZBC:
		case TYPE_ENCLOSURE:
			if (device->is_physical_device) {
				device->sas_address =
					get_unaligned_be64(&device->wwid);
				if (device->devtype == TYPE_DISK ||
					device->devtype == TYPE_ZBC) {
					device->aio_handle =
						phys_lun_ext_entry->aio_handle;
					pqi_get_physical_disk_info(ctrl_info,
						device, id_phys);
				}
			}
			break;
		}

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}

static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;

	while (1) {
		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

		device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
			struct pqi_scsi_dev, scsi_device_list_entry);
		if (device)
			list_del(&device->scsi_device_list_entry);

		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);

		if (!device)
			break;

		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		pqi_free_device(device);
	}
}

static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_lock(&ctrl_info->scan_mutex);

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc)
		pqi_schedule_rescan_worker_delayed(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}

static void pqi_scan_start(struct Scsi_Host *shost)
{
	pqi_scan_scsi_devices(shost_to_hba(shost));
}

/* Returns TRUE if scan is finished. */
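/*
 * Polled by the SCSI midlayer during an asynchronous host scan; the scan is
 * treated as finished once pqi_scan_scsi_devices() has released scan_mutex.
 */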
*/ 2004 2005 static int pqi_scan_finished(struct Scsi_Host *shost, 2006 unsigned long elapsed_time) 2007 { 2008 struct pqi_ctrl_info *ctrl_info; 2009 2010 ctrl_info = shost_priv(shost); 2011 2012 return !mutex_is_locked(&ctrl_info->scan_mutex); 2013 } 2014 2015 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2016 { 2017 mutex_lock(&ctrl_info->scan_mutex); 2018 mutex_unlock(&ctrl_info->scan_mutex); 2019 } 2020 2021 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2022 { 2023 mutex_lock(&ctrl_info->lun_reset_mutex); 2024 mutex_unlock(&ctrl_info->lun_reset_mutex); 2025 } 2026 2027 static inline void pqi_set_encryption_info( 2028 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2029 u64 first_block) 2030 { 2031 u32 volume_blk_size; 2032 2033 /* 2034 * Set the encryption tweak values based on logical block address. 2035 * If the block size is 512, the tweak value is equal to the LBA. 2036 * For other block sizes, tweak value is (LBA * block size) / 512. 2037 */ 2038 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2039 if (volume_blk_size != 512) 2040 first_block = (first_block * volume_blk_size) / 512; 2041 2042 encryption_info->data_encryption_key_index = 2043 get_unaligned_le16(&raid_map->data_encryption_key_index); 2044 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2045 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2046 } 2047 2048 /* 2049 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2050 */ 2051 2052 #define PQI_RAID_BYPASS_INELIGIBLE 1 2053 2054 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2055 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2056 struct pqi_queue_group *queue_group) 2057 { 2058 struct raid_map *raid_map; 2059 bool is_write = false; 2060 u32 map_index; 2061 u64 first_block; 2062 u64 last_block; 2063 u32 block_cnt; 2064 u32 blocks_per_row; 2065 u64 first_row; 2066 u64 last_row; 2067 u32 first_row_offset; 2068 u32 last_row_offset; 2069 u32 first_column; 2070 u32 last_column; 2071 u64 r0_first_row; 2072 u64 r0_last_row; 2073 u32 r5or6_blocks_per_row; 2074 u64 r5or6_first_row; 2075 u64 r5or6_last_row; 2076 u32 r5or6_first_row_offset; 2077 u32 r5or6_last_row_offset; 2078 u32 r5or6_first_column; 2079 u32 r5or6_last_column; 2080 u16 data_disks_per_row; 2081 u32 total_disks_per_row; 2082 u16 layout_map_count; 2083 u32 stripesize; 2084 u16 strip_size; 2085 u32 first_group; 2086 u32 last_group; 2087 u32 current_group; 2088 u32 map_row; 2089 u32 aio_handle; 2090 u64 disk_block; 2091 u32 disk_block_cnt; 2092 u8 cdb[16]; 2093 u8 cdb_length; 2094 int offload_to_mirror; 2095 struct pqi_encryption_info *encryption_info_ptr; 2096 struct pqi_encryption_info encryption_info; 2097 #if BITS_PER_LONG == 32 2098 u64 tmpdiv; 2099 #endif 2100 2101 /* Check for valid opcode, get LBA and block count. 
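 * READ(6)/WRITE(6) carry a 21-bit LBA in bytes 1-3 and an 8-bit
 * transfer length in byte 4, where 0 means 256 blocks; the 10-, 12-
 * and 16-byte variants carry big-endian LBA and length fields as
 * decoded below. Any other opcode falls back to the normal RAID path.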
*/ 2102 switch (scmd->cmnd[0]) { 2103 case WRITE_6: 2104 is_write = true; 2105 /* fall through */ 2106 case READ_6: 2107 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2108 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2109 block_cnt = (u32)scmd->cmnd[4]; 2110 if (block_cnt == 0) 2111 block_cnt = 256; 2112 break; 2113 case WRITE_10: 2114 is_write = true; 2115 /* fall through */ 2116 case READ_10: 2117 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2118 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2119 break; 2120 case WRITE_12: 2121 is_write = true; 2122 /* fall through */ 2123 case READ_12: 2124 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2125 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2126 break; 2127 case WRITE_16: 2128 is_write = true; 2129 /* fall through */ 2130 case READ_16: 2131 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2132 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2133 break; 2134 default: 2135 /* Process via normal I/O path. */ 2136 return PQI_RAID_BYPASS_INELIGIBLE; 2137 } 2138 2139 /* Check for write to non-RAID-0. */ 2140 if (is_write && device->raid_level != SA_RAID_0) 2141 return PQI_RAID_BYPASS_INELIGIBLE; 2142 2143 if (unlikely(block_cnt == 0)) 2144 return PQI_RAID_BYPASS_INELIGIBLE; 2145 2146 last_block = first_block + block_cnt - 1; 2147 raid_map = device->raid_map; 2148 2149 /* Check for invalid block or wraparound. */ 2150 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2151 last_block < first_block) 2152 return PQI_RAID_BYPASS_INELIGIBLE; 2153 2154 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2155 strip_size = get_unaligned_le16(&raid_map->strip_size); 2156 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2157 2158 /* Calculate stripe information for the request. */ 2159 blocks_per_row = data_disks_per_row * strip_size; 2160 #if BITS_PER_LONG == 32 2161 tmpdiv = first_block; 2162 do_div(tmpdiv, blocks_per_row); 2163 first_row = tmpdiv; 2164 tmpdiv = last_block; 2165 do_div(tmpdiv, blocks_per_row); 2166 last_row = tmpdiv; 2167 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2168 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2169 tmpdiv = first_row_offset; 2170 do_div(tmpdiv, strip_size); 2171 first_column = tmpdiv; 2172 tmpdiv = last_row_offset; 2173 do_div(tmpdiv, strip_size); 2174 last_column = tmpdiv; 2175 #else 2176 first_row = first_block / blocks_per_row; 2177 last_row = last_block / blocks_per_row; 2178 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2179 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2180 first_column = first_row_offset / strip_size; 2181 last_column = last_row_offset / strip_size; 2182 #endif 2183 2184 /* If this isn't a single row/column then give to the controller. */ 2185 if (first_row != last_row || first_column != last_column) 2186 return PQI_RAID_BYPASS_INELIGIBLE; 2187 2188 /* Proceeding with driver mapping. 
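 * map_row is the row of the RAID layout map after the parity rotation
 * is applied; map_index selects the layout-map entry for the single
 * data drive holding this request's strip.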
*/ 2189 total_disks_per_row = data_disks_per_row + 2190 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2191 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2192 get_unaligned_le16(&raid_map->row_cnt); 2193 map_index = (map_row * total_disks_per_row) + first_column; 2194 2195 /* RAID 1 */ 2196 if (device->raid_level == SA_RAID_1) { 2197 if (device->offload_to_mirror) 2198 map_index += data_disks_per_row; 2199 device->offload_to_mirror = !device->offload_to_mirror; 2200 } else if (device->raid_level == SA_RAID_ADM) { 2201 /* RAID ADM */ 2202 /* 2203 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2204 * divisible by 3. 2205 */ 2206 offload_to_mirror = device->offload_to_mirror; 2207 if (offload_to_mirror == 0) { 2208 /* use physical disk in the first mirrored group. */ 2209 map_index %= data_disks_per_row; 2210 } else { 2211 do { 2212 /* 2213 * Determine mirror group that map_index 2214 * indicates. 2215 */ 2216 current_group = map_index / data_disks_per_row; 2217 2218 if (offload_to_mirror != current_group) { 2219 if (current_group < 2220 layout_map_count - 1) { 2221 /* 2222 * Select raid index from 2223 * next group. 2224 */ 2225 map_index += data_disks_per_row; 2226 current_group++; 2227 } else { 2228 /* 2229 * Select raid index from first 2230 * group. 2231 */ 2232 map_index %= data_disks_per_row; 2233 current_group = 0; 2234 } 2235 } 2236 } while (offload_to_mirror != current_group); 2237 } 2238 2239 /* Set mirror group to use next time. */ 2240 offload_to_mirror = 2241 (offload_to_mirror >= layout_map_count - 1) ? 2242 0 : offload_to_mirror + 1; 2243 WARN_ON(offload_to_mirror >= layout_map_count); 2244 device->offload_to_mirror = offload_to_mirror; 2245 /* 2246 * Avoid direct use of device->offload_to_mirror within this 2247 * function since multiple threads might simultaneously 2248 * increment it beyond the range of device->layout_map_count -1. 
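 * All of the mirror-group selection above therefore works on the
 * local offload_to_mirror copy and writes the wrapped value back to
 * the device exactly once.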
2249 */ 2250 } else if ((device->raid_level == SA_RAID_5 || 2251 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2252 /* RAID 50/60 */ 2253 /* Verify first and last block are in same RAID group */ 2254 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2255 stripesize = r5or6_blocks_per_row * layout_map_count; 2256 #if BITS_PER_LONG == 32 2257 tmpdiv = first_block; 2258 first_group = do_div(tmpdiv, stripesize); 2259 tmpdiv = first_group; 2260 do_div(tmpdiv, r5or6_blocks_per_row); 2261 first_group = tmpdiv; 2262 tmpdiv = last_block; 2263 last_group = do_div(tmpdiv, stripesize); 2264 tmpdiv = last_group; 2265 do_div(tmpdiv, r5or6_blocks_per_row); 2266 last_group = tmpdiv; 2267 #else 2268 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2269 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2270 #endif 2271 if (first_group != last_group) 2272 return PQI_RAID_BYPASS_INELIGIBLE; 2273 2274 /* Verify request is in a single row of RAID 5/6 */ 2275 #if BITS_PER_LONG == 32 2276 tmpdiv = first_block; 2277 do_div(tmpdiv, stripesize); 2278 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2279 tmpdiv = last_block; 2280 do_div(tmpdiv, stripesize); 2281 r5or6_last_row = r0_last_row = tmpdiv; 2282 #else 2283 first_row = r5or6_first_row = r0_first_row = 2284 first_block / stripesize; 2285 r5or6_last_row = r0_last_row = last_block / stripesize; 2286 #endif 2287 if (r5or6_first_row != r5or6_last_row) 2288 return PQI_RAID_BYPASS_INELIGIBLE; 2289 2290 /* Verify request is in a single column */ 2291 #if BITS_PER_LONG == 32 2292 tmpdiv = first_block; 2293 first_row_offset = do_div(tmpdiv, stripesize); 2294 tmpdiv = first_row_offset; 2295 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2296 r5or6_first_row_offset = first_row_offset; 2297 tmpdiv = last_block; 2298 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2299 tmpdiv = r5or6_last_row_offset; 2300 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2301 tmpdiv = r5or6_first_row_offset; 2302 do_div(tmpdiv, strip_size); 2303 first_column = r5or6_first_column = tmpdiv; 2304 tmpdiv = r5or6_last_row_offset; 2305 do_div(tmpdiv, strip_size); 2306 r5or6_last_column = tmpdiv; 2307 #else 2308 first_row_offset = r5or6_first_row_offset = 2309 (u32)((first_block % stripesize) % 2310 r5or6_blocks_per_row); 2311 2312 r5or6_last_row_offset = 2313 (u32)((last_block % stripesize) % 2314 r5or6_blocks_per_row); 2315 2316 first_column = r5or6_first_row_offset / strip_size; 2317 r5or6_first_column = first_column; 2318 r5or6_last_column = r5or6_last_row_offset / strip_size; 2319 #endif 2320 if (r5or6_first_column != r5or6_last_column) 2321 return PQI_RAID_BYPASS_INELIGIBLE; 2322 2323 /* Request is eligible */ 2324 map_row = 2325 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2326 get_unaligned_le16(&raid_map->row_cnt); 2327 2328 map_index = (first_group * 2329 (get_unaligned_le16(&raid_map->row_cnt) * 2330 total_disks_per_row)) + 2331 (map_row * total_disks_per_row) + first_column; 2332 } 2333 2334 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) 2335 return PQI_RAID_BYPASS_INELIGIBLE; 2336 2337 aio_handle = raid_map->disk_data[map_index].aio_handle; 2338 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2339 first_row * strip_size + 2340 (first_row_offset - first_column * strip_size); 2341 disk_block_cnt = block_cnt; 2342 2343 /* Handle differing logical/physical block sizes. 
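 * The shift scales volume block numbers and counts into physical
 * drive blocks; e.g. a shift of 3 (such as 4 KiB volume blocks on
 * 512-byte-sector drives) multiplies both by 8.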
*/ 2344 if (raid_map->phys_blk_shift) { 2345 disk_block <<= raid_map->phys_blk_shift; 2346 disk_block_cnt <<= raid_map->phys_blk_shift; 2347 } 2348 2349 if (unlikely(disk_block_cnt > 0xffff)) 2350 return PQI_RAID_BYPASS_INELIGIBLE; 2351 2352 /* Build the new CDB for the physical disk I/O. */ 2353 if (disk_block > 0xffffffff) { 2354 cdb[0] = is_write ? WRITE_16 : READ_16; 2355 cdb[1] = 0; 2356 put_unaligned_be64(disk_block, &cdb[2]); 2357 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2358 cdb[14] = 0; 2359 cdb[15] = 0; 2360 cdb_length = 16; 2361 } else { 2362 cdb[0] = is_write ? WRITE_10 : READ_10; 2363 cdb[1] = 0; 2364 put_unaligned_be32((u32)disk_block, &cdb[2]); 2365 cdb[6] = 0; 2366 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2367 cdb[9] = 0; 2368 cdb_length = 10; 2369 } 2370 2371 if (get_unaligned_le16(&raid_map->flags) & 2372 RAID_MAP_ENCRYPTION_ENABLED) { 2373 pqi_set_encryption_info(&encryption_info, raid_map, 2374 first_block); 2375 encryption_info_ptr = &encryption_info; 2376 } else { 2377 encryption_info_ptr = NULL; 2378 } 2379 2380 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2381 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2382 } 2383 2384 #define PQI_STATUS_IDLE 0x0 2385 2386 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2387 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2388 2389 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2390 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2391 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2392 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2393 #define PQI_DEVICE_STATE_ERROR 0x4 2394 2395 #define PQI_MODE_READY_TIMEOUT_SECS 30 2396 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2397 2398 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2399 { 2400 struct pqi_device_registers __iomem *pqi_registers; 2401 unsigned long timeout; 2402 u64 signature; 2403 u8 status; 2404 2405 pqi_registers = ctrl_info->pqi_registers; 2406 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 2407 2408 while (1) { 2409 signature = readq(&pqi_registers->signature); 2410 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2411 sizeof(signature)) == 0) 2412 break; 2413 if (time_after(jiffies, timeout)) { 2414 dev_err(&ctrl_info->pci_dev->dev, 2415 "timed out waiting for PQI signature\n"); 2416 return -ETIMEDOUT; 2417 } 2418 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2419 } 2420 2421 while (1) { 2422 status = readb(&pqi_registers->function_and_status_code); 2423 if (status == PQI_STATUS_IDLE) 2424 break; 2425 if (time_after(jiffies, timeout)) { 2426 dev_err(&ctrl_info->pci_dev->dev, 2427 "timed out waiting for PQI IDLE\n"); 2428 return -ETIMEDOUT; 2429 } 2430 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2431 } 2432 2433 while (1) { 2434 if (readl(&pqi_registers->device_status) == 2435 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2436 break; 2437 if (time_after(jiffies, timeout)) { 2438 dev_err(&ctrl_info->pci_dev->dev, 2439 "timed out waiting for PQI all registers ready\n"); 2440 return -ETIMEDOUT; 2441 } 2442 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2443 } 2444 2445 return 0; 2446 } 2447 2448 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2449 { 2450 struct pqi_scsi_dev *device; 2451 2452 device = io_request->scmd->device->hostdata; 2453 device->raid_bypass_enabled = false; 2454 device->aio_enabled = false; 2455 } 2456 2457 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2458 { 2459 struct pqi_ctrl_info *ctrl_info; 2460 struct pqi_scsi_dev *device; 2461 2462 device = 
sdev->hostdata; 2463 if (device->device_offline) 2464 return; 2465 2466 device->device_offline = true; 2467 scsi_device_set_state(sdev, SDEV_OFFLINE); 2468 ctrl_info = shost_to_hba(sdev->host); 2469 pqi_schedule_rescan_worker(ctrl_info); 2470 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", 2471 path, ctrl_info->scsi_host->host_no, device->bus, 2472 device->target, device->lun); 2473 } 2474 2475 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2476 { 2477 u8 scsi_status; 2478 u8 host_byte; 2479 struct scsi_cmnd *scmd; 2480 struct pqi_raid_error_info *error_info; 2481 size_t sense_data_length; 2482 int residual_count; 2483 int xfer_count; 2484 struct scsi_sense_hdr sshdr; 2485 2486 scmd = io_request->scmd; 2487 if (!scmd) 2488 return; 2489 2490 error_info = io_request->error_info; 2491 scsi_status = error_info->status; 2492 host_byte = DID_OK; 2493 2494 switch (error_info->data_out_result) { 2495 case PQI_DATA_IN_OUT_GOOD: 2496 break; 2497 case PQI_DATA_IN_OUT_UNDERFLOW: 2498 xfer_count = 2499 get_unaligned_le32(&error_info->data_out_transferred); 2500 residual_count = scsi_bufflen(scmd) - xfer_count; 2501 scsi_set_resid(scmd, residual_count); 2502 if (xfer_count < scmd->underflow) 2503 host_byte = DID_SOFT_ERROR; 2504 break; 2505 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2506 case PQI_DATA_IN_OUT_ABORTED: 2507 host_byte = DID_ABORT; 2508 break; 2509 case PQI_DATA_IN_OUT_TIMEOUT: 2510 host_byte = DID_TIME_OUT; 2511 break; 2512 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2513 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2514 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2515 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2516 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2517 case PQI_DATA_IN_OUT_ERROR: 2518 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2519 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2520 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2521 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2522 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2523 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2524 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2525 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2526 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2527 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2528 default: 2529 host_byte = DID_ERROR; 2530 break; 2531 } 2532 2533 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2534 if (sense_data_length == 0) 2535 sense_data_length = 2536 get_unaligned_le16(&error_info->response_data_length); 2537 if (sense_data_length) { 2538 if (sense_data_length > sizeof(error_info->data)) 2539 sense_data_length = sizeof(error_info->data); 2540 2541 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2542 scsi_normalize_sense(error_info->data, 2543 sense_data_length, &sshdr) && 2544 sshdr.sense_key == HARDWARE_ERROR && 2545 sshdr.asc == 0x3e && 2546 sshdr.ascq == 0x1) { 2547 pqi_take_device_offline(scmd->device, "RAID"); 2548 host_byte = DID_NO_CONNECT; 2549 } 2550 2551 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2552 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2553 memcpy(scmd->sense_buffer, error_info->data, 2554 sense_data_length); 2555 } 2556 2557 scmd->result = scsi_status; 2558 set_host_byte(scmd, host_byte); 2559 } 2560 2561 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2562 { 2563 u8 scsi_status; 2564 u8 host_byte; 2565 struct scsi_cmnd *scmd; 2566 struct pqi_aio_error_info *error_info; 2567 size_t sense_data_length; 2568 int residual_count; 2569 int xfer_count; 2570 bool device_offline; 
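/*
 * Translate the AIO service response and status reported by the
 * firmware into a SCSI status and host byte, copying back any
 * residual count and sense data that the firmware supplied.
 */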
2571 2572 scmd = io_request->scmd; 2573 error_info = io_request->error_info; 2574 host_byte = DID_OK; 2575 sense_data_length = 0; 2576 device_offline = false; 2577 2578 switch (error_info->service_response) { 2579 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2580 scsi_status = error_info->status; 2581 break; 2582 case PQI_AIO_SERV_RESPONSE_FAILURE: 2583 switch (error_info->status) { 2584 case PQI_AIO_STATUS_IO_ABORTED: 2585 scsi_status = SAM_STAT_TASK_ABORTED; 2586 break; 2587 case PQI_AIO_STATUS_UNDERRUN: 2588 scsi_status = SAM_STAT_GOOD; 2589 residual_count = get_unaligned_le32( 2590 &error_info->residual_count); 2591 scsi_set_resid(scmd, residual_count); 2592 xfer_count = scsi_bufflen(scmd) - residual_count; 2593 if (xfer_count < scmd->underflow) 2594 host_byte = DID_SOFT_ERROR; 2595 break; 2596 case PQI_AIO_STATUS_OVERRUN: 2597 scsi_status = SAM_STAT_GOOD; 2598 break; 2599 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2600 pqi_aio_path_disabled(io_request); 2601 scsi_status = SAM_STAT_GOOD; 2602 io_request->status = -EAGAIN; 2603 break; 2604 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2605 case PQI_AIO_STATUS_INVALID_DEVICE: 2606 if (!io_request->raid_bypass) { 2607 device_offline = true; 2608 pqi_take_device_offline(scmd->device, "AIO"); 2609 host_byte = DID_NO_CONNECT; 2610 } 2611 scsi_status = SAM_STAT_CHECK_CONDITION; 2612 break; 2613 case PQI_AIO_STATUS_IO_ERROR: 2614 default: 2615 scsi_status = SAM_STAT_CHECK_CONDITION; 2616 break; 2617 } 2618 break; 2619 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2620 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2621 scsi_status = SAM_STAT_GOOD; 2622 break; 2623 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2624 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2625 default: 2626 scsi_status = SAM_STAT_CHECK_CONDITION; 2627 break; 2628 } 2629 2630 if (error_info->data_present) { 2631 sense_data_length = 2632 get_unaligned_le16(&error_info->data_length); 2633 if (sense_data_length) { 2634 if (sense_data_length > sizeof(error_info->data)) 2635 sense_data_length = sizeof(error_info->data); 2636 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2637 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2638 memcpy(scmd->sense_buffer, error_info->data, 2639 sense_data_length); 2640 } 2641 } 2642 2643 if (device_offline && sense_data_length == 0) 2644 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2645 0x3e, 0x1); 2646 2647 scmd->result = scsi_status; 2648 set_host_byte(scmd, host_byte); 2649 } 2650 2651 static void pqi_process_io_error(unsigned int iu_type, 2652 struct pqi_io_request *io_request) 2653 { 2654 switch (iu_type) { 2655 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2656 pqi_process_raid_io_error(io_request); 2657 break; 2658 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2659 pqi_process_aio_io_error(io_request); 2660 break; 2661 } 2662 } 2663 2664 static int pqi_interpret_task_management_response( 2665 struct pqi_task_management_response *response) 2666 { 2667 int rc; 2668 2669 switch (response->response_code) { 2670 case SOP_TMF_COMPLETE: 2671 case SOP_TMF_FUNCTION_SUCCEEDED: 2672 rc = 0; 2673 break; 2674 default: 2675 rc = -EIO; 2676 break; 2677 } 2678 2679 return rc; 2680 } 2681 2682 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2683 struct pqi_queue_group *queue_group) 2684 { 2685 unsigned int num_responses; 2686 pqi_index_t oq_pi; 2687 pqi_index_t oq_ci; 2688 struct pqi_io_request *io_request; 2689 struct pqi_io_response *response; 2690 u16 request_id; 2691 2692 num_responses = 0; 2693 oq_ci = queue_group->oq_ci_copy; 2694 2695 while (1) { 2696 
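/*
 * Drain the outbound queue: keep consuming completion elements until
 * the consumer index catches up with the producer index posted by
 * the controller.
 */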
oq_pi = *queue_group->oq_pi; 2697 if (oq_pi == oq_ci) 2698 break; 2699 2700 num_responses++; 2701 response = queue_group->oq_element_array + 2702 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2703 2704 request_id = get_unaligned_le16(&response->request_id); 2705 WARN_ON(request_id >= ctrl_info->max_io_slots); 2706 2707 io_request = &ctrl_info->io_request_pool[request_id]; 2708 WARN_ON(atomic_read(&io_request->refcount) == 0); 2709 2710 switch (response->header.iu_type) { 2711 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2712 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2713 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2714 break; 2715 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2716 io_request->status = 2717 pqi_interpret_task_management_response( 2718 (void *)response); 2719 break; 2720 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2721 pqi_aio_path_disabled(io_request); 2722 io_request->status = -EAGAIN; 2723 break; 2724 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2725 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2726 io_request->error_info = ctrl_info->error_buffer + 2727 (get_unaligned_le16(&response->error_index) * 2728 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2729 pqi_process_io_error(response->header.iu_type, 2730 io_request); 2731 break; 2732 default: 2733 dev_err(&ctrl_info->pci_dev->dev, 2734 "unexpected IU type: 0x%x\n", 2735 response->header.iu_type); 2736 break; 2737 } 2738 2739 io_request->io_complete_callback(io_request, 2740 io_request->context); 2741 2742 /* 2743 * Note that the I/O request structure CANNOT BE TOUCHED after 2744 * returning from the I/O completion callback! 2745 */ 2746 2747 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2748 } 2749 2750 if (num_responses) { 2751 queue_group->oq_ci_copy = oq_ci; 2752 writel(oq_ci, queue_group->oq_ci); 2753 } 2754 2755 return num_responses; 2756 } 2757 2758 static inline unsigned int pqi_num_elements_free(unsigned int pi, 2759 unsigned int ci, unsigned int elements_in_queue) 2760 { 2761 unsigned int num_elements_used; 2762 2763 if (pi >= ci) 2764 num_elements_used = pi - ci; 2765 else 2766 num_elements_used = elements_in_queue - ci + pi; 2767 2768 return elements_in_queue - num_elements_used - 1; 2769 } 2770 2771 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 2772 struct pqi_event_acknowledge_request *iu, size_t iu_length) 2773 { 2774 pqi_index_t iq_pi; 2775 pqi_index_t iq_ci; 2776 unsigned long flags; 2777 void *next_element; 2778 struct pqi_queue_group *queue_group; 2779 2780 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 2781 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 2782 2783 while (1) { 2784 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 2785 2786 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 2787 iq_ci = *queue_group->iq_ci[RAID_PATH]; 2788 2789 if (pqi_num_elements_free(iq_pi, iq_ci, 2790 ctrl_info->num_elements_per_iq)) 2791 break; 2792 2793 spin_unlock_irqrestore( 2794 &queue_group->submit_lock[RAID_PATH], flags); 2795 2796 if (pqi_ctrl_offline(ctrl_info)) 2797 return; 2798 } 2799 2800 next_element = queue_group->iq_element_array[RAID_PATH] + 2801 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 2802 2803 memcpy(next_element, iu, iu_length); 2804 2805 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 2806 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 2807 2808 /* 2809 * This write notifies the controller that an IU is available to be 2810 * processed. 
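 * Writing the updated producer index to the controller-visible iq_pi
 * register is what hands the just-copied event acknowledgement to the
 * firmware.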
2811 */ 2812 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 2813 2814 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 2815 } 2816 2817 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 2818 struct pqi_event *event) 2819 { 2820 struct pqi_event_acknowledge_request request; 2821 2822 memset(&request, 0, sizeof(request)); 2823 2824 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 2825 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 2826 &request.header.iu_length); 2827 request.event_type = event->event_type; 2828 request.event_id = event->event_id; 2829 request.additional_event_id = event->additional_event_id; 2830 2831 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 2832 } 2833 2834 static void pqi_event_worker(struct work_struct *work) 2835 { 2836 unsigned int i; 2837 struct pqi_ctrl_info *ctrl_info; 2838 struct pqi_event *event; 2839 2840 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 2841 2842 pqi_ctrl_busy(ctrl_info); 2843 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 2844 if (pqi_ctrl_offline(ctrl_info)) 2845 goto out; 2846 2847 pqi_schedule_rescan_worker_delayed(ctrl_info); 2848 2849 event = ctrl_info->events; 2850 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 2851 if (event->pending) { 2852 event->pending = false; 2853 pqi_acknowledge_event(ctrl_info, event); 2854 } 2855 event++; 2856 } 2857 2858 out: 2859 pqi_ctrl_unbusy(ctrl_info); 2860 } 2861 2862 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 2863 2864 static void pqi_heartbeat_timer_handler(struct timer_list *t) 2865 { 2866 int num_interrupts; 2867 u32 heartbeat_count; 2868 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 2869 heartbeat_timer); 2870 2871 pqi_check_ctrl_health(ctrl_info); 2872 if (pqi_ctrl_offline(ctrl_info)) 2873 return; 2874 2875 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 2876 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 2877 2878 if (num_interrupts == ctrl_info->previous_num_interrupts) { 2879 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 2880 dev_err(&ctrl_info->pci_dev->dev, 2881 "no heartbeat detected - last heartbeat count: %u\n", 2882 heartbeat_count); 2883 pqi_take_ctrl_offline(ctrl_info); 2884 return; 2885 } 2886 } else { 2887 ctrl_info->previous_num_interrupts = num_interrupts; 2888 } 2889 2890 ctrl_info->previous_heartbeat_count = heartbeat_count; 2891 mod_timer(&ctrl_info->heartbeat_timer, 2892 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 2893 } 2894 2895 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2896 { 2897 if (!ctrl_info->heartbeat_counter) 2898 return; 2899 2900 ctrl_info->previous_num_interrupts = 2901 atomic_read(&ctrl_info->num_interrupts); 2902 ctrl_info->previous_heartbeat_count = 2903 pqi_read_heartbeat_counter(ctrl_info); 2904 2905 ctrl_info->heartbeat_timer.expires = 2906 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 2907 add_timer(&ctrl_info->heartbeat_timer); 2908 } 2909 2910 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2911 { 2912 del_timer_sync(&ctrl_info->heartbeat_timer); 2913 } 2914 2915 static inline int pqi_event_type_to_event_index(unsigned int event_type) 2916 { 2917 int index; 2918 2919 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 2920 if (event_type == pqi_supported_event_types[index]) 2921 return index; 2922 2923 return -1; 2924 } 2925 2926 static inline bool pqi_is_supported_event(unsigned int event_type) 2927 { 2928 return 
pqi_event_type_to_event_index(event_type) != -1; 2929 } 2930 2931 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 2932 { 2933 unsigned int num_events; 2934 pqi_index_t oq_pi; 2935 pqi_index_t oq_ci; 2936 struct pqi_event_queue *event_queue; 2937 struct pqi_event_response *response; 2938 struct pqi_event *event; 2939 int event_index; 2940 2941 event_queue = &ctrl_info->event_queue; 2942 num_events = 0; 2943 oq_ci = event_queue->oq_ci_copy; 2944 2945 while (1) { 2946 oq_pi = *event_queue->oq_pi; 2947 if (oq_pi == oq_ci) 2948 break; 2949 2950 num_events++; 2951 response = event_queue->oq_element_array + 2952 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 2953 2954 event_index = 2955 pqi_event_type_to_event_index(response->event_type); 2956 2957 if (event_index >= 0) { 2958 if (response->request_acknowlege) { 2959 event = &ctrl_info->events[event_index]; 2960 event->pending = true; 2961 event->event_type = response->event_type; 2962 event->event_id = response->event_id; 2963 event->additional_event_id = 2964 response->additional_event_id; 2965 } 2966 } 2967 2968 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 2969 } 2970 2971 if (num_events) { 2972 event_queue->oq_ci_copy = oq_ci; 2973 writel(oq_ci, event_queue->oq_ci); 2974 schedule_work(&ctrl_info->event_work); 2975 } 2976 2977 return num_events; 2978 } 2979 2980 #define PQI_LEGACY_INTX_MASK 0x1 2981 2982 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 2983 bool enable_intx) 2984 { 2985 u32 intx_mask; 2986 struct pqi_device_registers __iomem *pqi_registers; 2987 volatile void __iomem *register_addr; 2988 2989 pqi_registers = ctrl_info->pqi_registers; 2990 2991 if (enable_intx) 2992 register_addr = &pqi_registers->legacy_intx_mask_clear; 2993 else 2994 register_addr = &pqi_registers->legacy_intx_mask_set; 2995 2996 intx_mask = readl(register_addr); 2997 intx_mask |= PQI_LEGACY_INTX_MASK; 2998 writel(intx_mask, register_addr); 2999 } 3000 3001 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3002 enum pqi_irq_mode new_mode) 3003 { 3004 switch (ctrl_info->irq_mode) { 3005 case IRQ_MODE_MSIX: 3006 switch (new_mode) { 3007 case IRQ_MODE_MSIX: 3008 break; 3009 case IRQ_MODE_INTX: 3010 pqi_configure_legacy_intx(ctrl_info, true); 3011 sis_enable_intx(ctrl_info); 3012 break; 3013 case IRQ_MODE_NONE: 3014 break; 3015 } 3016 break; 3017 case IRQ_MODE_INTX: 3018 switch (new_mode) { 3019 case IRQ_MODE_MSIX: 3020 pqi_configure_legacy_intx(ctrl_info, false); 3021 sis_enable_msix(ctrl_info); 3022 break; 3023 case IRQ_MODE_INTX: 3024 break; 3025 case IRQ_MODE_NONE: 3026 pqi_configure_legacy_intx(ctrl_info, false); 3027 break; 3028 } 3029 break; 3030 case IRQ_MODE_NONE: 3031 switch (new_mode) { 3032 case IRQ_MODE_MSIX: 3033 sis_enable_msix(ctrl_info); 3034 break; 3035 case IRQ_MODE_INTX: 3036 pqi_configure_legacy_intx(ctrl_info, true); 3037 sis_enable_intx(ctrl_info); 3038 break; 3039 case IRQ_MODE_NONE: 3040 break; 3041 } 3042 break; 3043 } 3044 3045 ctrl_info->irq_mode = new_mode; 3046 } 3047 3048 #define PQI_LEGACY_INTX_PENDING 0x1 3049 3050 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3051 { 3052 bool valid_irq; 3053 u32 intx_status; 3054 3055 switch (ctrl_info->irq_mode) { 3056 case IRQ_MODE_MSIX: 3057 valid_irq = true; 3058 break; 3059 case IRQ_MODE_INTX: 3060 intx_status = 3061 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3062 if (intx_status & PQI_LEGACY_INTX_PENDING) 3063 valid_irq = true; 3064 else 3065 valid_irq = false; 3066 break; 3067 case 
IRQ_MODE_NONE: 3068 default: 3069 valid_irq = false; 3070 break; 3071 } 3072 3073 return valid_irq; 3074 } 3075 3076 static irqreturn_t pqi_irq_handler(int irq, void *data) 3077 { 3078 struct pqi_ctrl_info *ctrl_info; 3079 struct pqi_queue_group *queue_group; 3080 unsigned int num_responses_handled; 3081 3082 queue_group = data; 3083 ctrl_info = queue_group->ctrl_info; 3084 3085 if (!pqi_is_valid_irq(ctrl_info)) 3086 return IRQ_NONE; 3087 3088 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3089 3090 if (irq == ctrl_info->event_irq) 3091 num_responses_handled += pqi_process_event_intr(ctrl_info); 3092 3093 if (num_responses_handled) 3094 atomic_inc(&ctrl_info->num_interrupts); 3095 3096 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3097 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3098 3099 return IRQ_HANDLED; 3100 } 3101 3102 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3103 { 3104 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3105 int i; 3106 int rc; 3107 3108 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3109 3110 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3111 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3112 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3113 if (rc) { 3114 dev_err(&pci_dev->dev, 3115 "irq %u init failed with error %d\n", 3116 pci_irq_vector(pci_dev, i), rc); 3117 return rc; 3118 } 3119 ctrl_info->num_msix_vectors_initialized++; 3120 } 3121 3122 return 0; 3123 } 3124 3125 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3126 { 3127 int i; 3128 3129 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3130 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3131 &ctrl_info->queue_groups[i]); 3132 3133 ctrl_info->num_msix_vectors_initialized = 0; 3134 } 3135 3136 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3137 { 3138 int num_vectors_enabled; 3139 3140 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3141 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3142 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3143 if (num_vectors_enabled < 0) { 3144 dev_err(&ctrl_info->pci_dev->dev, 3145 "MSI-X init failed with error %d\n", 3146 num_vectors_enabled); 3147 return num_vectors_enabled; 3148 } 3149 3150 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3151 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3152 return 0; 3153 } 3154 3155 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3156 { 3157 if (ctrl_info->num_msix_vectors_enabled) { 3158 pci_free_irq_vectors(ctrl_info->pci_dev); 3159 ctrl_info->num_msix_vectors_enabled = 0; 3160 } 3161 } 3162 3163 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3164 { 3165 unsigned int i; 3166 size_t alloc_length; 3167 size_t element_array_length_per_iq; 3168 size_t element_array_length_per_oq; 3169 void *element_array; 3170 void *next_queue_index; 3171 void *aligned_pointer; 3172 unsigned int num_inbound_queues; 3173 unsigned int num_outbound_queues; 3174 unsigned int num_queue_indexes; 3175 struct pqi_queue_group *queue_group; 3176 3177 element_array_length_per_iq = 3178 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3179 ctrl_info->num_elements_per_iq; 3180 element_array_length_per_oq = 3181 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3182 ctrl_info->num_elements_per_oq; 3183 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3184 num_outbound_queues = ctrl_info->num_queue_groups; 3185 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3186 3187 
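/*
 * First pass: walk a NULL-based pointer through the same alignment
 * and sizing steps as the real layout below, purely to compute the
 * total length of the single coherent allocation.
 */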
aligned_pointer = NULL; 3188 3189 for (i = 0; i < num_inbound_queues; i++) { 3190 aligned_pointer = PTR_ALIGN(aligned_pointer, 3191 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3192 aligned_pointer += element_array_length_per_iq; 3193 } 3194 3195 for (i = 0; i < num_outbound_queues; i++) { 3196 aligned_pointer = PTR_ALIGN(aligned_pointer, 3197 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3198 aligned_pointer += element_array_length_per_oq; 3199 } 3200 3201 aligned_pointer = PTR_ALIGN(aligned_pointer, 3202 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3203 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3204 PQI_EVENT_OQ_ELEMENT_LENGTH; 3205 3206 for (i = 0; i < num_queue_indexes; i++) { 3207 aligned_pointer = PTR_ALIGN(aligned_pointer, 3208 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3209 aligned_pointer += sizeof(pqi_index_t); 3210 } 3211 3212 alloc_length = (size_t)aligned_pointer + 3213 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3214 3215 alloc_length += PQI_EXTRA_SGL_MEMORY; 3216 3217 ctrl_info->queue_memory_base = 3218 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3219 alloc_length, 3220 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3221 3222 if (!ctrl_info->queue_memory_base) 3223 return -ENOMEM; 3224 3225 ctrl_info->queue_memory_length = alloc_length; 3226 3227 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3228 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3229 3230 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3231 queue_group = &ctrl_info->queue_groups[i]; 3232 queue_group->iq_element_array[RAID_PATH] = element_array; 3233 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3234 ctrl_info->queue_memory_base_dma_handle + 3235 (element_array - ctrl_info->queue_memory_base); 3236 element_array += element_array_length_per_iq; 3237 element_array = PTR_ALIGN(element_array, 3238 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3239 queue_group->iq_element_array[AIO_PATH] = element_array; 3240 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3241 ctrl_info->queue_memory_base_dma_handle + 3242 (element_array - ctrl_info->queue_memory_base); 3243 element_array += element_array_length_per_iq; 3244 element_array = PTR_ALIGN(element_array, 3245 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3246 } 3247 3248 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3249 queue_group = &ctrl_info->queue_groups[i]; 3250 queue_group->oq_element_array = element_array; 3251 queue_group->oq_element_array_bus_addr = 3252 ctrl_info->queue_memory_base_dma_handle + 3253 (element_array - ctrl_info->queue_memory_base); 3254 element_array += element_array_length_per_oq; 3255 element_array = PTR_ALIGN(element_array, 3256 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3257 } 3258 3259 ctrl_info->event_queue.oq_element_array = element_array; 3260 ctrl_info->event_queue.oq_element_array_bus_addr = 3261 ctrl_info->queue_memory_base_dma_handle + 3262 (element_array - ctrl_info->queue_memory_base); 3263 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3264 PQI_EVENT_OQ_ELEMENT_LENGTH; 3265 3266 next_queue_index = PTR_ALIGN(element_array, 3267 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3268 3269 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3270 queue_group = &ctrl_info->queue_groups[i]; 3271 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3272 queue_group->iq_ci_bus_addr[RAID_PATH] = 3273 ctrl_info->queue_memory_base_dma_handle + 3274 (next_queue_index - ctrl_info->queue_memory_base); 3275 next_queue_index += sizeof(pqi_index_t); 3276 next_queue_index = PTR_ALIGN(next_queue_index, 3277 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3278 queue_group->iq_ci[AIO_PATH] = next_queue_index; 3279 
queue_group->iq_ci_bus_addr[AIO_PATH] = 3280 ctrl_info->queue_memory_base_dma_handle + 3281 (next_queue_index - ctrl_info->queue_memory_base); 3282 next_queue_index += sizeof(pqi_index_t); 3283 next_queue_index = PTR_ALIGN(next_queue_index, 3284 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3285 queue_group->oq_pi = next_queue_index; 3286 queue_group->oq_pi_bus_addr = 3287 ctrl_info->queue_memory_base_dma_handle + 3288 (next_queue_index - ctrl_info->queue_memory_base); 3289 next_queue_index += sizeof(pqi_index_t); 3290 next_queue_index = PTR_ALIGN(next_queue_index, 3291 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3292 } 3293 3294 ctrl_info->event_queue.oq_pi = next_queue_index; 3295 ctrl_info->event_queue.oq_pi_bus_addr = 3296 ctrl_info->queue_memory_base_dma_handle + 3297 (next_queue_index - ctrl_info->queue_memory_base); 3298 3299 return 0; 3300 } 3301 3302 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3303 { 3304 unsigned int i; 3305 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3306 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3307 3308 /* 3309 * Initialize the backpointers to the controller structure in 3310 * each operational queue group structure. 3311 */ 3312 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3313 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3314 3315 /* 3316 * Assign IDs to all operational queues. Note that the IDs 3317 * assigned to operational IQs are independent of the IDs 3318 * assigned to operational OQs. 3319 */ 3320 ctrl_info->event_queue.oq_id = next_oq_id++; 3321 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3322 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3323 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3324 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3325 } 3326 3327 /* 3328 * Assign MSI-X table entry indexes to all queues. Note that the 3329 * interrupt for the event queue is shared with the first queue group. 
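 * (Queue group 0 and the event queue both use MSI-X vector 0, which
 * is why pqi_irq_handler() also polls the event queue when it runs
 * for ctrl_info->event_irq.)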
3330 */ 3331 ctrl_info->event_queue.int_msg_num = 0; 3332 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3333 ctrl_info->queue_groups[i].int_msg_num = i; 3334 3335 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3336 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3337 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3338 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3339 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3340 } 3341 } 3342 3343 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3344 { 3345 size_t alloc_length; 3346 struct pqi_admin_queues_aligned *admin_queues_aligned; 3347 struct pqi_admin_queues *admin_queues; 3348 3349 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3350 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3351 3352 ctrl_info->admin_queue_memory_base = 3353 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3354 alloc_length, 3355 &ctrl_info->admin_queue_memory_base_dma_handle, 3356 GFP_KERNEL); 3357 3358 if (!ctrl_info->admin_queue_memory_base) 3359 return -ENOMEM; 3360 3361 ctrl_info->admin_queue_memory_length = alloc_length; 3362 3363 admin_queues = &ctrl_info->admin_queues; 3364 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3365 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3366 admin_queues->iq_element_array = 3367 &admin_queues_aligned->iq_element_array; 3368 admin_queues->oq_element_array = 3369 &admin_queues_aligned->oq_element_array; 3370 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3371 admin_queues->oq_pi = &admin_queues_aligned->oq_pi; 3372 3373 admin_queues->iq_element_array_bus_addr = 3374 ctrl_info->admin_queue_memory_base_dma_handle + 3375 (admin_queues->iq_element_array - 3376 ctrl_info->admin_queue_memory_base); 3377 admin_queues->oq_element_array_bus_addr = 3378 ctrl_info->admin_queue_memory_base_dma_handle + 3379 (admin_queues->oq_element_array - 3380 ctrl_info->admin_queue_memory_base); 3381 admin_queues->iq_ci_bus_addr = 3382 ctrl_info->admin_queue_memory_base_dma_handle + 3383 ((void *)admin_queues->iq_ci - 3384 ctrl_info->admin_queue_memory_base); 3385 admin_queues->oq_pi_bus_addr = 3386 ctrl_info->admin_queue_memory_base_dma_handle + 3387 ((void *)admin_queues->oq_pi - 3388 ctrl_info->admin_queue_memory_base); 3389 3390 return 0; 3391 } 3392 3393 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 3394 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3395 3396 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3397 { 3398 struct pqi_device_registers __iomem *pqi_registers; 3399 struct pqi_admin_queues *admin_queues; 3400 unsigned long timeout; 3401 u8 status; 3402 u32 reg; 3403 3404 pqi_registers = ctrl_info->pqi_registers; 3405 admin_queues = &ctrl_info->admin_queues; 3406 3407 writeq((u64)admin_queues->iq_element_array_bus_addr, 3408 &pqi_registers->admin_iq_element_array_addr); 3409 writeq((u64)admin_queues->oq_element_array_bus_addr, 3410 &pqi_registers->admin_oq_element_array_addr); 3411 writeq((u64)admin_queues->iq_ci_bus_addr, 3412 &pqi_registers->admin_iq_ci_addr); 3413 writeq((u64)admin_queues->oq_pi_bus_addr, 3414 &pqi_registers->admin_oq_pi_addr); 3415 3416 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3417 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | 3418 (admin_queues->int_msg_num << 16); 3419 writel(reg, &pqi_registers->admin_iq_num_elements); 3420 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3421 &pqi_registers->function_and_status_code); 3422 3423 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 3424 while (1) { 3425 status = 
readb(&pqi_registers->function_and_status_code); 3426 if (status == PQI_STATUS_IDLE) 3427 break; 3428 if (time_after(jiffies, timeout)) 3429 return -ETIMEDOUT; 3430 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3431 } 3432 3433 /* 3434 * The offset registers are not initialized to the correct 3435 * offsets until *after* the create admin queue pair command 3436 * completes successfully. 3437 */ 3438 admin_queues->iq_pi = ctrl_info->iomem_base + 3439 PQI_DEVICE_REGISTERS_OFFSET + 3440 readq(&pqi_registers->admin_iq_pi_offset); 3441 admin_queues->oq_ci = ctrl_info->iomem_base + 3442 PQI_DEVICE_REGISTERS_OFFSET + 3443 readq(&pqi_registers->admin_oq_ci_offset); 3444 3445 return 0; 3446 } 3447 3448 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3449 struct pqi_general_admin_request *request) 3450 { 3451 struct pqi_admin_queues *admin_queues; 3452 void *next_element; 3453 pqi_index_t iq_pi; 3454 3455 admin_queues = &ctrl_info->admin_queues; 3456 iq_pi = admin_queues->iq_pi_copy; 3457 3458 next_element = admin_queues->iq_element_array + 3459 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3460 3461 memcpy(next_element, request, sizeof(*request)); 3462 3463 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3464 admin_queues->iq_pi_copy = iq_pi; 3465 3466 /* 3467 * This write notifies the controller that an IU is available to be 3468 * processed. 3469 */ 3470 writel(iq_pi, admin_queues->iq_pi); 3471 } 3472 3473 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3474 3475 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3476 struct pqi_general_admin_response *response) 3477 { 3478 struct pqi_admin_queues *admin_queues; 3479 pqi_index_t oq_pi; 3480 pqi_index_t oq_ci; 3481 unsigned long timeout; 3482 3483 admin_queues = &ctrl_info->admin_queues; 3484 oq_ci = admin_queues->oq_ci_copy; 3485 3486 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 3487 3488 while (1) { 3489 oq_pi = *admin_queues->oq_pi; 3490 if (oq_pi != oq_ci) 3491 break; 3492 if (time_after(jiffies, timeout)) { 3493 dev_err(&ctrl_info->pci_dev->dev, 3494 "timed out waiting for admin response\n"); 3495 return -ETIMEDOUT; 3496 } 3497 if (!sis_is_firmware_running(ctrl_info)) 3498 return -ENXIO; 3499 usleep_range(1000, 2000); 3500 } 3501 3502 memcpy(response, admin_queues->oq_element_array + 3503 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3504 3505 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3506 admin_queues->oq_ci_copy = oq_ci; 3507 writel(oq_ci, admin_queues->oq_ci); 3508 3509 return 0; 3510 } 3511 3512 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3513 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3514 struct pqi_io_request *io_request) 3515 { 3516 struct pqi_io_request *next; 3517 void *next_element; 3518 pqi_index_t iq_pi; 3519 pqi_index_t iq_ci; 3520 size_t iu_length; 3521 unsigned long flags; 3522 unsigned int num_elements_needed; 3523 unsigned int num_elements_to_end_of_queue; 3524 size_t copy_count; 3525 struct pqi_iu_header *request; 3526 3527 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3528 3529 if (io_request) { 3530 io_request->queue_group = queue_group; 3531 list_add_tail(&io_request->request_list_entry, 3532 &queue_group->request_list[path]); 3533 } 3534 3535 iq_pi = queue_group->iq_pi_copy[path]; 3536 3537 list_for_each_entry_safe(io_request, next, 3538 &queue_group->request_list[path], request_list_entry) { 3539 3540 request = io_request->iu; 3541 3542 iu_length = get_unaligned_le16(&request->iu_length) + 3543 
PQI_REQUEST_HEADER_LENGTH; 3544 num_elements_needed = 3545 DIV_ROUND_UP(iu_length, 3546 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3547 3548 iq_ci = *queue_group->iq_ci[path]; 3549 3550 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3551 ctrl_info->num_elements_per_iq)) 3552 break; 3553 3554 put_unaligned_le16(queue_group->oq_id, 3555 &request->response_queue_id); 3556 3557 next_element = queue_group->iq_element_array[path] + 3558 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3559 3560 num_elements_to_end_of_queue = 3561 ctrl_info->num_elements_per_iq - iq_pi; 3562 3563 if (num_elements_needed <= num_elements_to_end_of_queue) { 3564 memcpy(next_element, request, iu_length); 3565 } else { 3566 copy_count = num_elements_to_end_of_queue * 3567 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3568 memcpy(next_element, request, copy_count); 3569 memcpy(queue_group->iq_element_array[path], 3570 (u8 *)request + copy_count, 3571 iu_length - copy_count); 3572 } 3573 3574 iq_pi = (iq_pi + num_elements_needed) % 3575 ctrl_info->num_elements_per_iq; 3576 3577 list_del(&io_request->request_list_entry); 3578 } 3579 3580 if (iq_pi != queue_group->iq_pi_copy[path]) { 3581 queue_group->iq_pi_copy[path] = iq_pi; 3582 /* 3583 * This write notifies the controller that one or more IUs are 3584 * available to be processed. 3585 */ 3586 writel(iq_pi, queue_group->iq_pi[path]); 3587 } 3588 3589 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3590 } 3591 3592 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3593 3594 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3595 struct completion *wait) 3596 { 3597 int rc; 3598 3599 while (1) { 3600 if (wait_for_completion_io_timeout(wait, 3601 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 3602 rc = 0; 3603 break; 3604 } 3605 3606 pqi_check_ctrl_health(ctrl_info); 3607 if (pqi_ctrl_offline(ctrl_info)) { 3608 rc = -ENXIO; 3609 break; 3610 } 3611 } 3612 3613 return rc; 3614 } 3615 3616 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3617 void *context) 3618 { 3619 struct completion *waiting = context; 3620 3621 complete(waiting); 3622 } 3623 3624 static int pqi_submit_raid_request_synchronous_with_io_request( 3625 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 3626 unsigned long timeout_msecs) 3627 { 3628 int rc = 0; 3629 DECLARE_COMPLETION_ONSTACK(wait); 3630 3631 io_request->io_complete_callback = pqi_raid_synchronous_complete; 3632 io_request->context = &wait; 3633 3634 pqi_start_io(ctrl_info, 3635 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 3636 io_request); 3637 3638 if (timeout_msecs == NO_TIMEOUT) { 3639 pqi_wait_for_completion_io(ctrl_info, &wait); 3640 } else { 3641 if (!wait_for_completion_io_timeout(&wait, 3642 msecs_to_jiffies(timeout_msecs))) { 3643 dev_warn(&ctrl_info->pci_dev->dev, 3644 "command timed out\n"); 3645 rc = -ETIMEDOUT; 3646 } 3647 } 3648 3649 return rc; 3650 } 3651 3652 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 3653 struct pqi_iu_header *request, unsigned int flags, 3654 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 3655 { 3656 int rc; 3657 struct pqi_io_request *io_request; 3658 unsigned long start_jiffies; 3659 unsigned long msecs_blocked; 3660 size_t iu_length; 3661 3662 /* 3663 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 3664 * are mutually exclusive. 
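 * The interruptible path acquires sync_request_sem with
 * down_interruptible() and never applies a timeout; the timed path
 * uses down_timeout() and then deducts the time spent blocked from
 * the remaining timeout.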
3665 */ 3666 3667 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 3668 if (down_interruptible(&ctrl_info->sync_request_sem)) 3669 return -ERESTARTSYS; 3670 } else { 3671 if (timeout_msecs == NO_TIMEOUT) { 3672 down(&ctrl_info->sync_request_sem); 3673 } else { 3674 start_jiffies = jiffies; 3675 if (down_timeout(&ctrl_info->sync_request_sem, 3676 msecs_to_jiffies(timeout_msecs))) 3677 return -ETIMEDOUT; 3678 msecs_blocked = 3679 jiffies_to_msecs(jiffies - start_jiffies); 3680 if (msecs_blocked >= timeout_msecs) 3681 return -ETIMEDOUT; 3682 timeout_msecs -= msecs_blocked; 3683 } 3684 } 3685 3686 pqi_ctrl_busy(ctrl_info); 3687 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 3688 if (timeout_msecs == 0) { 3689 rc = -ETIMEDOUT; 3690 goto out; 3691 } 3692 3693 if (pqi_ctrl_offline(ctrl_info)) { 3694 rc = -ENXIO; 3695 goto out; 3696 } 3697 3698 io_request = pqi_alloc_io_request(ctrl_info); 3699 3700 put_unaligned_le16(io_request->index, 3701 &(((struct pqi_raid_path_request *)request)->request_id)); 3702 3703 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 3704 ((struct pqi_raid_path_request *)request)->error_index = 3705 ((struct pqi_raid_path_request *)request)->request_id; 3706 3707 iu_length = get_unaligned_le16(&request->iu_length) + 3708 PQI_REQUEST_HEADER_LENGTH; 3709 memcpy(io_request->iu, request, iu_length); 3710 3711 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info, 3712 io_request, timeout_msecs); 3713 3714 if (error_info) { 3715 if (io_request->error_info) 3716 memcpy(error_info, io_request->error_info, 3717 sizeof(*error_info)); 3718 else 3719 memset(error_info, 0, sizeof(*error_info)); 3720 } else if (rc == 0 && io_request->error_info) { 3721 u8 scsi_status; 3722 struct pqi_raid_error_info *raid_error_info; 3723 3724 raid_error_info = io_request->error_info; 3725 scsi_status = raid_error_info->status; 3726 3727 if (scsi_status == SAM_STAT_CHECK_CONDITION && 3728 raid_error_info->data_out_result == 3729 PQI_DATA_IN_OUT_UNDERFLOW) 3730 scsi_status = SAM_STAT_GOOD; 3731 3732 if (scsi_status != SAM_STAT_GOOD) 3733 rc = -EIO; 3734 } 3735 3736 pqi_free_io_request(io_request); 3737 3738 out: 3739 pqi_ctrl_unbusy(ctrl_info); 3740 up(&ctrl_info->sync_request_sem); 3741 3742 return rc; 3743 } 3744 3745 static int pqi_validate_admin_response( 3746 struct pqi_general_admin_response *response, u8 expected_function_code) 3747 { 3748 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 3749 return -EINVAL; 3750 3751 if (get_unaligned_le16(&response->header.iu_length) != 3752 PQI_GENERAL_ADMIN_IU_LENGTH) 3753 return -EINVAL; 3754 3755 if (response->function_code != expected_function_code) 3756 return -EINVAL; 3757 3758 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 3759 return -EINVAL; 3760 3761 return 0; 3762 } 3763 3764 static int pqi_submit_admin_request_synchronous( 3765 struct pqi_ctrl_info *ctrl_info, 3766 struct pqi_general_admin_request *request, 3767 struct pqi_general_admin_response *response) 3768 { 3769 int rc; 3770 3771 pqi_submit_admin_request(ctrl_info, request); 3772 3773 rc = pqi_poll_for_admin_response(ctrl_info, response); 3774 3775 if (rc == 0) 3776 rc = pqi_validate_admin_response(response, 3777 request->function_code); 3778 3779 return rc; 3780 } 3781 3782 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 3783 { 3784 int rc; 3785 struct pqi_general_admin_request request; 3786 struct pqi_general_admin_response response; 3787 struct pqi_device_capability *capability; 3788 struct 
pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 3789 3790 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 3791 if (!capability) 3792 return -ENOMEM; 3793 3794 memset(&request, 0, sizeof(request)); 3795 3796 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3797 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3798 &request.header.iu_length); 3799 request.function_code = 3800 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 3801 put_unaligned_le32(sizeof(*capability), 3802 &request.data.report_device_capability.buffer_length); 3803 3804 rc = pqi_map_single(ctrl_info->pci_dev, 3805 &request.data.report_device_capability.sg_descriptor, 3806 capability, sizeof(*capability), 3807 PCI_DMA_FROMDEVICE); 3808 if (rc) 3809 goto out; 3810 3811 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3812 &response); 3813 3814 pqi_pci_unmap(ctrl_info->pci_dev, 3815 &request.data.report_device_capability.sg_descriptor, 1, 3816 PCI_DMA_FROMDEVICE); 3817 3818 if (rc) 3819 goto out; 3820 3821 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 3822 rc = -EIO; 3823 goto out; 3824 } 3825 3826 ctrl_info->max_inbound_queues = 3827 get_unaligned_le16(&capability->max_inbound_queues); 3828 ctrl_info->max_elements_per_iq = 3829 get_unaligned_le16(&capability->max_elements_per_iq); 3830 ctrl_info->max_iq_element_length = 3831 get_unaligned_le16(&capability->max_iq_element_length) 3832 * 16; 3833 ctrl_info->max_outbound_queues = 3834 get_unaligned_le16(&capability->max_outbound_queues); 3835 ctrl_info->max_elements_per_oq = 3836 get_unaligned_le16(&capability->max_elements_per_oq); 3837 ctrl_info->max_oq_element_length = 3838 get_unaligned_le16(&capability->max_oq_element_length) 3839 * 16; 3840 3841 sop_iu_layer_descriptor = 3842 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 3843 3844 ctrl_info->max_inbound_iu_length_per_firmware = 3845 get_unaligned_le16( 3846 &sop_iu_layer_descriptor->max_inbound_iu_length); 3847 ctrl_info->inbound_spanning_supported = 3848 sop_iu_layer_descriptor->inbound_spanning_supported; 3849 ctrl_info->outbound_spanning_supported = 3850 sop_iu_layer_descriptor->outbound_spanning_supported; 3851 3852 out: 3853 kfree(capability); 3854 3855 return rc; 3856 } 3857 3858 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 3859 { 3860 if (ctrl_info->max_iq_element_length < 3861 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3862 dev_err(&ctrl_info->pci_dev->dev, 3863 "max. inbound queue element length of %d is less than the required length of %d\n", 3864 ctrl_info->max_iq_element_length, 3865 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3866 return -EINVAL; 3867 } 3868 3869 if (ctrl_info->max_oq_element_length < 3870 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 3871 dev_err(&ctrl_info->pci_dev->dev, 3872 "max. outbound queue element length of %d is less than the required length of %d\n", 3873 ctrl_info->max_oq_element_length, 3874 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3875 return -EINVAL; 3876 } 3877 3878 if (ctrl_info->max_inbound_iu_length_per_firmware < 3879 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3880 dev_err(&ctrl_info->pci_dev->dev, 3881 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 3882 ctrl_info->max_inbound_iu_length_per_firmware, 3883 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3884 return -EINVAL; 3885 } 3886 3887 if (!ctrl_info->inbound_spanning_supported) { 3888 dev_err(&ctrl_info->pci_dev->dev, 3889 "the controller does not support inbound spanning\n"); 3890 return -EINVAL; 3891 } 3892 3893 if (ctrl_info->outbound_spanning_supported) { 3894 dev_err(&ctrl_info->pci_dev->dev, 3895 "the controller supports outbound spanning but this driver does not\n"); 3896 return -EINVAL; 3897 } 3898 3899 return 0; 3900 } 3901 3902 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 3903 { 3904 int rc; 3905 struct pqi_event_queue *event_queue; 3906 struct pqi_general_admin_request request; 3907 struct pqi_general_admin_response response; 3908 3909 event_queue = &ctrl_info->event_queue; 3910 3911 /* 3912 * Create OQ (Outbound Queue - device to host queue) to dedicate 3913 * to events. 3914 */ 3915 memset(&request, 0, sizeof(request)); 3916 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3917 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3918 &request.header.iu_length); 3919 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 3920 put_unaligned_le16(event_queue->oq_id, 3921 &request.data.create_operational_oq.queue_id); 3922 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 3923 &request.data.create_operational_oq.element_array_addr); 3924 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 3925 &request.data.create_operational_oq.pi_addr); 3926 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 3927 &request.data.create_operational_oq.num_elements); 3928 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 3929 &request.data.create_operational_oq.element_length); 3930 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 3931 put_unaligned_le16(event_queue->int_msg_num, 3932 &request.data.create_operational_oq.int_msg_num); 3933 3934 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3935 &response); 3936 if (rc) 3937 return rc; 3938 3939 event_queue->oq_ci = ctrl_info->iomem_base + 3940 PQI_DEVICE_REGISTERS_OFFSET + 3941 get_unaligned_le64( 3942 &response.data.create_operational_oq.oq_ci_offset); 3943 3944 return 0; 3945 } 3946 3947 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 3948 unsigned int group_number) 3949 { 3950 int rc; 3951 struct pqi_queue_group *queue_group; 3952 struct pqi_general_admin_request request; 3953 struct pqi_general_admin_response response; 3954 3955 queue_group = &ctrl_info->queue_groups[group_number]; 3956 3957 /* 3958 * Create IQ (Inbound Queue - host to device queue) for 3959 * RAID path. 
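 * Each queue group is backed by two inbound queues (one for the RAID
 * path, one for the AIO path) and a single shared outbound queue for
 * completions; the admin requests below create the RAID IQ first, then
 * the AIO IQ, tag the latter as an AIO queue, and finally create the OQ.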
3960 */ 3961 memset(&request, 0, sizeof(request)); 3962 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3963 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3964 &request.header.iu_length); 3965 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 3966 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 3967 &request.data.create_operational_iq.queue_id); 3968 put_unaligned_le64( 3969 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 3970 &request.data.create_operational_iq.element_array_addr); 3971 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 3972 &request.data.create_operational_iq.ci_addr); 3973 put_unaligned_le16(ctrl_info->num_elements_per_iq, 3974 &request.data.create_operational_iq.num_elements); 3975 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 3976 &request.data.create_operational_iq.element_length); 3977 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 3978 3979 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3980 &response); 3981 if (rc) { 3982 dev_err(&ctrl_info->pci_dev->dev, 3983 "error creating inbound RAID queue\n"); 3984 return rc; 3985 } 3986 3987 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 3988 PQI_DEVICE_REGISTERS_OFFSET + 3989 get_unaligned_le64( 3990 &response.data.create_operational_iq.iq_pi_offset); 3991 3992 /* 3993 * Create IQ (Inbound Queue - host to device queue) for 3994 * Advanced I/O (AIO) path. 3995 */ 3996 memset(&request, 0, sizeof(request)); 3997 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3998 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3999 &request.header.iu_length); 4000 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4001 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4002 &request.data.create_operational_iq.queue_id); 4003 put_unaligned_le64((u64)queue_group-> 4004 iq_element_array_bus_addr[AIO_PATH], 4005 &request.data.create_operational_iq.element_array_addr); 4006 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4007 &request.data.create_operational_iq.ci_addr); 4008 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4009 &request.data.create_operational_iq.num_elements); 4010 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4011 &request.data.create_operational_iq.element_length); 4012 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4013 4014 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4015 &response); 4016 if (rc) { 4017 dev_err(&ctrl_info->pci_dev->dev, 4018 "error creating inbound AIO queue\n"); 4019 return rc; 4020 } 4021 4022 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4023 PQI_DEVICE_REGISTERS_OFFSET + 4024 get_unaligned_le64( 4025 &response.data.create_operational_iq.iq_pi_offset); 4026 4027 /* 4028 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4029 * assumed to be for RAID path I/O unless we change the queue's 4030 * property. 
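 * The CHANGE_IQ_PROPERTY admin request below does this by setting the
 * vendor-specific PQI_IQ_PROPERTY_IS_AIO_QUEUE bit on the IQ created
 * above, so the firmware treats requests posted to it as AIO path I/O.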
4031 */ 4032 memset(&request, 0, sizeof(request)); 4033 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4034 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4035 &request.header.iu_length); 4036 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4037 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4038 &request.data.change_operational_iq_properties.queue_id); 4039 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4040 &request.data.change_operational_iq_properties.vendor_specific); 4041 4042 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4043 &response); 4044 if (rc) { 4045 dev_err(&ctrl_info->pci_dev->dev, 4046 "error changing queue property\n"); 4047 return rc; 4048 } 4049 4050 /* 4051 * Create OQ (Outbound Queue - device to host queue). 4052 */ 4053 memset(&request, 0, sizeof(request)); 4054 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4055 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4056 &request.header.iu_length); 4057 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4058 put_unaligned_le16(queue_group->oq_id, 4059 &request.data.create_operational_oq.queue_id); 4060 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4061 &request.data.create_operational_oq.element_array_addr); 4062 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4063 &request.data.create_operational_oq.pi_addr); 4064 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4065 &request.data.create_operational_oq.num_elements); 4066 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4067 &request.data.create_operational_oq.element_length); 4068 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4069 put_unaligned_le16(queue_group->int_msg_num, 4070 &request.data.create_operational_oq.int_msg_num); 4071 4072 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4073 &response); 4074 if (rc) { 4075 dev_err(&ctrl_info->pci_dev->dev, 4076 "error creating outbound queue\n"); 4077 return rc; 4078 } 4079 4080 queue_group->oq_ci = ctrl_info->iomem_base + 4081 PQI_DEVICE_REGISTERS_OFFSET + 4082 get_unaligned_le64( 4083 &response.data.create_operational_oq.oq_ci_offset); 4084 4085 return 0; 4086 } 4087 4088 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4089 { 4090 int rc; 4091 unsigned int i; 4092 4093 rc = pqi_create_event_queue(ctrl_info); 4094 if (rc) { 4095 dev_err(&ctrl_info->pci_dev->dev, 4096 "error creating event queue\n"); 4097 return rc; 4098 } 4099 4100 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4101 rc = pqi_create_queue_group(ctrl_info, i); 4102 if (rc) { 4103 dev_err(&ctrl_info->pci_dev->dev, 4104 "error creating queue group number %u/%u\n", 4105 i, ctrl_info->num_queue_groups); 4106 return rc; 4107 } 4108 } 4109 4110 return 0; 4111 } 4112 4113 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4114 (offsetof(struct pqi_event_config, descriptors) + \ 4115 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4116 4117 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4118 bool enable_events) 4119 { 4120 int rc; 4121 unsigned int i; 4122 struct pqi_event_config *event_config; 4123 struct pqi_event_descriptor *event_descriptor; 4124 struct pqi_general_management_request request; 4125 4126 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4127 GFP_KERNEL); 4128 if (!event_config) 4129 return -ENOMEM; 4130 4131 memset(&request, 0, sizeof(request)); 4132 4133 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4134 
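	/*
	 * Event configuration is a read-modify-write cycle: report the
	 * controller's current event table, point every supported event
	 * type at the driver's event queue (or at OQ ID 0 to disable it),
	 * then write the table back with a SET_VENDOR_EVENT_CONFIG request.
	 */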
put_unaligned_le16(offsetof(struct pqi_general_management_request, 4135 data.report_event_configuration.sg_descriptors[1]) - 4136 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4137 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4138 &request.data.report_event_configuration.buffer_length); 4139 4140 rc = pqi_map_single(ctrl_info->pci_dev, 4141 request.data.report_event_configuration.sg_descriptors, 4142 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4143 PCI_DMA_FROMDEVICE); 4144 if (rc) 4145 goto out; 4146 4147 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4148 0, NULL, NO_TIMEOUT); 4149 4150 pqi_pci_unmap(ctrl_info->pci_dev, 4151 request.data.report_event_configuration.sg_descriptors, 1, 4152 PCI_DMA_FROMDEVICE); 4153 4154 if (rc) 4155 goto out; 4156 4157 for (i = 0; i < event_config->num_event_descriptors; i++) { 4158 event_descriptor = &event_config->descriptors[i]; 4159 if (enable_events && 4160 pqi_is_supported_event(event_descriptor->event_type)) 4161 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4162 &event_descriptor->oq_id); 4163 else 4164 put_unaligned_le16(0, &event_descriptor->oq_id); 4165 } 4166 4167 memset(&request, 0, sizeof(request)); 4168 4169 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4170 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4171 data.report_event_configuration.sg_descriptors[1]) - 4172 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4173 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4174 &request.data.report_event_configuration.buffer_length); 4175 4176 rc = pqi_map_single(ctrl_info->pci_dev, 4177 request.data.report_event_configuration.sg_descriptors, 4178 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4179 PCI_DMA_TODEVICE); 4180 if (rc) 4181 goto out; 4182 4183 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4184 NULL, NO_TIMEOUT); 4185 4186 pqi_pci_unmap(ctrl_info->pci_dev, 4187 request.data.report_event_configuration.sg_descriptors, 1, 4188 PCI_DMA_TODEVICE); 4189 4190 out: 4191 kfree(event_config); 4192 4193 return rc; 4194 } 4195 4196 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4197 { 4198 return pqi_configure_events(ctrl_info, true); 4199 } 4200 4201 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4202 { 4203 return pqi_configure_events(ctrl_info, false); 4204 } 4205 4206 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4207 { 4208 unsigned int i; 4209 struct device *dev; 4210 size_t sg_chain_buffer_length; 4211 struct pqi_io_request *io_request; 4212 4213 if (!ctrl_info->io_request_pool) 4214 return; 4215 4216 dev = &ctrl_info->pci_dev->dev; 4217 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4218 io_request = ctrl_info->io_request_pool; 4219 4220 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4221 kfree(io_request->iu); 4222 if (!io_request->sg_chain_buffer) 4223 break; 4224 dma_free_coherent(dev, sg_chain_buffer_length, 4225 io_request->sg_chain_buffer, 4226 io_request->sg_chain_buffer_dma_handle); 4227 io_request++; 4228 } 4229 4230 kfree(ctrl_info->io_request_pool); 4231 ctrl_info->io_request_pool = NULL; 4232 } 4233 4234 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4235 { 4236 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4237 ctrl_info->error_buffer_length, 4238 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4239 4240 if (!ctrl_info->error_buffer) 4241 return -ENOMEM; 
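	/*
	 * One PQI_ERROR_BUFFER_ELEMENT_LENGTH slot is reserved per I/O slot
	 * (sized in pqi_calculate_io_resources()); the controller deposits
	 * per-request error/sense information here, which completion
	 * handling reads back through io_request->error_info.
	 */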
4242 4243 return 0; 4244 } 4245 4246 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4247 { 4248 unsigned int i; 4249 void *sg_chain_buffer; 4250 size_t sg_chain_buffer_length; 4251 dma_addr_t sg_chain_buffer_dma_handle; 4252 struct device *dev; 4253 struct pqi_io_request *io_request; 4254 4255 ctrl_info->io_request_pool = 4256 kcalloc(ctrl_info->max_io_slots, 4257 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4258 4259 if (!ctrl_info->io_request_pool) { 4260 dev_err(&ctrl_info->pci_dev->dev, 4261 "failed to allocate I/O request pool\n"); 4262 goto error; 4263 } 4264 4265 dev = &ctrl_info->pci_dev->dev; 4266 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4267 io_request = ctrl_info->io_request_pool; 4268 4269 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4270 io_request->iu = 4271 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4272 4273 if (!io_request->iu) { 4274 dev_err(&ctrl_info->pci_dev->dev, 4275 "failed to allocate IU buffers\n"); 4276 goto error; 4277 } 4278 4279 sg_chain_buffer = dma_alloc_coherent(dev, 4280 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4281 GFP_KERNEL); 4282 4283 if (!sg_chain_buffer) { 4284 dev_err(&ctrl_info->pci_dev->dev, 4285 "failed to allocate PQI scatter-gather chain buffers\n"); 4286 goto error; 4287 } 4288 4289 io_request->index = i; 4290 io_request->sg_chain_buffer = sg_chain_buffer; 4291 io_request->sg_chain_buffer_dma_handle = 4292 sg_chain_buffer_dma_handle; 4293 io_request++; 4294 } 4295 4296 return 0; 4297 4298 error: 4299 pqi_free_all_io_requests(ctrl_info); 4300 4301 return -ENOMEM; 4302 } 4303 4304 /* 4305 * Calculate required resources that are sized based on max. outstanding 4306 * requests and max. transfer size. 4307 */ 4308 4309 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4310 { 4311 u32 max_transfer_size; 4312 u32 max_sg_entries; 4313 4314 ctrl_info->scsi_ml_can_queue = 4315 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4316 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4317 4318 ctrl_info->error_buffer_length = 4319 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4320 4321 if (reset_devices) 4322 max_transfer_size = min(ctrl_info->max_transfer_size, 4323 PQI_MAX_TRANSFER_SIZE_KDUMP); 4324 else 4325 max_transfer_size = min(ctrl_info->max_transfer_size, 4326 PQI_MAX_TRANSFER_SIZE); 4327 4328 max_sg_entries = max_transfer_size / PAGE_SIZE; 4329 4330 /* +1 to cover when the buffer is not page-aligned. 
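	 * (a buffer that does not start on a page boundary can straddle one
	 * extra page, and therefore needs one extra scatter-gather entry)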
*/ 4331 max_sg_entries++; 4332 4333 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4334 4335 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4336 4337 ctrl_info->sg_chain_buffer_length = 4338 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4339 PQI_EXTRA_SGL_MEMORY; 4340 ctrl_info->sg_tablesize = max_sg_entries; 4341 ctrl_info->max_sectors = max_transfer_size / 512; 4342 } 4343 4344 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4345 { 4346 int num_queue_groups; 4347 u16 num_elements_per_iq; 4348 u16 num_elements_per_oq; 4349 4350 if (reset_devices) { 4351 num_queue_groups = 1; 4352 } else { 4353 int num_cpus; 4354 int max_queue_groups; 4355 4356 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4357 ctrl_info->max_outbound_queues - 1); 4358 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4359 4360 num_cpus = num_online_cpus(); 4361 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4362 num_queue_groups = min(num_queue_groups, max_queue_groups); 4363 } 4364 4365 ctrl_info->num_queue_groups = num_queue_groups; 4366 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4367 4368 /* 4369 * Make sure that the max. inbound IU length is an even multiple 4370 * of our inbound element length. 4371 */ 4372 ctrl_info->max_inbound_iu_length = 4373 (ctrl_info->max_inbound_iu_length_per_firmware / 4374 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4375 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4376 4377 num_elements_per_iq = 4378 (ctrl_info->max_inbound_iu_length / 4379 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4380 4381 /* Add one because one element in each queue is unusable. */ 4382 num_elements_per_iq++; 4383 4384 num_elements_per_iq = min(num_elements_per_iq, 4385 ctrl_info->max_elements_per_iq); 4386 4387 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4388 num_elements_per_oq = min(num_elements_per_oq, 4389 ctrl_info->max_elements_per_oq); 4390 4391 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4392 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4393 4394 ctrl_info->max_sg_per_iu = 4395 ((ctrl_info->max_inbound_iu_length - 4396 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4397 sizeof(struct pqi_sg_descriptor)) + 4398 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4399 } 4400 4401 static inline void pqi_set_sg_descriptor( 4402 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4403 { 4404 u64 address = (u64)sg_dma_address(sg); 4405 unsigned int length = sg_dma_len(sg); 4406 4407 put_unaligned_le64(address, &sg_descriptor->address); 4408 put_unaligned_le32(length, &sg_descriptor->length); 4409 put_unaligned_le32(0, &sg_descriptor->flags); 4410 } 4411 4412 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4413 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4414 struct pqi_io_request *io_request) 4415 { 4416 int i; 4417 u16 iu_length; 4418 int sg_count; 4419 bool chained; 4420 unsigned int num_sg_in_iu; 4421 unsigned int max_sg_per_iu; 4422 struct scatterlist *sg; 4423 struct pqi_sg_descriptor *sg_descriptor; 4424 4425 sg_count = scsi_dma_map(scmd); 4426 if (sg_count < 0) 4427 return sg_count; 4428 4429 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4430 PQI_REQUEST_HEADER_LENGTH; 4431 4432 if (sg_count == 0) 4433 goto out; 4434 4435 sg = scsi_sglist(scmd); 4436 sg_descriptor = request->sg_descriptors; 4437 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4438 chained = false; 4439 num_sg_in_iu = 0; 4440 i = 0; 4441 4442 while (1) { 4443 
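		/*
		 * Data descriptors are embedded directly in the IU; if the
		 * scatterlist is too long to fit, the next embedded
		 * descriptor becomes a CISS_SG_CHAIN pointer to this
		 * request's preallocated chain buffer and the remaining
		 * entries are written there instead.
		 */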
pqi_set_sg_descriptor(sg_descriptor, sg); 4444 if (!chained) 4445 num_sg_in_iu++; 4446 i++; 4447 if (i == sg_count) 4448 break; 4449 sg_descriptor++; 4450 if (i == max_sg_per_iu) { 4451 put_unaligned_le64( 4452 (u64)io_request->sg_chain_buffer_dma_handle, 4453 &sg_descriptor->address); 4454 put_unaligned_le32((sg_count - num_sg_in_iu) 4455 * sizeof(*sg_descriptor), 4456 &sg_descriptor->length); 4457 put_unaligned_le32(CISS_SG_CHAIN, 4458 &sg_descriptor->flags); 4459 chained = true; 4460 num_sg_in_iu++; 4461 sg_descriptor = io_request->sg_chain_buffer; 4462 } 4463 sg = sg_next(sg); 4464 } 4465 4466 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4467 request->partial = chained; 4468 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4469 4470 out: 4471 put_unaligned_le16(iu_length, &request->header.iu_length); 4472 4473 return 0; 4474 } 4475 4476 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4477 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4478 struct pqi_io_request *io_request) 4479 { 4480 int i; 4481 u16 iu_length; 4482 int sg_count; 4483 bool chained; 4484 unsigned int num_sg_in_iu; 4485 unsigned int max_sg_per_iu; 4486 struct scatterlist *sg; 4487 struct pqi_sg_descriptor *sg_descriptor; 4488 4489 sg_count = scsi_dma_map(scmd); 4490 if (sg_count < 0) 4491 return sg_count; 4492 4493 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4494 PQI_REQUEST_HEADER_LENGTH; 4495 num_sg_in_iu = 0; 4496 4497 if (sg_count == 0) 4498 goto out; 4499 4500 sg = scsi_sglist(scmd); 4501 sg_descriptor = request->sg_descriptors; 4502 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4503 chained = false; 4504 i = 0; 4505 4506 while (1) { 4507 pqi_set_sg_descriptor(sg_descriptor, sg); 4508 if (!chained) 4509 num_sg_in_iu++; 4510 i++; 4511 if (i == sg_count) 4512 break; 4513 sg_descriptor++; 4514 if (i == max_sg_per_iu) { 4515 put_unaligned_le64( 4516 (u64)io_request->sg_chain_buffer_dma_handle, 4517 &sg_descriptor->address); 4518 put_unaligned_le32((sg_count - num_sg_in_iu) 4519 * sizeof(*sg_descriptor), 4520 &sg_descriptor->length); 4521 put_unaligned_le32(CISS_SG_CHAIN, 4522 &sg_descriptor->flags); 4523 chained = true; 4524 num_sg_in_iu++; 4525 sg_descriptor = io_request->sg_chain_buffer; 4526 } 4527 sg = sg_next(sg); 4528 } 4529 4530 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4531 request->partial = chained; 4532 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4533 4534 out: 4535 put_unaligned_le16(iu_length, &request->header.iu_length); 4536 request->num_sg_descriptors = num_sg_in_iu; 4537 4538 return 0; 4539 } 4540 4541 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4542 void *context) 4543 { 4544 struct scsi_cmnd *scmd; 4545 4546 scmd = io_request->scmd; 4547 pqi_free_io_request(io_request); 4548 scsi_dma_unmap(scmd); 4549 pqi_scsi_done(scmd); 4550 } 4551 4552 static int pqi_raid_submit_scsi_cmd_with_io_request( 4553 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4554 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4555 struct pqi_queue_group *queue_group) 4556 { 4557 int rc; 4558 size_t cdb_length; 4559 struct pqi_raid_path_request *request; 4560 4561 io_request->io_complete_callback = pqi_raid_io_complete; 4562 io_request->scmd = scmd; 4563 4564 request = io_request->iu; 4565 memset(request, 0, 4566 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4567 4568 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4569 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4570 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4571 put_unaligned_le16(io_request->index, &request->request_id); 4572 request->error_index = request->request_id; 4573 memcpy(request->lun_number, device->scsi3addr, 4574 sizeof(request->lun_number)); 4575 4576 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4577 memcpy(request->cdb, scmd->cmnd, cdb_length); 4578 4579 switch (cdb_length) { 4580 case 6: 4581 case 10: 4582 case 12: 4583 case 16: 4584 /* No bytes in the Additional CDB bytes field */ 4585 request->additional_cdb_bytes_usage = 4586 SOP_ADDITIONAL_CDB_BYTES_0; 4587 break; 4588 case 20: 4589 /* 4 bytes in the Additional cdb field */ 4590 request->additional_cdb_bytes_usage = 4591 SOP_ADDITIONAL_CDB_BYTES_4; 4592 break; 4593 case 24: 4594 /* 8 bytes in the Additional cdb field */ 4595 request->additional_cdb_bytes_usage = 4596 SOP_ADDITIONAL_CDB_BYTES_8; 4597 break; 4598 case 28: 4599 /* 12 bytes in the Additional cdb field */ 4600 request->additional_cdb_bytes_usage = 4601 SOP_ADDITIONAL_CDB_BYTES_12; 4602 break; 4603 case 32: 4604 default: 4605 /* 16 bytes in the Additional cdb field */ 4606 request->additional_cdb_bytes_usage = 4607 SOP_ADDITIONAL_CDB_BYTES_16; 4608 break; 4609 } 4610 4611 switch (scmd->sc_data_direction) { 4612 case DMA_TO_DEVICE: 4613 request->data_direction = SOP_READ_FLAG; 4614 break; 4615 case DMA_FROM_DEVICE: 4616 request->data_direction = SOP_WRITE_FLAG; 4617 break; 4618 case DMA_NONE: 4619 request->data_direction = SOP_NO_DIRECTION_FLAG; 4620 break; 4621 case DMA_BIDIRECTIONAL: 4622 request->data_direction = SOP_BIDIRECTIONAL; 4623 break; 4624 default: 4625 dev_err(&ctrl_info->pci_dev->dev, 4626 "unknown data direction: %d\n", 4627 scmd->sc_data_direction); 4628 break; 4629 } 4630 4631 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 4632 if (rc) { 4633 pqi_free_io_request(io_request); 4634 return SCSI_MLQUEUE_HOST_BUSY; 4635 } 4636 4637 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 4638 4639 return 0; 4640 } 4641 4642 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4643 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4644 struct pqi_queue_group *queue_group) 4645 { 4646 struct pqi_io_request *io_request; 4647 4648 io_request = pqi_alloc_io_request(ctrl_info); 4649 4650 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4651 device, scmd, queue_group); 4652 } 4653 4654 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 4655 { 4656 if (!pqi_ctrl_blocked(ctrl_info)) 4657 schedule_work(&ctrl_info->raid_bypass_retry_work); 4658 } 4659 4660 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 4661 { 4662 struct scsi_cmnd *scmd; 4663 struct pqi_scsi_dev *device; 4664 struct pqi_ctrl_info *ctrl_info; 4665 4666 if (!io_request->raid_bypass) 4667 return false; 4668 4669 scmd = io_request->scmd; 4670 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 4671 return false; 4672 if (host_byte(scmd->result) == DID_NO_CONNECT) 4673 return false; 4674 4675 device = scmd->device->hostdata; 4676 if (pqi_device_offline(device)) 4677 return false; 4678 4679 ctrl_info = shost_to_hba(scmd->device->host); 4680 if (pqi_ctrl_offline(ctrl_info)) 4681 return false; 4682 4683 return true; 4684 } 4685 4686 static inline void pqi_add_to_raid_bypass_retry_list( 4687 struct pqi_ctrl_info *ctrl_info, 4688 struct pqi_io_request *io_request, bool at_head) 4689 { 4690 unsigned long flags; 4691 4692 
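	/*
	 * Failed RAID-bypass (AIO) requests are parked on this
	 * per-controller list and later resubmitted down the RAID path by
	 * the retry worker; at_head is used when a resubmission itself
	 * fails so that ordering is preserved.
	 */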
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4693 if (at_head) 4694 list_add(&io_request->request_list_entry, 4695 &ctrl_info->raid_bypass_retry_list); 4696 else 4697 list_add_tail(&io_request->request_list_entry, 4698 &ctrl_info->raid_bypass_retry_list); 4699 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4700 } 4701 4702 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 4703 void *context) 4704 { 4705 struct scsi_cmnd *scmd; 4706 4707 scmd = io_request->scmd; 4708 pqi_free_io_request(io_request); 4709 pqi_scsi_done(scmd); 4710 } 4711 4712 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 4713 { 4714 struct scsi_cmnd *scmd; 4715 struct pqi_ctrl_info *ctrl_info; 4716 4717 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 4718 scmd = io_request->scmd; 4719 scmd->result = 0; 4720 ctrl_info = shost_to_hba(scmd->device->host); 4721 4722 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 4723 pqi_schedule_bypass_retry(ctrl_info); 4724 } 4725 4726 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 4727 { 4728 struct scsi_cmnd *scmd; 4729 struct pqi_scsi_dev *device; 4730 struct pqi_ctrl_info *ctrl_info; 4731 struct pqi_queue_group *queue_group; 4732 4733 scmd = io_request->scmd; 4734 device = scmd->device->hostdata; 4735 if (pqi_device_in_reset(device)) { 4736 pqi_free_io_request(io_request); 4737 set_host_byte(scmd, DID_RESET); 4738 pqi_scsi_done(scmd); 4739 return 0; 4740 } 4741 4742 ctrl_info = shost_to_hba(scmd->device->host); 4743 queue_group = io_request->queue_group; 4744 4745 pqi_reinit_io_request(io_request); 4746 4747 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4748 device, scmd, queue_group); 4749 } 4750 4751 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 4752 struct pqi_ctrl_info *ctrl_info) 4753 { 4754 unsigned long flags; 4755 struct pqi_io_request *io_request; 4756 4757 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4758 io_request = list_first_entry_or_null( 4759 &ctrl_info->raid_bypass_retry_list, 4760 struct pqi_io_request, request_list_entry); 4761 if (io_request) 4762 list_del(&io_request->request_list_entry); 4763 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4764 4765 return io_request; 4766 } 4767 4768 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 4769 { 4770 int rc; 4771 struct pqi_io_request *io_request; 4772 4773 pqi_ctrl_busy(ctrl_info); 4774 4775 while (1) { 4776 if (pqi_ctrl_blocked(ctrl_info)) 4777 break; 4778 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 4779 if (!io_request) 4780 break; 4781 rc = pqi_retry_raid_bypass(io_request); 4782 if (rc) { 4783 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 4784 true); 4785 pqi_schedule_bypass_retry(ctrl_info); 4786 break; 4787 } 4788 } 4789 4790 pqi_ctrl_unbusy(ctrl_info); 4791 } 4792 4793 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 4794 { 4795 struct pqi_ctrl_info *ctrl_info; 4796 4797 ctrl_info = container_of(work, struct pqi_ctrl_info, 4798 raid_bypass_retry_work); 4799 pqi_retry_raid_bypass_requests(ctrl_info); 4800 } 4801 4802 static void pqi_clear_all_queued_raid_bypass_retries( 4803 struct pqi_ctrl_info *ctrl_info) 4804 { 4805 unsigned long flags; 4806 4807 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4808 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 4809 
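	/*
	 * Re-initializing the list head detaches every queued retry in one
	 * step; the underlying requests are completed elsewhere (e.g. when
	 * the controller is taken offline).
	 */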
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4810 } 4811 4812 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 4813 void *context) 4814 { 4815 struct scsi_cmnd *scmd; 4816 4817 scmd = io_request->scmd; 4818 scsi_dma_unmap(scmd); 4819 if (io_request->status == -EAGAIN) 4820 set_host_byte(scmd, DID_IMM_RETRY); 4821 else if (pqi_raid_bypass_retry_needed(io_request)) { 4822 pqi_queue_raid_bypass_retry(io_request); 4823 return; 4824 } 4825 pqi_free_io_request(io_request); 4826 pqi_scsi_done(scmd); 4827 } 4828 4829 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4830 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4831 struct pqi_queue_group *queue_group) 4832 { 4833 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 4834 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 4835 } 4836 4837 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 4838 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 4839 unsigned int cdb_length, struct pqi_queue_group *queue_group, 4840 struct pqi_encryption_info *encryption_info, bool raid_bypass) 4841 { 4842 int rc; 4843 struct pqi_io_request *io_request; 4844 struct pqi_aio_path_request *request; 4845 4846 io_request = pqi_alloc_io_request(ctrl_info); 4847 io_request->io_complete_callback = pqi_aio_io_complete; 4848 io_request->scmd = scmd; 4849 io_request->raid_bypass = raid_bypass; 4850 4851 request = io_request->iu; 4852 memset(request, 0, 4853 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4854 4855 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 4856 put_unaligned_le32(aio_handle, &request->nexus_id); 4857 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 4858 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4859 put_unaligned_le16(io_request->index, &request->request_id); 4860 request->error_index = request->request_id; 4861 if (cdb_length > sizeof(request->cdb)) 4862 cdb_length = sizeof(request->cdb); 4863 request->cdb_length = cdb_length; 4864 memcpy(request->cdb, cdb, cdb_length); 4865 4866 switch (scmd->sc_data_direction) { 4867 case DMA_TO_DEVICE: 4868 request->data_direction = SOP_READ_FLAG; 4869 break; 4870 case DMA_FROM_DEVICE: 4871 request->data_direction = SOP_WRITE_FLAG; 4872 break; 4873 case DMA_NONE: 4874 request->data_direction = SOP_NO_DIRECTION_FLAG; 4875 break; 4876 case DMA_BIDIRECTIONAL: 4877 request->data_direction = SOP_BIDIRECTIONAL; 4878 break; 4879 default: 4880 dev_err(&ctrl_info->pci_dev->dev, 4881 "unknown data direction: %d\n", 4882 scmd->sc_data_direction); 4883 break; 4884 } 4885 4886 if (encryption_info) { 4887 request->encryption_enable = true; 4888 put_unaligned_le16(encryption_info->data_encryption_key_index, 4889 &request->data_encryption_key_index); 4890 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 4891 &request->encrypt_tweak_lower); 4892 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 4893 &request->encrypt_tweak_upper); 4894 } 4895 4896 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 4897 if (rc) { 4898 pqi_free_io_request(io_request); 4899 return SCSI_MLQUEUE_HOST_BUSY; 4900 } 4901 4902 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 4903 4904 return 0; 4905 } 4906 4907 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 4908 struct scsi_cmnd *scmd) 4909 { 4910 u16 hw_queue; 4911 4912 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 4913 if (hw_queue > ctrl_info->max_hw_queue_index) 4914 hw_queue = 
0; 4915 4916 return hw_queue; 4917 } 4918 4919 /* 4920 * This function gets called just before we hand the completed SCSI request 4921 * back to the SML. 4922 */ 4923 4924 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 4925 { 4926 struct pqi_scsi_dev *device; 4927 4928 device = scmd->device->hostdata; 4929 atomic_dec(&device->scsi_cmds_outstanding); 4930 } 4931 4932 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 4933 struct scsi_cmnd *scmd) 4934 { 4935 int rc; 4936 struct pqi_ctrl_info *ctrl_info; 4937 struct pqi_scsi_dev *device; 4938 u16 hw_queue; 4939 struct pqi_queue_group *queue_group; 4940 bool raid_bypassed; 4941 4942 device = scmd->device->hostdata; 4943 ctrl_info = shost_to_hba(shost); 4944 4945 atomic_inc(&device->scsi_cmds_outstanding); 4946 4947 if (pqi_ctrl_offline(ctrl_info)) { 4948 set_host_byte(scmd, DID_NO_CONNECT); 4949 pqi_scsi_done(scmd); 4950 return 0; 4951 } 4952 4953 pqi_ctrl_busy(ctrl_info); 4954 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { 4955 rc = SCSI_MLQUEUE_HOST_BUSY; 4956 goto out; 4957 } 4958 4959 /* 4960 * This is necessary because the SML doesn't zero out this field during 4961 * error recovery. 4962 */ 4963 scmd->result = 0; 4964 4965 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 4966 queue_group = &ctrl_info->queue_groups[hw_queue]; 4967 4968 if (pqi_is_logical_device(device)) { 4969 raid_bypassed = false; 4970 if (device->raid_bypass_enabled && 4971 !blk_rq_is_passthrough(scmd->request)) { 4972 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 4973 scmd, queue_group); 4974 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 4975 raid_bypassed = true; 4976 } 4977 if (!raid_bypassed) 4978 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 4979 queue_group); 4980 } else { 4981 if (device->aio_enabled) 4982 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 4983 queue_group); 4984 else 4985 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 4986 queue_group); 4987 } 4988 4989 out: 4990 pqi_ctrl_unbusy(ctrl_info); 4991 if (rc) 4992 atomic_dec(&device->scsi_cmds_outstanding); 4993 4994 return rc; 4995 } 4996 4997 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 4998 struct pqi_queue_group *queue_group) 4999 { 5000 unsigned int path; 5001 unsigned long flags; 5002 bool list_is_empty; 5003 5004 for (path = 0; path < 2; path++) { 5005 while (1) { 5006 spin_lock_irqsave( 5007 &queue_group->submit_lock[path], flags); 5008 list_is_empty = 5009 list_empty(&queue_group->request_list[path]); 5010 spin_unlock_irqrestore( 5011 &queue_group->submit_lock[path], flags); 5012 if (list_is_empty) 5013 break; 5014 pqi_check_ctrl_health(ctrl_info); 5015 if (pqi_ctrl_offline(ctrl_info)) 5016 return -ENXIO; 5017 usleep_range(1000, 2000); 5018 } 5019 } 5020 5021 return 0; 5022 } 5023 5024 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5025 { 5026 int rc; 5027 unsigned int i; 5028 unsigned int path; 5029 struct pqi_queue_group *queue_group; 5030 pqi_index_t iq_pi; 5031 pqi_index_t iq_ci; 5032 5033 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5034 queue_group = &ctrl_info->queue_groups[i]; 5035 5036 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5037 if (rc) 5038 return rc; 5039 5040 for (path = 0; path < 2; path++) { 5041 iq_pi = queue_group->iq_pi_copy[path]; 5042 5043 while (1) { 5044 iq_ci = *queue_group->iq_ci[path]; 5045 if (iq_ci == iq_pi) 5046 break; 5047 pqi_check_ctrl_health(ctrl_info); 5048 if (pqi_ctrl_offline(ctrl_info)) 5049 return -ENXIO; 
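				/*
				 * Keep polling until the firmware's consumer
				 * index catches up with the driver's cached
				 * producer index, i.e. every element posted
				 * to this inbound queue has been consumed.
				 */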
5050 usleep_range(1000, 2000); 5051 } 5052 } 5053 } 5054 5055 return 0; 5056 } 5057 5058 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5059 struct pqi_scsi_dev *device) 5060 { 5061 unsigned int i; 5062 unsigned int path; 5063 struct pqi_queue_group *queue_group; 5064 unsigned long flags; 5065 struct pqi_io_request *io_request; 5066 struct pqi_io_request *next; 5067 struct scsi_cmnd *scmd; 5068 struct pqi_scsi_dev *scsi_device; 5069 5070 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5071 queue_group = &ctrl_info->queue_groups[i]; 5072 5073 for (path = 0; path < 2; path++) { 5074 spin_lock_irqsave( 5075 &queue_group->submit_lock[path], flags); 5076 5077 list_for_each_entry_safe(io_request, next, 5078 &queue_group->request_list[path], 5079 request_list_entry) { 5080 scmd = io_request->scmd; 5081 if (!scmd) 5082 continue; 5083 5084 scsi_device = scmd->device->hostdata; 5085 if (scsi_device != device) 5086 continue; 5087 5088 list_del(&io_request->request_list_entry); 5089 set_host_byte(scmd, DID_RESET); 5090 pqi_scsi_done(scmd); 5091 } 5092 5093 spin_unlock_irqrestore( 5094 &queue_group->submit_lock[path], flags); 5095 } 5096 } 5097 } 5098 5099 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5100 struct pqi_scsi_dev *device) 5101 { 5102 while (atomic_read(&device->scsi_cmds_outstanding)) { 5103 pqi_check_ctrl_health(ctrl_info); 5104 if (pqi_ctrl_offline(ctrl_info)) 5105 return -ENXIO; 5106 usleep_range(1000, 2000); 5107 } 5108 5109 return 0; 5110 } 5111 5112 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) 5113 { 5114 bool io_pending; 5115 unsigned long flags; 5116 struct pqi_scsi_dev *device; 5117 5118 while (1) { 5119 io_pending = false; 5120 5121 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5122 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5123 scsi_device_list_entry) { 5124 if (atomic_read(&device->scsi_cmds_outstanding)) { 5125 io_pending = true; 5126 break; 5127 } 5128 } 5129 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5130 flags); 5131 5132 if (!io_pending) 5133 break; 5134 5135 pqi_check_ctrl_health(ctrl_info); 5136 if (pqi_ctrl_offline(ctrl_info)) 5137 return -ENXIO; 5138 5139 usleep_range(1000, 2000); 5140 } 5141 5142 return 0; 5143 } 5144 5145 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5146 void *context) 5147 { 5148 struct completion *waiting = context; 5149 5150 complete(waiting); 5151 } 5152 5153 #define PQI_LUN_RESET_TIMEOUT_SECS 10 5154 5155 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5156 struct pqi_scsi_dev *device, struct completion *wait) 5157 { 5158 int rc; 5159 5160 while (1) { 5161 if (wait_for_completion_io_timeout(wait, 5162 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 5163 rc = 0; 5164 break; 5165 } 5166 5167 pqi_check_ctrl_health(ctrl_info); 5168 if (pqi_ctrl_offline(ctrl_info)) { 5169 rc = -ENXIO; 5170 break; 5171 } 5172 } 5173 5174 return rc; 5175 } 5176 5177 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5178 struct pqi_scsi_dev *device) 5179 { 5180 int rc; 5181 struct pqi_io_request *io_request; 5182 DECLARE_COMPLETION_ONSTACK(wait); 5183 struct pqi_task_management_request *request; 5184 5185 io_request = pqi_alloc_io_request(ctrl_info); 5186 io_request->io_complete_callback = pqi_lun_reset_complete; 5187 io_request->context = &wait; 5188 5189 request = io_request->iu; 5190 memset(request, 0, sizeof(*request)); 5191 5192 request->header.iu_type = 
PQI_REQUEST_IU_TASK_MANAGEMENT; 5193 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5194 &request->header.iu_length); 5195 put_unaligned_le16(io_request->index, &request->request_id); 5196 memcpy(request->lun_number, device->scsi3addr, 5197 sizeof(request->lun_number)); 5198 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5199 5200 pqi_start_io(ctrl_info, 5201 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5202 io_request); 5203 5204 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5205 if (rc == 0) 5206 rc = io_request->status; 5207 5208 pqi_free_io_request(io_request); 5209 5210 return rc; 5211 } 5212 5213 /* Performs a reset at the LUN level. */ 5214 5215 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5216 struct pqi_scsi_dev *device) 5217 { 5218 int rc; 5219 5220 rc = pqi_lun_reset(ctrl_info, device); 5221 if (rc == 0) 5222 rc = pqi_device_wait_for_pending_io(ctrl_info, device); 5223 5224 return rc == 0 ? SUCCESS : FAILED; 5225 } 5226 5227 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5228 { 5229 int rc; 5230 struct Scsi_Host *shost; 5231 struct pqi_ctrl_info *ctrl_info; 5232 struct pqi_scsi_dev *device; 5233 5234 shost = scmd->device->host; 5235 ctrl_info = shost_to_hba(shost); 5236 device = scmd->device->hostdata; 5237 5238 dev_err(&ctrl_info->pci_dev->dev, 5239 "resetting scsi %d:%d:%d:%d\n", 5240 shost->host_no, device->bus, device->target, device->lun); 5241 5242 pqi_check_ctrl_health(ctrl_info); 5243 if (pqi_ctrl_offline(ctrl_info)) { 5244 rc = FAILED; 5245 goto out; 5246 } 5247 5248 mutex_lock(&ctrl_info->lun_reset_mutex); 5249 5250 pqi_ctrl_block_requests(ctrl_info); 5251 pqi_ctrl_wait_until_quiesced(ctrl_info); 5252 pqi_fail_io_queued_for_device(ctrl_info, device); 5253 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5254 pqi_device_reset_start(device); 5255 pqi_ctrl_unblock_requests(ctrl_info); 5256 5257 if (rc) 5258 rc = FAILED; 5259 else 5260 rc = pqi_device_reset(ctrl_info, device); 5261 5262 pqi_device_reset_done(device); 5263 5264 mutex_unlock(&ctrl_info->lun_reset_mutex); 5265 5266 out: 5267 dev_err(&ctrl_info->pci_dev->dev, 5268 "reset of scsi %d:%d:%d:%d: %s\n", 5269 shost->host_no, device->bus, device->target, device->lun, 5270 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5271 5272 return rc; 5273 } 5274 5275 static int pqi_slave_alloc(struct scsi_device *sdev) 5276 { 5277 struct pqi_scsi_dev *device; 5278 unsigned long flags; 5279 struct pqi_ctrl_info *ctrl_info; 5280 struct scsi_target *starget; 5281 struct sas_rphy *rphy; 5282 5283 ctrl_info = shost_to_hba(sdev->host); 5284 5285 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5286 5287 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5288 starget = scsi_target(sdev); 5289 rphy = target_to_rphy(starget); 5290 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5291 if (device) { 5292 device->target = sdev_id(sdev); 5293 device->lun = sdev->lun; 5294 device->target_lun_valid = true; 5295 } 5296 } else { 5297 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5298 sdev_id(sdev), sdev->lun); 5299 } 5300 5301 if (device) { 5302 sdev->hostdata = device; 5303 device->sdev = sdev; 5304 if (device->queue_depth) { 5305 device->advertised_queue_depth = device->queue_depth; 5306 scsi_change_queue_depth(sdev, 5307 device->advertised_queue_depth); 5308 } 5309 } 5310 5311 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5312 5313 return 0; 5314 } 5315 5316 static int pqi_map_queues(struct Scsi_Host *shost) 5317 { 5318 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5319 5320 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0); 5321 } 5322 5323 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5324 void __user *arg) 5325 { 5326 struct pci_dev *pci_dev; 5327 u32 subsystem_vendor; 5328 u32 subsystem_device; 5329 cciss_pci_info_struct pciinfo; 5330 5331 if (!arg) 5332 return -EINVAL; 5333 5334 pci_dev = ctrl_info->pci_dev; 5335 5336 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5337 pciinfo.bus = pci_dev->bus->number; 5338 pciinfo.dev_fn = pci_dev->devfn; 5339 subsystem_vendor = pci_dev->subsystem_vendor; 5340 subsystem_device = pci_dev->subsystem_device; 5341 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5342 subsystem_vendor; 5343 5344 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5345 return -EFAULT; 5346 5347 return 0; 5348 } 5349 5350 static int pqi_getdrivver_ioctl(void __user *arg) 5351 { 5352 u32 version; 5353 5354 if (!arg) 5355 return -EINVAL; 5356 5357 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5358 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5359 5360 if (copy_to_user(arg, &version, sizeof(version))) 5361 return -EFAULT; 5362 5363 return 0; 5364 } 5365 5366 struct ciss_error_info { 5367 u8 scsi_status; 5368 int command_status; 5369 size_t sense_data_length; 5370 }; 5371 5372 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5373 struct ciss_error_info *ciss_error_info) 5374 { 5375 int ciss_cmd_status; 5376 size_t sense_data_length; 5377 5378 switch (pqi_error_info->data_out_result) { 5379 case PQI_DATA_IN_OUT_GOOD: 5380 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5381 break; 5382 case PQI_DATA_IN_OUT_UNDERFLOW: 5383 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5384 break; 5385 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5386 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5387 break; 5388 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5389 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5390 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5391 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5392 case PQI_DATA_IN_OUT_ERROR: 5393 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5394 break; 5395 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5396 case 
PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 5397 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5398 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5399 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5400 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5401 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5402 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5403 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5404 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5405 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5406 break; 5407 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5408 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5409 break; 5410 case PQI_DATA_IN_OUT_ABORTED: 5411 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5412 break; 5413 case PQI_DATA_IN_OUT_TIMEOUT: 5414 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5415 break; 5416 default: 5417 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5418 break; 5419 } 5420 5421 sense_data_length = 5422 get_unaligned_le16(&pqi_error_info->sense_data_length); 5423 if (sense_data_length == 0) 5424 sense_data_length = 5425 get_unaligned_le16(&pqi_error_info->response_data_length); 5426 if (sense_data_length) 5427 if (sense_data_length > sizeof(pqi_error_info->data)) 5428 sense_data_length = sizeof(pqi_error_info->data); 5429 5430 ciss_error_info->scsi_status = pqi_error_info->status; 5431 ciss_error_info->command_status = ciss_cmd_status; 5432 ciss_error_info->sense_data_length = sense_data_length; 5433 } 5434 5435 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5436 { 5437 int rc; 5438 char *kernel_buffer = NULL; 5439 u16 iu_length; 5440 size_t sense_data_length; 5441 IOCTL_Command_struct iocommand; 5442 struct pqi_raid_path_request request; 5443 struct pqi_raid_error_info pqi_error_info; 5444 struct ciss_error_info ciss_error_info; 5445 5446 if (pqi_ctrl_offline(ctrl_info)) 5447 return -ENXIO; 5448 if (!arg) 5449 return -EINVAL; 5450 if (!capable(CAP_SYS_RAWIO)) 5451 return -EPERM; 5452 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5453 return -EFAULT; 5454 if (iocommand.buf_size < 1 && 5455 iocommand.Request.Type.Direction != XFER_NONE) 5456 return -EINVAL; 5457 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5458 return -EINVAL; 5459 if (iocommand.Request.Type.Type != TYPE_CMD) 5460 return -EINVAL; 5461 5462 switch (iocommand.Request.Type.Direction) { 5463 case XFER_NONE: 5464 case XFER_WRITE: 5465 case XFER_READ: 5466 case XFER_READ | XFER_WRITE: 5467 break; 5468 default: 5469 return -EINVAL; 5470 } 5471 5472 if (iocommand.buf_size > 0) { 5473 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5474 if (!kernel_buffer) 5475 return -ENOMEM; 5476 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5477 if (copy_from_user(kernel_buffer, iocommand.buf, 5478 iocommand.buf_size)) { 5479 rc = -EFAULT; 5480 goto out; 5481 } 5482 } else { 5483 memset(kernel_buffer, 0, iocommand.buf_size); 5484 } 5485 } 5486 5487 memset(&request, 0, sizeof(request)); 5488 5489 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5490 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5491 PQI_REQUEST_HEADER_LENGTH; 5492 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 5493 sizeof(request.lun_number)); 5494 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 5495 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5496 5497 switch (iocommand.Request.Type.Direction) { 5498 case XFER_NONE: 5499 request.data_direction = SOP_NO_DIRECTION_FLAG; 5500 break; 5501 case 
XFER_WRITE: 5502 request.data_direction = SOP_WRITE_FLAG; 5503 break; 5504 case XFER_READ: 5505 request.data_direction = SOP_READ_FLAG; 5506 break; 5507 case XFER_READ | XFER_WRITE: 5508 request.data_direction = SOP_BIDIRECTIONAL; 5509 break; 5510 } 5511 5512 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5513 5514 if (iocommand.buf_size > 0) { 5515 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 5516 5517 rc = pqi_map_single(ctrl_info->pci_dev, 5518 &request.sg_descriptors[0], kernel_buffer, 5519 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 5520 if (rc) 5521 goto out; 5522 5523 iu_length += sizeof(request.sg_descriptors[0]); 5524 } 5525 5526 put_unaligned_le16(iu_length, &request.header.iu_length); 5527 5528 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 5529 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 5530 5531 if (iocommand.buf_size > 0) 5532 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 5533 PCI_DMA_BIDIRECTIONAL); 5534 5535 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 5536 5537 if (rc == 0) { 5538 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 5539 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 5540 iocommand.error_info.CommandStatus = 5541 ciss_error_info.command_status; 5542 sense_data_length = ciss_error_info.sense_data_length; 5543 if (sense_data_length) { 5544 if (sense_data_length > 5545 sizeof(iocommand.error_info.SenseInfo)) 5546 sense_data_length = 5547 sizeof(iocommand.error_info.SenseInfo); 5548 memcpy(iocommand.error_info.SenseInfo, 5549 pqi_error_info.data, sense_data_length); 5550 iocommand.error_info.SenseLen = sense_data_length; 5551 } 5552 } 5553 5554 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 5555 rc = -EFAULT; 5556 goto out; 5557 } 5558 5559 if (rc == 0 && iocommand.buf_size > 0 && 5560 (iocommand.Request.Type.Direction & XFER_READ)) { 5561 if (copy_to_user(iocommand.buf, kernel_buffer, 5562 iocommand.buf_size)) { 5563 rc = -EFAULT; 5564 } 5565 } 5566 5567 out: 5568 kfree(kernel_buffer); 5569 5570 return rc; 5571 } 5572 5573 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 5574 { 5575 int rc; 5576 struct pqi_ctrl_info *ctrl_info; 5577 5578 ctrl_info = shost_to_hba(sdev->host); 5579 5580 switch (cmd) { 5581 case CCISS_DEREGDISK: 5582 case CCISS_REGNEWDISK: 5583 case CCISS_REGNEWD: 5584 rc = pqi_scan_scsi_devices(ctrl_info); 5585 break; 5586 case CCISS_GETPCIINFO: 5587 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 5588 break; 5589 case CCISS_GETDRIVVER: 5590 rc = pqi_getdrivver_ioctl(arg); 5591 break; 5592 case CCISS_PASSTHRU: 5593 rc = pqi_passthru_ioctl(ctrl_info, arg); 5594 break; 5595 default: 5596 rc = -EINVAL; 5597 break; 5598 } 5599 5600 return rc; 5601 } 5602 5603 static ssize_t pqi_version_show(struct device *dev, 5604 struct device_attribute *attr, char *buffer) 5605 { 5606 ssize_t count = 0; 5607 struct Scsi_Host *shost; 5608 struct pqi_ctrl_info *ctrl_info; 5609 5610 shost = class_to_shost(dev); 5611 ctrl_info = shost_to_hba(shost); 5612 5613 count += snprintf(buffer + count, PAGE_SIZE - count, 5614 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); 5615 5616 count += snprintf(buffer + count, PAGE_SIZE - count, 5617 "firmware: %s\n", ctrl_info->firmware_version); 5618 5619 return count; 5620 } 5621 5622 static ssize_t pqi_host_rescan_store(struct device *dev, 5623 struct device_attribute *attr, const char *buffer, size_t count) 5624 { 5625 struct Scsi_Host *shost = class_to_shost(dev); 5626 5627 
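	/*
	 * Writing any value to the host's "rescan" sysfs attribute
	 * (typically /sys/class/scsi_host/hostN/rescan) triggers an
	 * asynchronous device rescan via pqi_scan_start().
	 */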
pqi_scan_start(shost); 5628 5629 return count; 5630 } 5631 5632 static ssize_t pqi_lockup_action_show(struct device *dev, 5633 struct device_attribute *attr, char *buffer) 5634 { 5635 int count = 0; 5636 unsigned int i; 5637 5638 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5639 if (pqi_lockup_actions[i].action == pqi_lockup_action) 5640 count += snprintf(buffer + count, PAGE_SIZE - count, 5641 "[%s] ", pqi_lockup_actions[i].name); 5642 else 5643 count += snprintf(buffer + count, PAGE_SIZE - count, 5644 "%s ", pqi_lockup_actions[i].name); 5645 } 5646 5647 count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); 5648 5649 return count; 5650 } 5651 5652 static ssize_t pqi_lockup_action_store(struct device *dev, 5653 struct device_attribute *attr, const char *buffer, size_t count) 5654 { 5655 unsigned int i; 5656 char *action_name; 5657 char action_name_buffer[32]; 5658 5659 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 5660 action_name = strstrip(action_name_buffer); 5661 5662 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5663 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 5664 pqi_lockup_action = pqi_lockup_actions[i].action; 5665 return count; 5666 } 5667 } 5668 5669 return -EINVAL; 5670 } 5671 5672 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); 5673 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 5674 static DEVICE_ATTR(lockup_action, 0644, 5675 pqi_lockup_action_show, pqi_lockup_action_store); 5676 5677 static struct device_attribute *pqi_shost_attrs[] = { 5678 &dev_attr_version, 5679 &dev_attr_rescan, 5680 &dev_attr_lockup_action, 5681 NULL 5682 }; 5683 5684 static ssize_t pqi_sas_address_show(struct device *dev, 5685 struct device_attribute *attr, char *buffer) 5686 { 5687 struct pqi_ctrl_info *ctrl_info; 5688 struct scsi_device *sdev; 5689 struct pqi_scsi_dev *device; 5690 unsigned long flags; 5691 u64 sas_address; 5692 5693 sdev = to_scsi_device(dev); 5694 ctrl_info = shost_to_hba(sdev->host); 5695 5696 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5697 5698 device = sdev->hostdata; 5699 if (pqi_is_logical_device(device)) { 5700 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5701 flags); 5702 return -ENODEV; 5703 } 5704 sas_address = device->sas_address; 5705 5706 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5707 5708 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 5709 } 5710 5711 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 5712 struct device_attribute *attr, char *buffer) 5713 { 5714 struct pqi_ctrl_info *ctrl_info; 5715 struct scsi_device *sdev; 5716 struct pqi_scsi_dev *device; 5717 unsigned long flags; 5718 5719 sdev = to_scsi_device(dev); 5720 ctrl_info = shost_to_hba(sdev->host); 5721 5722 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5723 5724 device = sdev->hostdata; 5725 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 5726 buffer[1] = '\n'; 5727 buffer[2] = '\0'; 5728 5729 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5730 5731 return 2; 5732 } 5733 5734 static ssize_t pqi_raid_level_show(struct device *dev, 5735 struct device_attribute *attr, char *buffer) 5736 { 5737 struct pqi_ctrl_info *ctrl_info; 5738 struct scsi_device *sdev; 5739 struct pqi_scsi_dev *device; 5740 unsigned long flags; 5741 char *raid_level; 5742 5743 sdev = to_scsi_device(dev); 5744 ctrl_info = shost_to_hba(sdev->host); 5745 5746 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5747 5748 device = sdev->hostdata; 5749 5750 if (pqi_is_logical_device(device)) 5751 raid_level = pqi_raid_level_to_string(device->raid_level); 5752 else 5753 raid_level = "N/A"; 5754 5755 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5756 5757 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 5758 } 5759 5760 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 5761 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 5762 pqi_ssd_smart_path_enabled_show, NULL); 5763 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 5764 5765 static struct device_attribute *pqi_sdev_attrs[] = { 5766 &dev_attr_sas_address, 5767 &dev_attr_ssd_smart_path_enabled, 5768 &dev_attr_raid_level, 5769 NULL 5770 }; 5771 5772 static struct scsi_host_template pqi_driver_template = { 5773 .module = THIS_MODULE, 5774 .name = DRIVER_NAME_SHORT, 5775 .proc_name = DRIVER_NAME_SHORT, 5776 .queuecommand = pqi_scsi_queue_command, 5777 .scan_start = pqi_scan_start, 5778 .scan_finished = pqi_scan_finished, 5779 .this_id = -1, 5780 .use_clustering = ENABLE_CLUSTERING, 5781 .eh_device_reset_handler = pqi_eh_device_reset_handler, 5782 .ioctl = pqi_ioctl, 5783 .slave_alloc = pqi_slave_alloc, 5784 .map_queues = pqi_map_queues, 5785 .sdev_attrs = pqi_sdev_attrs, 5786 .shost_attrs = pqi_shost_attrs, 5787 }; 5788 5789 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 5790 { 5791 int rc; 5792 struct Scsi_Host *shost; 5793 5794 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 5795 if (!shost) { 5796 dev_err(&ctrl_info->pci_dev->dev, 5797 "scsi_host_alloc failed for controller %u\n", 5798 ctrl_info->ctrl_id); 5799 return -ENOMEM; 5800 } 5801 5802 shost->io_port = 0; 5803 shost->n_io_port = 0; 5804 shost->this_id = -1; 5805 shost->max_channel = PQI_MAX_BUS; 5806 shost->max_cmd_len = MAX_COMMAND_SIZE; 5807 shost->max_lun = ~0; 5808 shost->max_id = ~0; 5809 shost->max_sectors = ctrl_info->max_sectors; 5810 shost->can_queue = ctrl_info->scsi_ml_can_queue; 5811 shost->cmd_per_lun = shost->can_queue; 5812 shost->sg_tablesize = ctrl_info->sg_tablesize; 5813 shost->transportt = pqi_sas_transport_template; 5814 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 5815 shost->unique_id = shost->irq; 5816 shost->nr_hw_queues = ctrl_info->num_queue_groups; 5817 shost->hostdata[0] = (unsigned long)ctrl_info; 5818 5819 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 5820 if (rc) { 5821 dev_err(&ctrl_info->pci_dev->dev, 5822 "scsi_add_host failed for controller %u\n", 5823 ctrl_info->ctrl_id); 5824 goto free_host; 5825 } 5826 5827 rc = pqi_add_sas_host(shost, ctrl_info); 5828 if (rc) { 5829 dev_err(&ctrl_info->pci_dev->dev, 5830 "add SAS host failed for controller %u\n", 5831 ctrl_info->ctrl_id); 5832 goto remove_host; 5833 } 5834 5835 ctrl_info->scsi_host = shost; 5836 5837 return 0; 5838 5839 remove_host: 5840 scsi_remove_host(shost); 5841 free_host: 5842 scsi_host_put(shost); 5843 5844 return 
rc; 5845 } 5846 5847 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 5848 { 5849 struct Scsi_Host *shost; 5850 5851 pqi_delete_sas_host(ctrl_info); 5852 5853 shost = ctrl_info->scsi_host; 5854 if (!shost) 5855 return; 5856 5857 scsi_remove_host(shost); 5858 scsi_host_put(shost); 5859 } 5860 5861 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 5862 { 5863 int rc = 0; 5864 struct pqi_device_registers __iomem *pqi_registers; 5865 unsigned long timeout; 5866 unsigned int timeout_msecs; 5867 union pqi_reset_register reset_reg; 5868 5869 pqi_registers = ctrl_info->pqi_registers; 5870 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 5871 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 5872 5873 while (1) { 5874 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 5875 reset_reg.all_bits = readl(&pqi_registers->device_reset); 5876 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 5877 break; 5878 pqi_check_ctrl_health(ctrl_info); 5879 if (pqi_ctrl_offline(ctrl_info)) { 5880 rc = -ENXIO; 5881 break; 5882 } 5883 if (time_after(jiffies, timeout)) { 5884 rc = -ETIMEDOUT; 5885 break; 5886 } 5887 } 5888 5889 return rc; 5890 } 5891 5892 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 5893 { 5894 int rc; 5895 union pqi_reset_register reset_reg; 5896 5897 if (ctrl_info->pqi_reset_quiesce_supported) { 5898 rc = sis_pqi_reset_quiesce(ctrl_info); 5899 if (rc) { 5900 dev_err(&ctrl_info->pci_dev->dev, 5901 "PQI reset failed during quiesce with error %d\n", 5902 rc); 5903 return rc; 5904 } 5905 } 5906 5907 reset_reg.all_bits = 0; 5908 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 5909 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 5910 5911 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 5912 5913 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 5914 if (rc) 5915 dev_err(&ctrl_info->pci_dev->dev, 5916 "PQI reset failed with error %d\n", rc); 5917 5918 return rc; 5919 } 5920 5921 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) 5922 { 5923 int rc; 5924 struct bmic_identify_controller *identify; 5925 5926 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 5927 if (!identify) 5928 return -ENOMEM; 5929 5930 rc = pqi_identify_controller(ctrl_info, identify); 5931 if (rc) 5932 goto out; 5933 5934 memcpy(ctrl_info->firmware_version, identify->firmware_version, 5935 sizeof(identify->firmware_version)); 5936 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 5937 snprintf(ctrl_info->firmware_version + 5938 strlen(ctrl_info->firmware_version), 5939 sizeof(ctrl_info->firmware_version), 5940 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 5941 5942 out: 5943 kfree(identify); 5944 5945 return rc; 5946 } 5947 5948 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 5949 { 5950 u32 table_length; 5951 u32 section_offset; 5952 void __iomem *table_iomem_addr; 5953 struct pqi_config_table *config_table; 5954 struct pqi_config_table_section_header *section; 5955 5956 table_length = ctrl_info->config_table_length; 5957 5958 config_table = kmalloc(table_length, GFP_KERNEL); 5959 if (!config_table) { 5960 dev_err(&ctrl_info->pci_dev->dev, 5961 "failed to allocate memory for PQI configuration table\n"); 5962 return -ENOMEM; 5963 } 5964 5965 /* 5966 * Copy the config table contents from I/O memory space into the 5967 * temporary buffer. 
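The buffer is then walked section by section: first_section_offset gives the byte offset of the first section header from the start of the table, each header's next_section_offset chains to the following section, and an offset of zero terminates the walk.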
5968 */ 5969 table_iomem_addr = ctrl_info->iomem_base + 5970 ctrl_info->config_table_offset; 5971 memcpy_fromio(config_table, table_iomem_addr, table_length); 5972 5973 section_offset = 5974 get_unaligned_le32(&config_table->first_section_offset); 5975 5976 while (section_offset) { 5977 section = (void *)config_table + section_offset; 5978 5979 switch (get_unaligned_le16(&section->section_id)) { 5980 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 5981 if (pqi_disable_heartbeat) 5982 dev_warn(&ctrl_info->pci_dev->dev, 5983 "heartbeat disabled by module parameter\n"); 5984 else 5985 ctrl_info->heartbeat_counter = 5986 table_iomem_addr + 5987 section_offset + 5988 offsetof( 5989 struct pqi_config_table_heartbeat, 5990 heartbeat_counter); 5991 break; 5992 } 5993 5994 section_offset = 5995 get_unaligned_le16(&section->next_section_offset); 5996 } 5997 5998 kfree(config_table); 5999 6000 return 0; 6001 } 6002 6003 /* Switches the controller from PQI mode back into SIS mode. */ 6004 6005 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 6006 { 6007 int rc; 6008 6009 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 6010 rc = pqi_reset(ctrl_info); 6011 if (rc) 6012 return rc; 6013 rc = sis_reenable_sis_mode(ctrl_info); 6014 if (rc) { 6015 dev_err(&ctrl_info->pci_dev->dev, 6016 "re-enabling SIS mode failed with error %d\n", rc); 6017 return rc; 6018 } 6019 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6020 6021 return 0; 6022 } 6023 6024 /* 6025 * If the controller isn't already in SIS mode, this function forces it into 6026 * SIS mode. 6027 */ 6028 6029 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 6030 { 6031 if (!sis_is_firmware_running(ctrl_info)) 6032 return -ENXIO; 6033 6034 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 6035 return 0; 6036 6037 if (sis_is_kernel_up(ctrl_info)) { 6038 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6039 return 0; 6040 } 6041 6042 return pqi_revert_to_sis_mode(ctrl_info); 6043 } 6044 6045 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 6046 { 6047 int rc; 6048 6049 rc = pqi_force_sis_mode(ctrl_info); 6050 if (rc) 6051 return rc; 6052 6053 /* 6054 * Wait until the controller is ready to start accepting SIS 6055 * commands. 6056 */ 6057 rc = sis_wait_for_ctrl_ready(ctrl_info); 6058 if (rc) 6059 return rc; 6060 6061 /* 6062 * Get the controller properties. This allows us to determine 6063 * whether or not it supports PQI mode. 6064 */ 6065 rc = sis_get_ctrl_properties(ctrl_info); 6066 if (rc) { 6067 dev_err(&ctrl_info->pci_dev->dev, 6068 "error obtaining controller properties\n"); 6069 return rc; 6070 } 6071 6072 rc = sis_get_pqi_capabilities(ctrl_info); 6073 if (rc) { 6074 dev_err(&ctrl_info->pci_dev->dev, 6075 "error obtaining controller capabilities\n"); 6076 return rc; 6077 } 6078 6079 if (reset_devices) { 6080 if (ctrl_info->max_outstanding_requests > 6081 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 6082 ctrl_info->max_outstanding_requests = 6083 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 6084 } else { 6085 if (ctrl_info->max_outstanding_requests > 6086 PQI_MAX_OUTSTANDING_REQUESTS) 6087 ctrl_info->max_outstanding_requests = 6088 PQI_MAX_OUTSTANDING_REQUESTS; 6089 } 6090 6091 pqi_calculate_io_resources(ctrl_info); 6092 6093 rc = pqi_alloc_error_buffer(ctrl_info); 6094 if (rc) { 6095 dev_err(&ctrl_info->pci_dev->dev, 6096 "failed to allocate PQI error buffer\n"); 6097 return rc; 6098 } 6099 6100 /* 6101 * If the function we are about to call succeeds, the 6102 * controller will transition from legacy SIS mode 6103 * into PQI mode. 
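The same handoff is repeated by pqi_ctrl_init_resume() when the controller is reinitialized on resume.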
6104 */ 6105 rc = sis_init_base_struct_addr(ctrl_info); 6106 if (rc) { 6107 dev_err(&ctrl_info->pci_dev->dev, 6108 "error initializing PQI mode\n"); 6109 return rc; 6110 } 6111 6112 /* Wait for the controller to complete the SIS -> PQI transition. */ 6113 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6114 if (rc) { 6115 dev_err(&ctrl_info->pci_dev->dev, 6116 "transition to PQI mode failed\n"); 6117 return rc; 6118 } 6119 6120 /* From here on, we are running in PQI mode. */ 6121 ctrl_info->pqi_mode_enabled = true; 6122 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6123 6124 rc = pqi_process_config_table(ctrl_info); 6125 if (rc) 6126 return rc; 6127 6128 rc = pqi_alloc_admin_queues(ctrl_info); 6129 if (rc) { 6130 dev_err(&ctrl_info->pci_dev->dev, 6131 "failed to allocate admin queues\n"); 6132 return rc; 6133 } 6134 6135 rc = pqi_create_admin_queues(ctrl_info); 6136 if (rc) { 6137 dev_err(&ctrl_info->pci_dev->dev, 6138 "error creating admin queues\n"); 6139 return rc; 6140 } 6141 6142 rc = pqi_report_device_capability(ctrl_info); 6143 if (rc) { 6144 dev_err(&ctrl_info->pci_dev->dev, 6145 "obtaining device capability failed\n"); 6146 return rc; 6147 } 6148 6149 rc = pqi_validate_device_capability(ctrl_info); 6150 if (rc) 6151 return rc; 6152 6153 pqi_calculate_queue_resources(ctrl_info); 6154 6155 rc = pqi_enable_msix_interrupts(ctrl_info); 6156 if (rc) 6157 return rc; 6158 6159 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 6160 ctrl_info->max_msix_vectors = 6161 ctrl_info->num_msix_vectors_enabled; 6162 pqi_calculate_queue_resources(ctrl_info); 6163 } 6164 6165 rc = pqi_alloc_io_resources(ctrl_info); 6166 if (rc) 6167 return rc; 6168 6169 rc = pqi_alloc_operational_queues(ctrl_info); 6170 if (rc) { 6171 dev_err(&ctrl_info->pci_dev->dev, 6172 "failed to allocate operational queues\n"); 6173 return rc; 6174 } 6175 6176 pqi_init_operational_queues(ctrl_info); 6177 6178 rc = pqi_request_irqs(ctrl_info); 6179 if (rc) 6180 return rc; 6181 6182 rc = pqi_create_queues(ctrl_info); 6183 if (rc) 6184 return rc; 6185 6186 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6187 6188 ctrl_info->controller_online = true; 6189 pqi_start_heartbeat_timer(ctrl_info); 6190 6191 rc = pqi_enable_events(ctrl_info); 6192 if (rc) { 6193 dev_err(&ctrl_info->pci_dev->dev, 6194 "error enabling events\n"); 6195 return rc; 6196 } 6197 6198 /* Register with the SCSI subsystem. 
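pqi_register_scsi() allocates the Scsi_Host, sizes it from the limits computed above (can_queue, sg_tablesize, max_sectors), adds it to the midlayer, and then registers the SAS host with the transport layer.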
*/ 6199 rc = pqi_register_scsi(ctrl_info); 6200 if (rc) 6201 return rc; 6202 6203 rc = pqi_get_ctrl_firmware_version(ctrl_info); 6204 if (rc) { 6205 dev_err(&ctrl_info->pci_dev->dev, 6206 "error obtaining firmware version\n"); 6207 return rc; 6208 } 6209 6210 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6211 if (rc) { 6212 dev_err(&ctrl_info->pci_dev->dev, 6213 "error updating host wellness\n"); 6214 return rc; 6215 } 6216 6217 pqi_schedule_update_time_worker(ctrl_info); 6218 6219 pqi_scan_scsi_devices(ctrl_info); 6220 6221 return 0; 6222 } 6223 6224 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 6225 { 6226 unsigned int i; 6227 struct pqi_admin_queues *admin_queues; 6228 struct pqi_event_queue *event_queue; 6229 6230 admin_queues = &ctrl_info->admin_queues; 6231 admin_queues->iq_pi_copy = 0; 6232 admin_queues->oq_ci_copy = 0; 6233 *admin_queues->oq_pi = 0; 6234 6235 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6236 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 6237 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 6238 ctrl_info->queue_groups[i].oq_ci_copy = 0; 6239 6240 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0; 6241 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0; 6242 *ctrl_info->queue_groups[i].oq_pi = 0; 6243 } 6244 6245 event_queue = &ctrl_info->event_queue; 6246 *event_queue->oq_pi = 0; 6247 event_queue->oq_ci_copy = 0; 6248 } 6249 6250 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 6251 { 6252 int rc; 6253 6254 rc = pqi_force_sis_mode(ctrl_info); 6255 if (rc) 6256 return rc; 6257 6258 /* 6259 * Wait until the controller is ready to start accepting SIS 6260 * commands. 6261 */ 6262 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 6263 if (rc) 6264 return rc; 6265 6266 /* 6267 * If the function we are about to call succeeds, the 6268 * controller will transition from legacy SIS mode 6269 * into PQI mode. 6270 */ 6271 rc = sis_init_base_struct_addr(ctrl_info); 6272 if (rc) { 6273 dev_err(&ctrl_info->pci_dev->dev, 6274 "error initializing PQI mode\n"); 6275 return rc; 6276 } 6277 6278 /* Wait for the controller to complete the SIS -> PQI transition. */ 6279 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6280 if (rc) { 6281 dev_err(&ctrl_info->pci_dev->dev, 6282 "transition to PQI mode failed\n"); 6283 return rc; 6284 } 6285 6286 /* From here on, we are running in PQI mode. 
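Unlike the first-time initialization in pqi_ctrl_init(), this resume path keeps the previously allocated queue memory and only resets the queue indices via pqi_reinit_queues() before recreating the admin and operational queues.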
*/ 6287 ctrl_info->pqi_mode_enabled = true; 6288 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6289 6290 pqi_reinit_queues(ctrl_info); 6291 6292 rc = pqi_create_admin_queues(ctrl_info); 6293 if (rc) { 6294 dev_err(&ctrl_info->pci_dev->dev, 6295 "error creating admin queues\n"); 6296 return rc; 6297 } 6298 6299 rc = pqi_create_queues(ctrl_info); 6300 if (rc) 6301 return rc; 6302 6303 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6304 6305 ctrl_info->controller_online = true; 6306 pqi_start_heartbeat_timer(ctrl_info); 6307 pqi_ctrl_unblock_requests(ctrl_info); 6308 6309 rc = pqi_enable_events(ctrl_info); 6310 if (rc) { 6311 dev_err(&ctrl_info->pci_dev->dev, 6312 "error enabling events\n"); 6313 return rc; 6314 } 6315 6316 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6317 if (rc) { 6318 dev_err(&ctrl_info->pci_dev->dev, 6319 "error updating host wellness\n"); 6320 return rc; 6321 } 6322 6323 pqi_schedule_update_time_worker(ctrl_info); 6324 6325 pqi_scan_scsi_devices(ctrl_info); 6326 6327 return 0; 6328 } 6329 6330 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 6331 u16 timeout) 6332 { 6333 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 6334 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 6335 } 6336 6337 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 6338 { 6339 int rc; 6340 u64 mask; 6341 6342 rc = pci_enable_device(ctrl_info->pci_dev); 6343 if (rc) { 6344 dev_err(&ctrl_info->pci_dev->dev, 6345 "failed to enable PCI device\n"); 6346 return rc; 6347 } 6348 6349 if (sizeof(dma_addr_t) > 4) 6350 mask = DMA_BIT_MASK(64); 6351 else 6352 mask = DMA_BIT_MASK(32); 6353 6354 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); 6355 if (rc) { 6356 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 6357 goto disable_device; 6358 } 6359 6360 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 6361 if (rc) { 6362 dev_err(&ctrl_info->pci_dev->dev, 6363 "failed to obtain PCI resources\n"); 6364 goto disable_device; 6365 } 6366 6367 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( 6368 ctrl_info->pci_dev, 0), 6369 sizeof(struct pqi_ctrl_registers)); 6370 if (!ctrl_info->iomem_base) { 6371 dev_err(&ctrl_info->pci_dev->dev, 6372 "failed to map memory for controller registers\n"); 6373 rc = -ENOMEM; 6374 goto release_regions; 6375 } 6376 6377 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 6378 6379 /* Increase the PCIe completion timeout. */ 6380 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 6381 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 6382 if (rc) { 6383 dev_err(&ctrl_info->pci_dev->dev, 6384 "failed to set PCIe completion timeout\n"); 6385 goto release_regions; 6386 } 6387 6388 /* Enable bus mastering. 
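Without this the controller cannot DMA to or from host memory.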
*/ 6389 pci_set_master(ctrl_info->pci_dev); 6390 6391 ctrl_info->registers = ctrl_info->iomem_base; 6392 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 6393 6394 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 6395 6396 return 0; 6397 6398 release_regions: 6399 pci_release_regions(ctrl_info->pci_dev); 6400 disable_device: 6401 pci_disable_device(ctrl_info->pci_dev); 6402 6403 return rc; 6404 } 6405 6406 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 6407 { 6408 iounmap(ctrl_info->iomem_base); 6409 pci_release_regions(ctrl_info->pci_dev); 6410 if (pci_is_enabled(ctrl_info->pci_dev)) 6411 pci_disable_device(ctrl_info->pci_dev); 6412 pci_set_drvdata(ctrl_info->pci_dev, NULL); 6413 } 6414 6415 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 6416 { 6417 struct pqi_ctrl_info *ctrl_info; 6418 6419 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 6420 GFP_KERNEL, numa_node); 6421 if (!ctrl_info) 6422 return NULL; 6423 6424 mutex_init(&ctrl_info->scan_mutex); 6425 mutex_init(&ctrl_info->lun_reset_mutex); 6426 6427 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 6428 spin_lock_init(&ctrl_info->scsi_device_list_lock); 6429 6430 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 6431 atomic_set(&ctrl_info->num_interrupts, 0); 6432 6433 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 6434 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 6435 6436 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 6437 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 6438 6439 sema_init(&ctrl_info->sync_request_sem, 6440 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 6441 init_waitqueue_head(&ctrl_info->block_requests_wait); 6442 6443 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 6444 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 6445 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 6446 pqi_raid_bypass_retry_worker); 6447 6448 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 6449 ctrl_info->irq_mode = IRQ_MODE_NONE; 6450 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 6451 6452 return ctrl_info; 6453 } 6454 6455 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 6456 { 6457 kfree(ctrl_info); 6458 } 6459 6460 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 6461 { 6462 pqi_free_irqs(ctrl_info); 6463 pqi_disable_msix_interrupts(ctrl_info); 6464 } 6465 6466 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 6467 { 6468 pqi_stop_heartbeat_timer(ctrl_info); 6469 pqi_free_interrupts(ctrl_info); 6470 if (ctrl_info->queue_memory_base) 6471 dma_free_coherent(&ctrl_info->pci_dev->dev, 6472 ctrl_info->queue_memory_length, 6473 ctrl_info->queue_memory_base, 6474 ctrl_info->queue_memory_base_dma_handle); 6475 if (ctrl_info->admin_queue_memory_base) 6476 dma_free_coherent(&ctrl_info->pci_dev->dev, 6477 ctrl_info->admin_queue_memory_length, 6478 ctrl_info->admin_queue_memory_base, 6479 ctrl_info->admin_queue_memory_base_dma_handle); 6480 pqi_free_all_io_requests(ctrl_info); 6481 if (ctrl_info->error_buffer) 6482 dma_free_coherent(&ctrl_info->pci_dev->dev, 6483 ctrl_info->error_buffer_length, 6484 ctrl_info->error_buffer, 6485 ctrl_info->error_buffer_dma_handle); 6486 if (ctrl_info->iomem_base) 6487 pqi_cleanup_pci_init(ctrl_info); 6488 pqi_free_ctrl_info(ctrl_info); 6489 } 6490 6491 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 6492 { 6493 pqi_cancel_rescan_worker(ctrl_info); 6494 
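/* Unwind roughly in the reverse order of pqi_ctrl_init(): stop the remaining background work, detach from the SCSI midlayer, drop back to SIS mode if PQI mode was ever enabled, then free the controller's resources. */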
pqi_cancel_update_time_worker(ctrl_info); 6495 pqi_remove_all_scsi_devices(ctrl_info); 6496 pqi_unregister_scsi(ctrl_info); 6497 if (ctrl_info->pqi_mode_enabled) 6498 pqi_revert_to_sis_mode(ctrl_info); 6499 pqi_free_ctrl_resources(ctrl_info); 6500 } 6501 6502 static void pqi_perform_lockup_action(void) 6503 { 6504 switch (pqi_lockup_action) { 6505 case PANIC: 6506 panic("FATAL: Smart Family Controller lockup detected"); 6507 break; 6508 case REBOOT: 6509 emergency_restart(); 6510 break; 6511 case NONE: 6512 default: 6513 break; 6514 } 6515 } 6516 6517 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 6518 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 6519 .status = SAM_STAT_CHECK_CONDITION, 6520 }; 6521 6522 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 6523 { 6524 unsigned int i; 6525 struct pqi_io_request *io_request; 6526 struct scsi_cmnd *scmd; 6527 6528 for (i = 0; i < ctrl_info->max_io_slots; i++) { 6529 io_request = &ctrl_info->io_request_pool[i]; 6530 if (atomic_read(&io_request->refcount) == 0) 6531 continue; 6532 6533 scmd = io_request->scmd; 6534 if (scmd) { 6535 set_host_byte(scmd, DID_NO_CONNECT); 6536 } else { 6537 io_request->status = -ENXIO; 6538 io_request->error_info = 6539 &pqi_ctrl_offline_raid_error_info; 6540 } 6541 6542 io_request->io_complete_callback(io_request, 6543 io_request->context); 6544 } 6545 } 6546 6547 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 6548 { 6549 pqi_perform_lockup_action(); 6550 pqi_stop_heartbeat_timer(ctrl_info); 6551 pqi_free_interrupts(ctrl_info); 6552 pqi_cancel_rescan_worker(ctrl_info); 6553 pqi_cancel_update_time_worker(ctrl_info); 6554 pqi_ctrl_wait_until_quiesced(ctrl_info); 6555 pqi_fail_all_outstanding_requests(ctrl_info); 6556 pqi_clear_all_queued_raid_bypass_retries(ctrl_info); 6557 pqi_ctrl_unblock_requests(ctrl_info); 6558 } 6559 6560 static void pqi_ctrl_offline_worker(struct work_struct *work) 6561 { 6562 struct pqi_ctrl_info *ctrl_info; 6563 6564 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 6565 pqi_take_ctrl_offline_deferred(ctrl_info); 6566 } 6567 6568 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 6569 { 6570 if (!ctrl_info->controller_online) 6571 return; 6572 6573 ctrl_info->controller_online = false; 6574 ctrl_info->pqi_mode_enabled = false; 6575 pqi_ctrl_block_requests(ctrl_info); 6576 if (!pqi_disable_ctrl_shutdown) 6577 sis_shutdown_ctrl(ctrl_info); 6578 pci_disable_device(ctrl_info->pci_dev); 6579 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); 6580 schedule_work(&ctrl_info->ctrl_offline_work); 6581 } 6582 6583 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 6584 const struct pci_device_id *id) 6585 { 6586 char *ctrl_description; 6587 6588 if (id->driver_data) 6589 ctrl_description = (char *)id->driver_data; 6590 else 6591 ctrl_description = "Microsemi Smart Family Controller"; 6592 6593 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 6594 } 6595 6596 static int pqi_pci_probe(struct pci_dev *pci_dev, 6597 const struct pci_device_id *id) 6598 { 6599 int rc; 6600 int node; 6601 struct pqi_ctrl_info *ctrl_info; 6602 6603 pqi_print_ctrl_info(pci_dev, id); 6604 6605 if (pqi_disable_device_id_wildcards && 6606 id->subvendor == PCI_ANY_ID && 6607 id->subdevice == PCI_ANY_ID) { 6608 dev_warn(&pci_dev->dev, 6609 "controller not probed because device ID wildcards are disabled\n"); 6610 return -ENODEV; 6611 } 6612 6613 if (id->subvendor == PCI_ANY_ID || 
id->subdevice == PCI_ANY_ID) 6614 dev_warn(&pci_dev->dev, 6615 "controller device ID matched using wildcards\n"); 6616 6617 node = dev_to_node(&pci_dev->dev); 6618 if (node == NUMA_NO_NODE) 6619 set_dev_node(&pci_dev->dev, 0); 6620 6621 ctrl_info = pqi_alloc_ctrl_info(node); 6622 if (!ctrl_info) { 6623 dev_err(&pci_dev->dev, 6624 "failed to allocate controller info block\n"); 6625 return -ENOMEM; 6626 } 6627 6628 ctrl_info->pci_dev = pci_dev; 6629 6630 rc = pqi_pci_init(ctrl_info); 6631 if (rc) 6632 goto error; 6633 6634 rc = pqi_ctrl_init(ctrl_info); 6635 if (rc) 6636 goto error; 6637 6638 return 0; 6639 6640 error: 6641 pqi_remove_ctrl(ctrl_info); 6642 6643 return rc; 6644 } 6645 6646 static void pqi_pci_remove(struct pci_dev *pci_dev) 6647 { 6648 struct pqi_ctrl_info *ctrl_info; 6649 6650 ctrl_info = pci_get_drvdata(pci_dev); 6651 if (!ctrl_info) 6652 return; 6653 6654 pqi_remove_ctrl(ctrl_info); 6655 } 6656 6657 static void pqi_shutdown(struct pci_dev *pci_dev) 6658 { 6659 int rc; 6660 struct pqi_ctrl_info *ctrl_info; 6661 6662 ctrl_info = pci_get_drvdata(pci_dev); 6663 if (!ctrl_info) 6664 goto error; 6665 6666 /* 6667 * Write all data in the controller's battery-backed cache to 6668 * storage. 6669 */ 6670 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); 6671 pqi_reset(ctrl_info); 6672 if (rc == 0) 6673 return; 6674 6675 error: 6676 dev_warn(&pci_dev->dev, 6677 "unable to flush controller cache\n"); 6678 } 6679 6680 static void pqi_process_lockup_action_param(void) 6681 { 6682 unsigned int i; 6683 6684 if (!pqi_lockup_action_param) 6685 return; 6686 6687 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6688 if (strcmp(pqi_lockup_action_param, 6689 pqi_lockup_actions[i].name) == 0) { 6690 pqi_lockup_action = pqi_lockup_actions[i].action; 6691 return; 6692 } 6693 } 6694 6695 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 6696 DRIVER_NAME_SHORT, pqi_lockup_action_param); 6697 } 6698 6699 static void pqi_process_module_params(void) 6700 { 6701 pqi_process_lockup_action_param(); 6702 } 6703 6704 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) 6705 { 6706 struct pqi_ctrl_info *ctrl_info; 6707 6708 ctrl_info = pci_get_drvdata(pci_dev); 6709 6710 pqi_disable_events(ctrl_info); 6711 pqi_cancel_update_time_worker(ctrl_info); 6712 pqi_cancel_rescan_worker(ctrl_info); 6713 pqi_wait_until_scan_finished(ctrl_info); 6714 pqi_wait_until_lun_reset_finished(ctrl_info); 6715 pqi_flush_cache(ctrl_info, SUSPEND); 6716 pqi_ctrl_block_requests(ctrl_info); 6717 pqi_ctrl_wait_until_quiesced(ctrl_info); 6718 pqi_wait_until_inbound_queues_empty(ctrl_info); 6719 pqi_ctrl_wait_for_pending_io(ctrl_info); 6720 pqi_stop_heartbeat_timer(ctrl_info); 6721 6722 if (state.event == PM_EVENT_FREEZE) 6723 return 0; 6724 6725 pci_save_state(pci_dev); 6726 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 6727 6728 ctrl_info->controller_online = false; 6729 ctrl_info->pqi_mode_enabled = false; 6730 6731 return 0; 6732 } 6733 6734 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) 6735 { 6736 int rc; 6737 struct pqi_ctrl_info *ctrl_info; 6738 6739 ctrl_info = pci_get_drvdata(pci_dev); 6740 6741 if (pci_dev->current_state != PCI_D0) { 6742 ctrl_info->max_hw_queue_index = 0; 6743 pqi_free_interrupts(ctrl_info); 6744 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); 6745 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, 6746 IRQF_SHARED, DRIVER_NAME_SHORT, 6747 &ctrl_info->queue_groups[0]); 6748 if (rc) { 6749 
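/* Without its interrupt line the controller cannot be brought back, so report the failure and give up on the resume. */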
dev_err(&ctrl_info->pci_dev->dev, 6750 "irq %u init failed with error %d\n", 6751 pci_dev->irq, rc); 6752 return rc; 6753 } 6754 pqi_start_heartbeat_timer(ctrl_info); 6755 pqi_ctrl_unblock_requests(ctrl_info); 6756 return 0; 6757 } 6758 6759 pci_set_power_state(pci_dev, PCI_D0); 6760 pci_restore_state(pci_dev); 6761 6762 return pqi_ctrl_init_resume(ctrl_info); 6763 } 6764 6765 /* Define the PCI IDs for the controllers that we support. */ 6766 static const struct pci_device_id pqi_pci_id_table[] = { 6767 { 6768 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6769 0x105b, 0x1211) 6770 }, 6771 { 6772 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6773 0x105b, 0x1321) 6774 }, 6775 { 6776 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6777 0x152d, 0x8a22) 6778 }, 6779 { 6780 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6781 0x152d, 0x8a23) 6782 }, 6783 { 6784 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6785 0x152d, 0x8a24) 6786 }, 6787 { 6788 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6789 0x152d, 0x8a36) 6790 }, 6791 { 6792 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6793 0x152d, 0x8a37) 6794 }, 6795 { 6796 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6797 0x193d, 0x8460) 6798 }, 6799 { 6800 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6801 0x193d, 0x8461) 6802 }, 6803 { 6804 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6805 0x193d, 0xf460) 6806 }, 6807 { 6808 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6809 0x193d, 0xf461) 6810 }, 6811 { 6812 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6813 0x1bd4, 0x0045) 6814 }, 6815 { 6816 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6817 0x1bd4, 0x0046) 6818 }, 6819 { 6820 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6821 0x1bd4, 0x0047) 6822 }, 6823 { 6824 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6825 0x1bd4, 0x0048) 6826 }, 6827 { 6828 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6829 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 6830 }, 6831 { 6832 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6833 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 6834 }, 6835 { 6836 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6837 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 6838 }, 6839 { 6840 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6841 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 6842 }, 6843 { 6844 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6845 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 6846 }, 6847 { 6848 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6849 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 6850 }, 6851 { 6852 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6853 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 6854 }, 6855 { 6856 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6857 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 6858 }, 6859 { 6860 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6861 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 6862 }, 6863 { 6864 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6865 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 6866 }, 6867 { 6868 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6869 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 6870 }, 6871 { 6872 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6873 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 6874 }, 6875 { 6876 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6877 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 6878 }, 6879 { 6880 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6881 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 6882 }, 6883 { 6884 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6885 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 6886 }, 6887 { 6888 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6889 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 6890 }, 6891 { 6892 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6893 
PCI_VENDOR_ID_ADAPTEC2, 0x0906) 6894 }, 6895 { 6896 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6897 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 6898 }, 6899 { 6900 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6901 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 6902 }, 6903 { 6904 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6905 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 6906 }, 6907 { 6908 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6909 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 6910 }, 6911 { 6912 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6913 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 6914 }, 6915 { 6916 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6917 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 6918 }, 6919 { 6920 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6921 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 6922 }, 6923 { 6924 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6925 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 6926 }, 6927 { 6928 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6929 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 6930 }, 6931 { 6932 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6933 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 6934 }, 6935 { 6936 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6937 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 6938 }, 6939 { 6940 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6941 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 6942 }, 6943 { 6944 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6945 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 6946 }, 6947 { 6948 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6949 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 6950 }, 6951 { 6952 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6953 PCI_VENDOR_ID_DELL, 0x1fe0) 6954 }, 6955 { 6956 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6957 PCI_VENDOR_ID_HP, 0x0600) 6958 }, 6959 { 6960 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6961 PCI_VENDOR_ID_HP, 0x0601) 6962 }, 6963 { 6964 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6965 PCI_VENDOR_ID_HP, 0x0602) 6966 }, 6967 { 6968 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6969 PCI_VENDOR_ID_HP, 0x0603) 6970 }, 6971 { 6972 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6973 PCI_VENDOR_ID_HP, 0x0609) 6974 }, 6975 { 6976 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6977 PCI_VENDOR_ID_HP, 0x0650) 6978 }, 6979 { 6980 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6981 PCI_VENDOR_ID_HP, 0x0651) 6982 }, 6983 { 6984 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6985 PCI_VENDOR_ID_HP, 0x0652) 6986 }, 6987 { 6988 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6989 PCI_VENDOR_ID_HP, 0x0653) 6990 }, 6991 { 6992 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6993 PCI_VENDOR_ID_HP, 0x0654) 6994 }, 6995 { 6996 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6997 PCI_VENDOR_ID_HP, 0x0655) 6998 }, 6999 { 7000 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7001 PCI_VENDOR_ID_HP, 0x0700) 7002 }, 7003 { 7004 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7005 PCI_VENDOR_ID_HP, 0x0701) 7006 }, 7007 { 7008 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7009 PCI_VENDOR_ID_HP, 0x1001) 7010 }, 7011 { 7012 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7013 PCI_VENDOR_ID_HP, 0x1100) 7014 }, 7015 { 7016 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7017 PCI_VENDOR_ID_HP, 0x1101) 7018 }, 7019 { 7020 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7021 PCI_ANY_ID, PCI_ANY_ID) 7022 }, 7023 { 0 } 7024 }; 7025 7026 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 7027 7028 static struct pci_driver pqi_pci_driver = { 7029 .name = DRIVER_NAME_SHORT, 7030 .id_table = pqi_pci_id_table, 7031 .probe = pqi_pci_probe, 7032 .remove = pqi_pci_remove, 7033 .shutdown = pqi_shutdown, 7034 #if 
defined(CONFIG_PM) 7035 .suspend = pqi_suspend, 7036 .resume = pqi_resume, 7037 #endif 7038 }; 7039 7040 static int __init pqi_init(void) 7041 { 7042 int rc; 7043 7044 pr_info(DRIVER_NAME "\n"); 7045 7046 pqi_sas_transport_template = 7047 sas_attach_transport(&pqi_sas_transport_functions); 7048 if (!pqi_sas_transport_template) 7049 return -ENODEV; 7050 7051 pqi_process_module_params(); 7052 7053 rc = pci_register_driver(&pqi_pci_driver); 7054 if (rc) 7055 sas_release_transport(pqi_sas_transport_template); 7056 7057 return rc; 7058 } 7059 7060 static void __exit pqi_cleanup(void) 7061 { 7062 pci_unregister_driver(&pqi_pci_driver); 7063 sas_release_transport(pqi_sas_transport_template); 7064 } 7065 7066 module_init(pqi_init); 7067 module_exit(pqi_cleanup); 7068 7069 static void __attribute__((unused)) verify_structures(void) 7070 { 7071 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7072 sis_host_to_ctrl_doorbell) != 0x20); 7073 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7074 sis_interrupt_mask) != 0x34); 7075 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7076 sis_ctrl_to_host_doorbell) != 0x9c); 7077 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7078 sis_ctrl_to_host_doorbell_clear) != 0xa0); 7079 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7080 sis_driver_scratch) != 0xb0); 7081 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7082 sis_firmware_status) != 0xbc); 7083 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7084 sis_mailbox) != 0x1000); 7085 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7086 pqi_registers) != 0x4000); 7087 7088 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7089 iu_type) != 0x0); 7090 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7091 iu_length) != 0x2); 7092 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7093 response_queue_id) != 0x4); 7094 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7095 work_area) != 0x6); 7096 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 7097 7098 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7099 status) != 0x0); 7100 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7101 service_response) != 0x1); 7102 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7103 data_present) != 0x2); 7104 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7105 reserved) != 0x3); 7106 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7107 residual_count) != 0x4); 7108 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7109 data_length) != 0x8); 7110 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7111 reserved1) != 0xa); 7112 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7113 data) != 0xc); 7114 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 7115 7116 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7117 data_in_result) != 0x0); 7118 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7119 data_out_result) != 0x1); 7120 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7121 reserved) != 0x2); 7122 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7123 status) != 0x5); 7124 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7125 status_qualifier) != 0x6); 7126 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7127 sense_data_length) != 0x8); 7128 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7129 response_data_length) != 0xa); 7130 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7131 data_in_transferred) != 0xc); 7132 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7133 data_out_transferred) != 0x10); 7134 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7135 data) != 0x14); 7136 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) 
!= 0x114); 7137 7138 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7139 signature) != 0x0); 7140 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7141 function_and_status_code) != 0x8); 7142 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7143 max_admin_iq_elements) != 0x10); 7144 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7145 max_admin_oq_elements) != 0x11); 7146 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7147 admin_iq_element_length) != 0x12); 7148 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7149 admin_oq_element_length) != 0x13); 7150 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7151 max_reset_timeout) != 0x14); 7152 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7153 legacy_intx_status) != 0x18); 7154 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7155 legacy_intx_mask_set) != 0x1c); 7156 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7157 legacy_intx_mask_clear) != 0x20); 7158 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7159 device_status) != 0x40); 7160 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7161 admin_iq_pi_offset) != 0x48); 7162 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7163 admin_oq_ci_offset) != 0x50); 7164 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7165 admin_iq_element_array_addr) != 0x58); 7166 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7167 admin_oq_element_array_addr) != 0x60); 7168 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7169 admin_iq_ci_addr) != 0x68); 7170 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7171 admin_oq_pi_addr) != 0x70); 7172 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7173 admin_iq_num_elements) != 0x78); 7174 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7175 admin_oq_num_elements) != 0x79); 7176 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7177 admin_queue_int_msg_num) != 0x7a); 7178 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7179 device_error) != 0x80); 7180 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7181 error_details) != 0x88); 7182 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7183 device_reset) != 0x90); 7184 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7185 power_action) != 0x94); 7186 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 7187 7188 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7189 header.iu_type) != 0); 7190 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7191 header.iu_length) != 2); 7192 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7193 header.work_area) != 6); 7194 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7195 request_id) != 8); 7196 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7197 function_code) != 10); 7198 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7199 data.report_device_capability.buffer_length) != 44); 7200 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7201 data.report_device_capability.sg_descriptor) != 48); 7202 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7203 data.create_operational_iq.queue_id) != 12); 7204 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7205 data.create_operational_iq.element_array_addr) != 16); 7206 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7207 data.create_operational_iq.ci_addr) != 24); 7208 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7209 data.create_operational_iq.num_elements) != 32); 7210 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7211 data.create_operational_iq.element_length) != 34); 7212 
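/* These compile-time checks pin the offsets within the general admin request payloads; a change in smartpqi.h that shifts any of them fails the build here instead of silently altering the host/controller interface. */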
BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7213 data.create_operational_iq.queue_protocol) != 36); 7214 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7215 data.create_operational_oq.queue_id) != 12); 7216 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7217 data.create_operational_oq.element_array_addr) != 16); 7218 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7219 data.create_operational_oq.pi_addr) != 24); 7220 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7221 data.create_operational_oq.num_elements) != 32); 7222 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7223 data.create_operational_oq.element_length) != 34); 7224 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7225 data.create_operational_oq.queue_protocol) != 36); 7226 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7227 data.create_operational_oq.int_msg_num) != 40); 7228 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7229 data.create_operational_oq.coalescing_count) != 42); 7230 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7231 data.create_operational_oq.min_coalescing_time) != 44); 7232 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7233 data.create_operational_oq.max_coalescing_time) != 48); 7234 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7235 data.delete_operational_queue.queue_id) != 12); 7236 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 7237 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7238 data.create_operational_iq) != 64 - 11); 7239 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7240 data.create_operational_oq) != 64 - 11); 7241 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7242 data.delete_operational_queue) != 64 - 11); 7243 7244 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7245 header.iu_type) != 0); 7246 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7247 header.iu_length) != 2); 7248 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7249 header.work_area) != 6); 7250 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7251 request_id) != 8); 7252 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7253 function_code) != 10); 7254 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7255 status) != 11); 7256 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7257 data.create_operational_iq.status_descriptor) != 12); 7258 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7259 data.create_operational_iq.iq_pi_offset) != 16); 7260 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7261 data.create_operational_oq.status_descriptor) != 12); 7262 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7263 data.create_operational_oq.oq_ci_offset) != 16); 7264 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 7265 7266 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7267 header.iu_type) != 0); 7268 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7269 header.iu_length) != 2); 7270 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7271 header.response_queue_id) != 4); 7272 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7273 header.work_area) != 6); 7274 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7275 request_id) != 8); 7276 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7277 nexus_id) != 10); 7278 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7279 buffer_length) != 12); 7280 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7281 
lun_number) != 16); 7282 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7283 protocol_specific) != 24); 7284 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7285 error_index) != 27); 7286 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7287 cdb) != 32); 7288 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7289 sg_descriptors) != 64); 7290 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 7291 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7292 7293 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7294 header.iu_type) != 0); 7295 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7296 header.iu_length) != 2); 7297 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7298 header.response_queue_id) != 4); 7299 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7300 header.work_area) != 6); 7301 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7302 request_id) != 8); 7303 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7304 nexus_id) != 12); 7305 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7306 buffer_length) != 16); 7307 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7308 data_encryption_key_index) != 22); 7309 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7310 encrypt_tweak_lower) != 24); 7311 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7312 encrypt_tweak_upper) != 28); 7313 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7314 cdb) != 32); 7315 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7316 error_index) != 48); 7317 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7318 num_sg_descriptors) != 50); 7319 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7320 cdb_length) != 51); 7321 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7322 lun_number) != 52); 7323 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7324 sg_descriptors) != 64); 7325 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 7326 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7327 7328 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7329 header.iu_type) != 0); 7330 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7331 header.iu_length) != 2); 7332 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7333 request_id) != 8); 7334 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7335 error_index) != 10); 7336 7337 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7338 header.iu_type) != 0); 7339 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7340 header.iu_length) != 2); 7341 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7342 header.response_queue_id) != 4); 7343 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7344 request_id) != 8); 7345 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7346 data.report_event_configuration.buffer_length) != 12); 7347 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7348 data.report_event_configuration.sg_descriptors) != 16); 7349 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7350 data.set_event_configuration.global_event_oq_id) != 10); 7351 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7352 data.set_event_configuration.buffer_length) != 12); 7353 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7354 data.set_event_configuration.sg_descriptors) != 16); 7355 7356 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7357 max_inbound_iu_length) != 6); 7358 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7359 max_outbound_iu_length) != 14); 7360 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 7361 7362 BUILD_BUG_ON(offsetof(struct 
pqi_device_capability, 7363 data_length) != 0); 7364 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7365 iq_arbitration_priority_support_bitmask) != 8); 7366 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7367 maximum_aw_a) != 9); 7368 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7369 maximum_aw_b) != 10); 7370 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7371 maximum_aw_c) != 11); 7372 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7373 max_inbound_queues) != 16); 7374 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7375 max_elements_per_iq) != 18); 7376 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7377 max_iq_element_length) != 24); 7378 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7379 min_iq_element_length) != 26); 7380 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7381 max_outbound_queues) != 30); 7382 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7383 max_elements_per_oq) != 32); 7384 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7385 intr_coalescing_time_granularity) != 34); 7386 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7387 max_oq_element_length) != 36); 7388 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7389 min_oq_element_length) != 38); 7390 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7391 iu_layer_descriptors) != 64); 7392 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 7393 7394 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7395 event_type) != 0); 7396 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7397 oq_id) != 2); 7398 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 7399 7400 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7401 num_event_descriptors) != 2); 7402 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7403 descriptors) != 4); 7404 7405 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 7406 ARRAY_SIZE(pqi_supported_event_types)); 7407 7408 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7409 header.iu_type) != 0); 7410 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7411 header.iu_length) != 2); 7412 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7413 event_type) != 8); 7414 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7415 event_id) != 10); 7416 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7417 additional_event_id) != 12); 7418 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7419 data) != 16); 7420 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 7421 7422 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7423 header.iu_type) != 0); 7424 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7425 header.iu_length) != 2); 7426 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7427 event_type) != 8); 7428 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7429 event_id) != 10); 7430 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7431 additional_event_id) != 12); 7432 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 7433 7434 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7435 header.iu_type) != 0); 7436 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7437 header.iu_length) != 2); 7438 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7439 request_id) != 8); 7440 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7441 nexus_id) != 10); 7442 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7443 lun_number) != 16); 7444 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7445 protocol_specific) != 24); 7446 
BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7447 outbound_queue_id_to_manage) != 26); 7448 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7449 request_id_to_manage) != 28); 7450 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7451 task_management_function) != 30); 7452 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 7453 7454 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7455 header.iu_type) != 0); 7456 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7457 header.iu_length) != 2); 7458 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7459 request_id) != 8); 7460 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7461 nexus_id) != 10); 7462 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7463 additional_response_info) != 12); 7464 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7465 response_code) != 15); 7466 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 7467 7468 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7469 configured_logical_drive_count) != 0); 7470 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7471 configuration_signature) != 1); 7472 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7473 firmware_version) != 5); 7474 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7475 extended_logical_unit_count) != 154); 7476 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7477 firmware_build_number) != 190); 7478 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7479 controller_mode) != 292); 7480 7481 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7482 phys_bay_in_box) != 115); 7483 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7484 device_type) != 120); 7485 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7486 redundant_path_present_map) != 1736); 7487 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7488 active_path_number) != 1738); 7489 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7490 alternate_paths_phys_connector) != 1739); 7491 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7492 alternate_paths_phys_box_on_port) != 1755); 7493 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7494 current_queue_depth_limit) != 1796); 7495 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 7496 7497 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 7498 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 7499 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 7500 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7501 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 7502 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7503 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 7504 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 7505 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7506 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 7507 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 7508 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7509 7510 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 7511 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 7512 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 7513 } 7514