/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.0.4-100"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		0
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		100

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct
	pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0
	    || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_CACHE_FLUSH:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_CACHE_FLUSH;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
} 479 480 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 481 { 482 io_request->scmd = NULL; 483 io_request->status = 0; 484 io_request->error_info = NULL; 485 io_request->raid_bypass = false; 486 } 487 488 static struct pqi_io_request *pqi_alloc_io_request( 489 struct pqi_ctrl_info *ctrl_info) 490 { 491 struct pqi_io_request *io_request; 492 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ 493 494 while (1) { 495 io_request = &ctrl_info->io_request_pool[i]; 496 if (atomic_inc_return(&io_request->refcount) == 1) 497 break; 498 atomic_dec(&io_request->refcount); 499 i = (i + 1) % ctrl_info->max_io_slots; 500 } 501 502 /* benignly racy */ 503 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; 504 505 pqi_reinit_io_request(io_request); 506 507 return io_request; 508 } 509 510 static void pqi_free_io_request(struct pqi_io_request *io_request) 511 { 512 atomic_dec(&io_request->refcount); 513 } 514 515 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 516 struct bmic_identify_controller *buffer) 517 { 518 int rc; 519 int pci_direction; 520 struct pqi_raid_path_request request; 521 522 rc = pqi_build_raid_path_request(ctrl_info, &request, 523 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer, 524 sizeof(*buffer), 0, &pci_direction); 525 if (rc) 526 return rc; 527 528 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 529 NULL, NO_TIMEOUT); 530 531 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 532 pci_direction); 533 534 return rc; 535 } 536 537 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 538 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 539 { 540 int rc; 541 int pci_direction; 542 struct pqi_raid_path_request request; 543 544 rc = pqi_build_raid_path_request(ctrl_info, &request, 545 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page, 546 &pci_direction); 547 if (rc) 548 return rc; 549 550 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 551 NULL, NO_TIMEOUT); 552 553 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 554 pci_direction); 555 556 return rc; 557 } 558 559 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 560 struct pqi_scsi_dev *device, 561 struct bmic_identify_physical_device *buffer, 562 size_t buffer_length) 563 { 564 int rc; 565 int pci_direction; 566 u16 bmic_device_index; 567 struct pqi_raid_path_request request; 568 569 rc = pqi_build_raid_path_request(ctrl_info, &request, 570 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 571 buffer_length, 0, &pci_direction); 572 if (rc) 573 return rc; 574 575 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 576 request.cdb[2] = (u8)bmic_device_index; 577 request.cdb[9] = (u8)(bmic_device_index >> 8); 578 579 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 580 0, NULL, NO_TIMEOUT); 581 582 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 583 pci_direction); 584 585 return rc; 586 } 587 588 #define SA_CACHE_FLUSH_BUFFER_LENGTH 4 589 590 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info) 591 { 592 int rc; 593 struct pqi_raid_path_request request; 594 int pci_direction; 595 u8 *buffer; 596 597 /* 598 * Don't bother trying to flush the cache if the controller is 599 * locked up. 
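 * A controller that has been taken offline cannot complete the
 * synchronous BMIC request, so fail fast with -ENXIO instead of
 * queuing a flush that can never finish.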
600 */ 601 if (pqi_ctrl_offline(ctrl_info)) 602 return -ENXIO; 603 604 buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL); 605 if (!buffer) 606 return -ENOMEM; 607 608 rc = pqi_build_raid_path_request(ctrl_info, &request, 609 SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer, 610 SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction); 611 if (rc) 612 goto out; 613 614 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 615 0, NULL, NO_TIMEOUT); 616 617 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 618 pci_direction); 619 620 out: 621 kfree(buffer); 622 623 return rc; 624 } 625 626 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 627 void *buffer, size_t buffer_length) 628 { 629 int rc; 630 struct pqi_raid_path_request request; 631 int pci_direction; 632 633 rc = pqi_build_raid_path_request(ctrl_info, &request, 634 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer, 635 buffer_length, 0, &pci_direction); 636 if (rc) 637 return rc; 638 639 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 640 0, NULL, NO_TIMEOUT); 641 642 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 643 pci_direction); 644 645 return rc; 646 } 647 648 #pragma pack(1) 649 650 struct bmic_host_wellness_driver_version { 651 u8 start_tag[4]; 652 u8 driver_version_tag[2]; 653 __le16 driver_version_length; 654 char driver_version[32]; 655 u8 end_tag[2]; 656 }; 657 658 #pragma pack() 659 660 static int pqi_write_driver_version_to_host_wellness( 661 struct pqi_ctrl_info *ctrl_info) 662 { 663 int rc; 664 struct bmic_host_wellness_driver_version *buffer; 665 size_t buffer_length; 666 667 buffer_length = sizeof(*buffer); 668 669 buffer = kmalloc(buffer_length, GFP_KERNEL); 670 if (!buffer) 671 return -ENOMEM; 672 673 buffer->start_tag[0] = '<'; 674 buffer->start_tag[1] = 'H'; 675 buffer->start_tag[2] = 'W'; 676 buffer->start_tag[3] = '>'; 677 buffer->driver_version_tag[0] = 'D'; 678 buffer->driver_version_tag[1] = 'V'; 679 put_unaligned_le16(sizeof(buffer->driver_version), 680 &buffer->driver_version_length); 681 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, 682 sizeof(buffer->driver_version) - 1); 683 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; 684 buffer->end_tag[0] = 'Z'; 685 buffer->end_tag[1] = 'Z'; 686 687 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 688 689 kfree(buffer); 690 691 return rc; 692 } 693 694 #pragma pack(1) 695 696 struct bmic_host_wellness_time { 697 u8 start_tag[4]; 698 u8 time_tag[2]; 699 __le16 time_length; 700 u8 time[8]; 701 u8 dont_write_tag[2]; 702 u8 end_tag[2]; 703 }; 704 705 #pragma pack() 706 707 static int pqi_write_current_time_to_host_wellness( 708 struct pqi_ctrl_info *ctrl_info) 709 { 710 int rc; 711 struct bmic_host_wellness_time *buffer; 712 size_t buffer_length; 713 time64_t local_time; 714 unsigned int year; 715 struct tm tm; 716 717 buffer_length = sizeof(*buffer); 718 719 buffer = kmalloc(buffer_length, GFP_KERNEL); 720 if (!buffer) 721 return -ENOMEM; 722 723 buffer->start_tag[0] = '<'; 724 buffer->start_tag[1] = 'H'; 725 buffer->start_tag[2] = 'W'; 726 buffer->start_tag[3] = '>'; 727 buffer->time_tag[0] = 'T'; 728 buffer->time_tag[1] = 'D'; 729 put_unaligned_le16(sizeof(buffer->time), 730 &buffer->time_length); 731 732 local_time = ktime_get_real_seconds(); 733 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 734 year = tm.tm_year + 1900; 735 736 buffer->time[0] = bin2bcd(tm.tm_hour); 737 buffer->time[1] = bin2bcd(tm.tm_min); 738 buffer->time[2] = 
bin2bcd(tm.tm_sec); 739 buffer->time[3] = 0; 740 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 741 buffer->time[5] = bin2bcd(tm.tm_mday); 742 buffer->time[6] = bin2bcd(year / 100); 743 buffer->time[7] = bin2bcd(year % 100); 744 745 buffer->dont_write_tag[0] = 'D'; 746 buffer->dont_write_tag[1] = 'W'; 747 buffer->end_tag[0] = 'Z'; 748 buffer->end_tag[1] = 'Z'; 749 750 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 751 752 kfree(buffer); 753 754 return rc; 755 } 756 757 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 758 759 static void pqi_update_time_worker(struct work_struct *work) 760 { 761 int rc; 762 struct pqi_ctrl_info *ctrl_info; 763 764 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 765 update_time_work); 766 767 if (pqi_ctrl_offline(ctrl_info)) 768 return; 769 770 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 771 if (rc) 772 dev_warn(&ctrl_info->pci_dev->dev, 773 "error updating time on controller\n"); 774 775 schedule_delayed_work(&ctrl_info->update_time_work, 776 PQI_UPDATE_TIME_WORK_INTERVAL); 777 } 778 779 static inline void pqi_schedule_update_time_worker( 780 struct pqi_ctrl_info *ctrl_info) 781 { 782 schedule_delayed_work(&ctrl_info->update_time_work, 0); 783 } 784 785 static inline void pqi_cancel_update_time_worker( 786 struct pqi_ctrl_info *ctrl_info) 787 { 788 cancel_delayed_work_sync(&ctrl_info->update_time_work); 789 } 790 791 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 792 void *buffer, size_t buffer_length) 793 { 794 int rc; 795 int pci_direction; 796 struct pqi_raid_path_request request; 797 798 rc = pqi_build_raid_path_request(ctrl_info, &request, 799 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction); 800 if (rc) 801 return rc; 802 803 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 804 NULL, NO_TIMEOUT); 805 806 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 807 pci_direction); 808 809 return rc; 810 } 811 812 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 813 void **buffer) 814 { 815 int rc; 816 size_t lun_list_length; 817 size_t lun_data_length; 818 size_t new_lun_list_length; 819 void *lun_data = NULL; 820 struct report_lun_header *report_lun_header; 821 822 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 823 if (!report_lun_header) { 824 rc = -ENOMEM; 825 goto out; 826 } 827 828 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, 829 sizeof(*report_lun_header)); 830 if (rc) 831 goto out; 832 833 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 834 835 again: 836 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 837 838 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 839 if (!lun_data) { 840 rc = -ENOMEM; 841 goto out; 842 } 843 844 if (lun_list_length == 0) { 845 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 846 goto out; 847 } 848 849 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 850 if (rc) 851 goto out; 852 853 new_lun_list_length = get_unaligned_be32( 854 &((struct report_lun_header *)lun_data)->list_length); 855 856 if (new_lun_list_length > lun_list_length) { 857 lun_list_length = new_lun_list_length; 858 kfree(lun_data); 859 goto again; 860 } 861 862 out: 863 kfree(report_lun_header); 864 865 if (rc) { 866 kfree(lun_data); 867 lun_data = NULL; 868 } 869 870 *buffer = lun_data; 871 872 return rc; 873 } 874 875 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, 
876 void **buffer) 877 { 878 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, 879 buffer); 880 } 881 882 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, 883 void **buffer) 884 { 885 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 886 } 887 888 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 889 struct report_phys_lun_extended **physdev_list, 890 struct report_log_lun_extended **logdev_list) 891 { 892 int rc; 893 size_t logdev_list_length; 894 size_t logdev_data_length; 895 struct report_log_lun_extended *internal_logdev_list; 896 struct report_log_lun_extended *logdev_data; 897 struct report_lun_header report_lun_header; 898 899 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 900 if (rc) 901 dev_err(&ctrl_info->pci_dev->dev, 902 "report physical LUNs failed\n"); 903 904 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 905 if (rc) 906 dev_err(&ctrl_info->pci_dev->dev, 907 "report logical LUNs failed\n"); 908 909 /* 910 * Tack the controller itself onto the end of the logical device list. 911 */ 912 913 logdev_data = *logdev_list; 914 915 if (logdev_data) { 916 logdev_list_length = 917 get_unaligned_be32(&logdev_data->header.list_length); 918 } else { 919 memset(&report_lun_header, 0, sizeof(report_lun_header)); 920 logdev_data = 921 (struct report_log_lun_extended *)&report_lun_header; 922 logdev_list_length = 0; 923 } 924 925 logdev_data_length = sizeof(struct report_lun_header) + 926 logdev_list_length; 927 928 internal_logdev_list = kmalloc(logdev_data_length + 929 sizeof(struct report_log_lun_extended), GFP_KERNEL); 930 if (!internal_logdev_list) { 931 kfree(*logdev_list); 932 *logdev_list = NULL; 933 return -ENOMEM; 934 } 935 936 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 937 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 938 sizeof(struct report_log_lun_extended_entry)); 939 put_unaligned_be32(logdev_list_length + 940 sizeof(struct report_log_lun_extended_entry), 941 &internal_logdev_list->header.list_length); 942 943 kfree(*logdev_list); 944 *logdev_list = internal_logdev_list; 945 946 return 0; 947 } 948 949 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 950 int bus, int target, int lun) 951 { 952 device->bus = bus; 953 device->target = target; 954 device->lun = lun; 955 } 956 957 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 958 { 959 u8 *scsi3addr; 960 u32 lunid; 961 int bus; 962 int target; 963 int lun; 964 965 scsi3addr = device->scsi3addr; 966 lunid = get_unaligned_le32(scsi3addr); 967 968 if (pqi_is_hba_lunid(scsi3addr)) { 969 /* The specified device is the controller. */ 970 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 971 device->target_lun_valid = true; 972 return; 973 } 974 975 if (pqi_is_logical_device(device)) { 976 if (device->is_external_raid_device) { 977 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 978 target = (lunid >> 16) & 0x3fff; 979 lun = lunid & 0xff; 980 } else { 981 bus = PQI_RAID_VOLUME_BUS; 982 target = 0; 983 lun = lunid & 0x3fff; 984 } 985 pqi_set_bus_target_lun(device, bus, target, lun); 986 device->target_lun_valid = true; 987 return; 988 } 989 990 /* 991 * Defer target and LUN assignment for non-controller physical devices 992 * because the SAS transport layer will make these assignments later. 
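 * (Physical devices are exposed through the SAS transport class via
 * pqi_add_sas_device(), which is expected to pick the target number
 * when the device is added; only the bus is meaningful at this point.)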
993 */ 994 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 995 } 996 997 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 998 struct pqi_scsi_dev *device) 999 { 1000 int rc; 1001 u8 raid_level; 1002 u8 *buffer; 1003 1004 raid_level = SA_RAID_UNKNOWN; 1005 1006 buffer = kmalloc(64, GFP_KERNEL); 1007 if (buffer) { 1008 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1009 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 1010 if (rc == 0) { 1011 raid_level = buffer[8]; 1012 if (raid_level > SA_RAID_MAX) 1013 raid_level = SA_RAID_UNKNOWN; 1014 } 1015 kfree(buffer); 1016 } 1017 1018 device->raid_level = raid_level; 1019 } 1020 1021 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1022 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1023 { 1024 char *err_msg; 1025 u32 raid_map_size; 1026 u32 r5or6_blocks_per_row; 1027 unsigned int num_phys_disks; 1028 unsigned int num_raid_map_entries; 1029 1030 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1031 1032 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1033 err_msg = "RAID map too small"; 1034 goto bad_raid_map; 1035 } 1036 1037 if (raid_map_size > sizeof(*raid_map)) { 1038 err_msg = "RAID map too large"; 1039 goto bad_raid_map; 1040 } 1041 1042 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) * 1043 (get_unaligned_le16(&raid_map->data_disks_per_row) + 1044 get_unaligned_le16(&raid_map->metadata_disks_per_row)); 1045 num_raid_map_entries = num_phys_disks * 1046 get_unaligned_le16(&raid_map->row_cnt); 1047 1048 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) { 1049 err_msg = "invalid number of map entries in RAID map"; 1050 goto bad_raid_map; 1051 } 1052 1053 if (device->raid_level == SA_RAID_1) { 1054 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1055 err_msg = "invalid RAID-1 map"; 1056 goto bad_raid_map; 1057 } 1058 } else if (device->raid_level == SA_RAID_ADM) { 1059 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1060 err_msg = "invalid RAID-1(ADM) map"; 1061 goto bad_raid_map; 1062 } 1063 } else if ((device->raid_level == SA_RAID_5 || 1064 device->raid_level == SA_RAID_6) && 1065 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1066 /* RAID 50/60 */ 1067 r5or6_blocks_per_row = 1068 get_unaligned_le16(&raid_map->strip_size) * 1069 get_unaligned_le16(&raid_map->data_disks_per_row); 1070 if (r5or6_blocks_per_row == 0) { 1071 err_msg = "invalid RAID-5 or RAID-6 map"; 1072 goto bad_raid_map; 1073 } 1074 } 1075 1076 return 0; 1077 1078 bad_raid_map: 1079 dev_warn(&ctrl_info->pci_dev->dev, 1080 "scsi %d:%d:%d:%d %s\n", 1081 ctrl_info->scsi_host->host_no, 1082 device->bus, device->target, device->lun, err_msg); 1083 1084 return -EINVAL; 1085 } 1086 1087 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1088 struct pqi_scsi_dev *device) 1089 { 1090 int rc; 1091 int pci_direction; 1092 struct pqi_raid_path_request request; 1093 struct raid_map *raid_map; 1094 1095 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1096 if (!raid_map) 1097 return -ENOMEM; 1098 1099 rc = pqi_build_raid_path_request(ctrl_info, &request, 1100 CISS_GET_RAID_MAP, device->scsi3addr, raid_map, 1101 sizeof(*raid_map), 0, &pci_direction); 1102 if (rc) 1103 goto error; 1104 1105 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 1106 NULL, NO_TIMEOUT); 1107 1108 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 1109 pci_direction); 1110 1111 if (rc) 1112 goto error; 1113 1114 rc = 
pqi_validate_raid_map(ctrl_info, device, raid_map); 1115 if (rc) 1116 goto error; 1117 1118 device->raid_map = raid_map; 1119 1120 return 0; 1121 1122 error: 1123 kfree(raid_map); 1124 1125 return rc; 1126 } 1127 1128 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1129 struct pqi_scsi_dev *device) 1130 { 1131 int rc; 1132 u8 *buffer; 1133 u8 bypass_status; 1134 1135 buffer = kmalloc(64, GFP_KERNEL); 1136 if (!buffer) 1137 return; 1138 1139 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1140 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1141 if (rc) 1142 goto out; 1143 1144 #define RAID_BYPASS_STATUS 4 1145 #define RAID_BYPASS_CONFIGURED 0x1 1146 #define RAID_BYPASS_ENABLED 0x2 1147 1148 bypass_status = buffer[RAID_BYPASS_STATUS]; 1149 device->raid_bypass_configured = 1150 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1151 if (device->raid_bypass_configured && 1152 (bypass_status & RAID_BYPASS_ENABLED) && 1153 pqi_get_raid_map(ctrl_info, device) == 0) 1154 device->raid_bypass_enabled = true; 1155 1156 out: 1157 kfree(buffer); 1158 } 1159 1160 /* 1161 * Use vendor-specific VPD to determine online/offline status of a volume. 1162 */ 1163 1164 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1165 struct pqi_scsi_dev *device) 1166 { 1167 int rc; 1168 size_t page_length; 1169 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1170 bool volume_offline = true; 1171 u32 volume_flags; 1172 struct ciss_vpd_logical_volume_status *vpd; 1173 1174 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1175 if (!vpd) 1176 goto no_buffer; 1177 1178 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1179 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1180 if (rc) 1181 goto out; 1182 1183 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1184 volume_status) + vpd->page_length; 1185 if (page_length < sizeof(*vpd)) 1186 goto out; 1187 1188 volume_status = vpd->volume_status; 1189 volume_flags = get_unaligned_be32(&vpd->flags); 1190 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1191 1192 out: 1193 kfree(vpd); 1194 no_buffer: 1195 device->volume_status = volume_status; 1196 device->volume_offline = volume_offline; 1197 } 1198 1199 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1200 struct pqi_scsi_dev *device) 1201 { 1202 int rc; 1203 u8 *buffer; 1204 1205 buffer = kmalloc(64, GFP_KERNEL); 1206 if (!buffer) 1207 return -ENOMEM; 1208 1209 /* Send an inquiry to the device to see what it is. 
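 * Standard INQUIRY data layout: byte 0 bits 4:0 hold the peripheral
 * device type, bytes 8-15 the vendor ID, and bytes 16-31 the product
 * ID, which is what the offsets used below correspond to.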
*/ 1210 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); 1211 if (rc) 1212 goto out; 1213 1214 scsi_sanitize_inquiry_string(&buffer[8], 8); 1215 scsi_sanitize_inquiry_string(&buffer[16], 16); 1216 1217 device->devtype = buffer[0] & 0x1f; 1218 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1219 memcpy(device->model, &buffer[16], sizeof(device->model)); 1220 1221 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) { 1222 if (device->is_external_raid_device) { 1223 device->raid_level = SA_RAID_UNKNOWN; 1224 device->volume_status = CISS_LV_OK; 1225 device->volume_offline = false; 1226 } else { 1227 pqi_get_raid_level(ctrl_info, device); 1228 pqi_get_raid_bypass_status(ctrl_info, device); 1229 pqi_get_volume_status(ctrl_info, device); 1230 } 1231 } 1232 1233 out: 1234 kfree(buffer); 1235 1236 return rc; 1237 } 1238 1239 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, 1240 struct pqi_scsi_dev *device, 1241 struct bmic_identify_physical_device *id_phys) 1242 { 1243 int rc; 1244 1245 memset(id_phys, 0, sizeof(*id_phys)); 1246 1247 rc = pqi_identify_physical_device(ctrl_info, device, 1248 id_phys, sizeof(*id_phys)); 1249 if (rc) { 1250 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1251 return; 1252 } 1253 1254 device->queue_depth = 1255 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1256 device->device_type = id_phys->device_type; 1257 device->active_path_index = id_phys->active_path_number; 1258 device->path_map = id_phys->redundant_path_present_map; 1259 memcpy(&device->box, 1260 &id_phys->alternate_paths_phys_box_on_port, 1261 sizeof(device->box)); 1262 memcpy(&device->phys_connector, 1263 &id_phys->alternate_paths_phys_connector, 1264 sizeof(device->phys_connector)); 1265 device->bay = id_phys->phys_bay_in_box; 1266 } 1267 1268 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1269 struct pqi_scsi_dev *device) 1270 { 1271 char *status; 1272 static const char unknown_state_str[] = 1273 "Volume is in an unknown state (%u)"; 1274 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1275 1276 switch (device->volume_status) { 1277 case CISS_LV_OK: 1278 status = "Volume online"; 1279 break; 1280 case CISS_LV_FAILED: 1281 status = "Volume failed"; 1282 break; 1283 case CISS_LV_NOT_CONFIGURED: 1284 status = "Volume not configured"; 1285 break; 1286 case CISS_LV_DEGRADED: 1287 status = "Volume degraded"; 1288 break; 1289 case CISS_LV_READY_FOR_RECOVERY: 1290 status = "Volume ready for recovery operation"; 1291 break; 1292 case CISS_LV_UNDERGOING_RECOVERY: 1293 status = "Volume undergoing recovery"; 1294 break; 1295 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1296 status = "Wrong physical drive was replaced"; 1297 break; 1298 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1299 status = "A physical drive not properly connected"; 1300 break; 1301 case CISS_LV_HARDWARE_OVERHEATING: 1302 status = "Hardware is overheating"; 1303 break; 1304 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1305 status = "Hardware has overheated"; 1306 break; 1307 case CISS_LV_UNDERGOING_EXPANSION: 1308 status = "Volume undergoing expansion"; 1309 break; 1310 case CISS_LV_NOT_AVAILABLE: 1311 status = "Volume waiting for transforming volume"; 1312 break; 1313 case CISS_LV_QUEUED_FOR_EXPANSION: 1314 status = "Volume queued for expansion"; 1315 break; 1316 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1317 status = "Volume disabled due to SCSI ID conflict"; 1318 break; 1319 case CISS_LV_EJECTED: 1320 status = "Volume has 
been ejected"; 1321 break; 1322 case CISS_LV_UNDERGOING_ERASE: 1323 status = "Volume undergoing background erase"; 1324 break; 1325 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1326 status = "Volume ready for predictive spare rebuild"; 1327 break; 1328 case CISS_LV_UNDERGOING_RPI: 1329 status = "Volume undergoing rapid parity initialization"; 1330 break; 1331 case CISS_LV_PENDING_RPI: 1332 status = "Volume queued for rapid parity initialization"; 1333 break; 1334 case CISS_LV_ENCRYPTED_NO_KEY: 1335 status = "Encrypted volume inaccessible - key not present"; 1336 break; 1337 case CISS_LV_UNDERGOING_ENCRYPTION: 1338 status = "Volume undergoing encryption process"; 1339 break; 1340 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1341 status = "Volume undergoing encryption re-keying process"; 1342 break; 1343 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1344 status = "Volume encrypted but encryption is disabled"; 1345 break; 1346 case CISS_LV_PENDING_ENCRYPTION: 1347 status = "Volume pending migration to encrypted state"; 1348 break; 1349 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1350 status = "Volume pending encryption rekeying"; 1351 break; 1352 case CISS_LV_NOT_SUPPORTED: 1353 status = "Volume not supported on this controller"; 1354 break; 1355 case CISS_LV_STATUS_UNAVAILABLE: 1356 status = "Volume status not available"; 1357 break; 1358 default: 1359 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1360 unknown_state_str, device->volume_status); 1361 status = unknown_state_buffer; 1362 break; 1363 } 1364 1365 dev_info(&ctrl_info->pci_dev->dev, 1366 "scsi %d:%d:%d:%d %s\n", 1367 ctrl_info->scsi_host->host_no, 1368 device->bus, device->target, device->lun, status); 1369 } 1370 1371 static void pqi_rescan_worker(struct work_struct *work) 1372 { 1373 struct pqi_ctrl_info *ctrl_info; 1374 1375 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1376 rescan_work); 1377 1378 pqi_scan_scsi_devices(ctrl_info); 1379 } 1380 1381 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1382 struct pqi_scsi_dev *device) 1383 { 1384 int rc; 1385 1386 if (pqi_is_logical_device(device)) 1387 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1388 device->target, device->lun); 1389 else 1390 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1391 1392 return rc; 1393 } 1394 1395 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, 1396 struct pqi_scsi_dev *device) 1397 { 1398 if (pqi_is_logical_device(device)) 1399 scsi_remove_device(device->sdev); 1400 else 1401 pqi_remove_sas_device(device); 1402 } 1403 1404 /* Assumes the SCSI device list lock is held. 
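 * (That is, the caller must hold ctrl_info->scsi_device_list_lock
 * while the list is being walked.)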
*/ 1405 1406 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1407 int bus, int target, int lun) 1408 { 1409 struct pqi_scsi_dev *device; 1410 1411 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1412 scsi_device_list_entry) 1413 if (device->bus == bus && device->target == target && 1414 device->lun == lun) 1415 return device; 1416 1417 return NULL; 1418 } 1419 1420 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, 1421 struct pqi_scsi_dev *dev2) 1422 { 1423 if (dev1->is_physical_device != dev2->is_physical_device) 1424 return false; 1425 1426 if (dev1->is_physical_device) 1427 return dev1->wwid == dev2->wwid; 1428 1429 return memcmp(dev1->volume_id, dev2->volume_id, 1430 sizeof(dev1->volume_id)) == 0; 1431 } 1432 1433 enum pqi_find_result { 1434 DEVICE_NOT_FOUND, 1435 DEVICE_CHANGED, 1436 DEVICE_SAME, 1437 }; 1438 1439 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1440 struct pqi_scsi_dev *device_to_find, 1441 struct pqi_scsi_dev **matching_device) 1442 { 1443 struct pqi_scsi_dev *device; 1444 1445 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1446 scsi_device_list_entry) { 1447 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, 1448 device->scsi3addr)) { 1449 *matching_device = device; 1450 if (pqi_device_equal(device_to_find, device)) { 1451 if (device_to_find->volume_offline) 1452 return DEVICE_CHANGED; 1453 return DEVICE_SAME; 1454 } 1455 return DEVICE_CHANGED; 1456 } 1457 } 1458 1459 return DEVICE_NOT_FOUND; 1460 } 1461 1462 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1463 1464 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1465 char *action, struct pqi_scsi_dev *device) 1466 { 1467 ssize_t count; 1468 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1469 1470 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 1471 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 1472 1473 if (device->target_lun_valid) 1474 count += snprintf(buffer + count, 1475 PQI_DEV_INFO_BUFFER_LENGTH - count, 1476 "%d:%d", 1477 device->target, 1478 device->lun); 1479 else 1480 count += snprintf(buffer + count, 1481 PQI_DEV_INFO_BUFFER_LENGTH - count, 1482 "-:-"); 1483 1484 if (pqi_is_logical_device(device)) 1485 count += snprintf(buffer + count, 1486 PQI_DEV_INFO_BUFFER_LENGTH - count, 1487 " %08x%08x", 1488 *((u32 *)&device->scsi3addr), 1489 *((u32 *)&device->scsi3addr[4])); 1490 else 1491 count += snprintf(buffer + count, 1492 PQI_DEV_INFO_BUFFER_LENGTH - count, 1493 " %016llx", device->sas_address); 1494 1495 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 1496 " %s %.8s %.16s ", 1497 scsi_device_type(device->devtype), 1498 device->vendor, 1499 device->model); 1500 1501 if (pqi_is_logical_device(device)) { 1502 if (device->devtype == TYPE_DISK) 1503 count += snprintf(buffer + count, 1504 PQI_DEV_INFO_BUFFER_LENGTH - count, 1505 "SSDSmartPathCap%c En%c %-12s", 1506 device->raid_bypass_configured ? '+' : '-', 1507 device->raid_bypass_enabled ? '+' : '-', 1508 pqi_raid_level_to_string(device->raid_level)); 1509 } else { 1510 count += snprintf(buffer + count, 1511 PQI_DEV_INFO_BUFFER_LENGTH - count, 1512 "AIO%c", device->aio_enabled ? '+' : '-'); 1513 if (device->devtype == TYPE_DISK || 1514 device->devtype == TYPE_ZBC) 1515 count += snprintf(buffer + count, 1516 PQI_DEV_INFO_BUFFER_LENGTH - count, 1517 " qd=%-6d", device->queue_depth); 1518 } 1519 1520 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 1521 } 1522 1523 /* Assumes the SCSI device list lock is held. 
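 * Note that ownership of new_device->raid_map is transferred to the
 * existing device below; the new device's pointer is cleared so the
 * map is not freed along with the temporary device structure.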
*/ 1524 1525 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, 1526 struct pqi_scsi_dev *new_device) 1527 { 1528 existing_device->devtype = new_device->devtype; 1529 existing_device->device_type = new_device->device_type; 1530 existing_device->bus = new_device->bus; 1531 if (new_device->target_lun_valid) { 1532 existing_device->target = new_device->target; 1533 existing_device->lun = new_device->lun; 1534 existing_device->target_lun_valid = true; 1535 } 1536 1537 /* By definition, the scsi3addr and wwid fields are already the same. */ 1538 1539 existing_device->is_physical_device = new_device->is_physical_device; 1540 existing_device->is_external_raid_device = 1541 new_device->is_external_raid_device; 1542 existing_device->aio_enabled = new_device->aio_enabled; 1543 memcpy(existing_device->vendor, new_device->vendor, 1544 sizeof(existing_device->vendor)); 1545 memcpy(existing_device->model, new_device->model, 1546 sizeof(existing_device->model)); 1547 existing_device->sas_address = new_device->sas_address; 1548 existing_device->raid_level = new_device->raid_level; 1549 existing_device->queue_depth = new_device->queue_depth; 1550 existing_device->aio_handle = new_device->aio_handle; 1551 existing_device->volume_status = new_device->volume_status; 1552 existing_device->active_path_index = new_device->active_path_index; 1553 existing_device->path_map = new_device->path_map; 1554 existing_device->bay = new_device->bay; 1555 memcpy(existing_device->box, new_device->box, 1556 sizeof(existing_device->box)); 1557 memcpy(existing_device->phys_connector, new_device->phys_connector, 1558 sizeof(existing_device->phys_connector)); 1559 existing_device->offload_to_mirror = 0; 1560 kfree(existing_device->raid_map); 1561 existing_device->raid_map = new_device->raid_map; 1562 existing_device->raid_bypass_configured = 1563 new_device->raid_bypass_configured; 1564 existing_device->raid_bypass_enabled = 1565 new_device->raid_bypass_enabled; 1566 1567 /* To prevent this from being freed later. */ 1568 new_device->raid_map = NULL; 1569 } 1570 1571 static inline void pqi_free_device(struct pqi_scsi_dev *device) 1572 { 1573 if (device) { 1574 kfree(device->raid_map); 1575 kfree(device); 1576 } 1577 } 1578 1579 /* 1580 * Called when exposing a new device to the OS fails in order to re-adjust 1581 * our internal SCSI device list to match the SCSI ML's view. 1582 */ 1583 1584 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 1585 struct pqi_scsi_dev *device) 1586 { 1587 unsigned long flags; 1588 1589 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1590 list_del(&device->scsi_device_list_entry); 1591 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1592 1593 /* Allow the device structure to be freed later. */ 1594 device->keep_device = false; 1595 } 1596 1597 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 1598 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 1599 { 1600 int rc; 1601 unsigned int i; 1602 unsigned long flags; 1603 enum pqi_find_result find_result; 1604 struct pqi_scsi_dev *device; 1605 struct pqi_scsi_dev *next; 1606 struct pqi_scsi_dev *matching_device; 1607 LIST_HEAD(add_list); 1608 LIST_HEAD(delete_list); 1609 1610 /* 1611 * The idea here is to do as little work as possible while holding the 1612 * spinlock. That's why we go to great pains to defer anything other 1613 * than updating the internal device list until after we release the 1614 * spinlock. 
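 * The sequence is: (1) under the lock, mark every existing device as
 * gone, diff the new list against the existing one, and move departed
 * devices onto a local delete list; (2) after dropping the lock,
 * remove departed devices from the SCSI midlayer, adjust queue depths,
 * and expose the newly discovered devices.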
1615 */ 1616 1617 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1618 1619 /* Assume that all devices in the existing list have gone away. */ 1620 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1621 scsi_device_list_entry) 1622 device->device_gone = true; 1623 1624 for (i = 0; i < num_new_devices; i++) { 1625 device = new_device_list[i]; 1626 1627 find_result = pqi_scsi_find_entry(ctrl_info, device, 1628 &matching_device); 1629 1630 switch (find_result) { 1631 case DEVICE_SAME: 1632 /* 1633 * The newly found device is already in the existing 1634 * device list. 1635 */ 1636 device->new_device = false; 1637 matching_device->device_gone = false; 1638 pqi_scsi_update_device(matching_device, device); 1639 break; 1640 case DEVICE_NOT_FOUND: 1641 /* 1642 * The newly found device is NOT in the existing device 1643 * list. 1644 */ 1645 device->new_device = true; 1646 break; 1647 case DEVICE_CHANGED: 1648 /* 1649 * The original device has gone away and we need to add 1650 * the new device. 1651 */ 1652 device->new_device = true; 1653 break; 1654 } 1655 } 1656 1657 /* Process all devices that have gone away. */ 1658 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 1659 scsi_device_list_entry) { 1660 if (device->device_gone) { 1661 list_del(&device->scsi_device_list_entry); 1662 list_add_tail(&device->delete_list_entry, &delete_list); 1663 } 1664 } 1665 1666 /* Process all new devices. */ 1667 for (i = 0; i < num_new_devices; i++) { 1668 device = new_device_list[i]; 1669 if (!device->new_device) 1670 continue; 1671 if (device->volume_offline) 1672 continue; 1673 list_add_tail(&device->scsi_device_list_entry, 1674 &ctrl_info->scsi_device_list); 1675 list_add_tail(&device->add_list_entry, &add_list); 1676 /* To prevent this device structure from being freed later. */ 1677 device->keep_device = true; 1678 } 1679 1680 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1681 1682 /* Remove all devices that have gone away. */ 1683 list_for_each_entry_safe(device, next, &delete_list, 1684 delete_list_entry) { 1685 if (device->volume_offline) { 1686 pqi_dev_info(ctrl_info, "offline", device); 1687 pqi_show_volume_status(ctrl_info, device); 1688 } else { 1689 pqi_dev_info(ctrl_info, "removed", device); 1690 } 1691 if (device->sdev) 1692 pqi_remove_device(ctrl_info, device); 1693 list_del(&device->delete_list_entry); 1694 pqi_free_device(device); 1695 } 1696 1697 /* 1698 * Notify the SCSI ML if the queue depth of any existing device has 1699 * changed. 1700 */ 1701 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1702 scsi_device_list_entry) { 1703 if (device->sdev && device->queue_depth != 1704 device->advertised_queue_depth) { 1705 device->advertised_queue_depth = device->queue_depth; 1706 scsi_change_queue_depth(device->sdev, 1707 device->advertised_queue_depth); 1708 } 1709 } 1710 1711 /* Expose any new devices. 
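 * Devices that cannot be added to the SCSI midlayer are unwound with
 * pqi_fixup_botched_add() so that the internal device list stays
 * consistent with what the midlayer actually knows about.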
*/ 1712 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 1713 if (!device->sdev) { 1714 pqi_dev_info(ctrl_info, "added", device); 1715 rc = pqi_add_device(ctrl_info, device); 1716 if (rc) { 1717 dev_warn(&ctrl_info->pci_dev->dev, 1718 "scsi %d:%d:%d:%d addition failed, device not added\n", 1719 ctrl_info->scsi_host->host_no, 1720 device->bus, device->target, 1721 device->lun); 1722 pqi_fixup_botched_add(ctrl_info, device); 1723 } 1724 } 1725 } 1726 } 1727 1728 static bool pqi_is_supported_device(struct pqi_scsi_dev *device) 1729 { 1730 bool is_supported = false; 1731 1732 switch (device->devtype) { 1733 case TYPE_DISK: 1734 case TYPE_ZBC: 1735 case TYPE_TAPE: 1736 case TYPE_MEDIUM_CHANGER: 1737 case TYPE_ENCLOSURE: 1738 is_supported = true; 1739 break; 1740 case TYPE_RAID: 1741 /* 1742 * Only support the HBA controller itself as a RAID 1743 * controller. If it's a RAID controller other than 1744 * the HBA itself (an external RAID controller, for 1745 * example), we don't support it. 1746 */ 1747 if (pqi_is_hba_lunid(device->scsi3addr)) 1748 is_supported = true; 1749 break; 1750 } 1751 1752 return is_supported; 1753 } 1754 1755 static inline bool pqi_skip_device(u8 *scsi3addr) 1756 { 1757 /* Ignore all masked devices. */ 1758 if (MASKED_DEVICE(scsi3addr)) 1759 return true; 1760 1761 return false; 1762 } 1763 1764 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1765 { 1766 int i; 1767 int rc; 1768 LIST_HEAD(new_device_list_head); 1769 struct report_phys_lun_extended *physdev_list = NULL; 1770 struct report_log_lun_extended *logdev_list = NULL; 1771 struct report_phys_lun_extended_entry *phys_lun_ext_entry; 1772 struct report_log_lun_extended_entry *log_lun_ext_entry; 1773 struct bmic_identify_physical_device *id_phys = NULL; 1774 u32 num_physicals; 1775 u32 num_logicals; 1776 struct pqi_scsi_dev **new_device_list = NULL; 1777 struct pqi_scsi_dev *device; 1778 struct pqi_scsi_dev *next; 1779 unsigned int num_new_devices; 1780 unsigned int num_valid_devices; 1781 bool is_physical_device; 1782 u8 *scsi3addr; 1783 static char *out_of_memory_msg = 1784 "failed to allocate memory, device discovery stopped"; 1785 1786 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 1787 if (rc) 1788 goto out; 1789 1790 if (physdev_list) 1791 num_physicals = 1792 get_unaligned_be32(&physdev_list->header.list_length) 1793 / sizeof(physdev_list->lun_entries[0]); 1794 else 1795 num_physicals = 0; 1796 1797 if (logdev_list) 1798 num_logicals = 1799 get_unaligned_be32(&logdev_list->header.list_length) 1800 / sizeof(logdev_list->lun_entries[0]); 1801 else 1802 num_logicals = 0; 1803 1804 if (num_physicals) { 1805 /* 1806 * We need this buffer for calls to pqi_get_physical_disk_info() 1807 * below. We allocate it here instead of inside 1808 * pqi_get_physical_disk_info() because it's a fairly large 1809 * buffer. 
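 * A single buffer is allocated per rescan and reused for every
 * physical device in the list.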
1810 */ 1811 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 1812 if (!id_phys) { 1813 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1814 out_of_memory_msg); 1815 rc = -ENOMEM; 1816 goto out; 1817 } 1818 } 1819 1820 num_new_devices = num_physicals + num_logicals; 1821 1822 new_device_list = kmalloc(sizeof(*new_device_list) * 1823 num_new_devices, GFP_KERNEL); 1824 if (!new_device_list) { 1825 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 1826 rc = -ENOMEM; 1827 goto out; 1828 } 1829 1830 for (i = 0; i < num_new_devices; i++) { 1831 device = kzalloc(sizeof(*device), GFP_KERNEL); 1832 if (!device) { 1833 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1834 out_of_memory_msg); 1835 rc = -ENOMEM; 1836 goto out; 1837 } 1838 list_add_tail(&device->new_device_list_entry, 1839 &new_device_list_head); 1840 } 1841 1842 device = NULL; 1843 num_valid_devices = 0; 1844 1845 for (i = 0; i < num_new_devices; i++) { 1846 1847 if (i < num_physicals) { 1848 is_physical_device = true; 1849 phys_lun_ext_entry = &physdev_list->lun_entries[i]; 1850 log_lun_ext_entry = NULL; 1851 scsi3addr = phys_lun_ext_entry->lunid; 1852 } else { 1853 is_physical_device = false; 1854 phys_lun_ext_entry = NULL; 1855 log_lun_ext_entry = 1856 &logdev_list->lun_entries[i - num_physicals]; 1857 scsi3addr = log_lun_ext_entry->lunid; 1858 } 1859 1860 if (is_physical_device && pqi_skip_device(scsi3addr)) 1861 continue; 1862 1863 if (device) 1864 device = list_next_entry(device, new_device_list_entry); 1865 else 1866 device = list_first_entry(&new_device_list_head, 1867 struct pqi_scsi_dev, new_device_list_entry); 1868 1869 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 1870 device->is_physical_device = is_physical_device; 1871 if (!is_physical_device) 1872 device->is_external_raid_device = 1873 pqi_is_external_raid_addr(scsi3addr); 1874 1875 /* Gather information about the device. 
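 * pqi_get_device_info() issues a standard INQUIRY to obtain the
 * vendor, model, and device type, and for internal logical volumes
 * also fetches the RAID level, bypass status, and volume status.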
*/ 1876 rc = pqi_get_device_info(ctrl_info, device); 1877 if (rc == -ENOMEM) { 1878 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1879 out_of_memory_msg); 1880 goto out; 1881 } 1882 if (rc) { 1883 if (device->is_physical_device) 1884 dev_warn(&ctrl_info->pci_dev->dev, 1885 "obtaining device info failed, skipping physical device %016llx\n", 1886 get_unaligned_be64( 1887 &phys_lun_ext_entry->wwid)); 1888 else 1889 dev_warn(&ctrl_info->pci_dev->dev, 1890 "obtaining device info failed, skipping logical device %08x%08x\n", 1891 *((u32 *)&device->scsi3addr), 1892 *((u32 *)&device->scsi3addr[4])); 1893 rc = 0; 1894 continue; 1895 } 1896 1897 if (!pqi_is_supported_device(device)) 1898 continue; 1899 1900 pqi_assign_bus_target_lun(device); 1901 1902 if (device->is_physical_device) { 1903 device->wwid = phys_lun_ext_entry->wwid; 1904 if ((phys_lun_ext_entry->device_flags & 1905 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && 1906 phys_lun_ext_entry->aio_handle) 1907 device->aio_enabled = true; 1908 } else { 1909 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 1910 sizeof(device->volume_id)); 1911 } 1912 1913 switch (device->devtype) { 1914 case TYPE_DISK: 1915 case TYPE_ZBC: 1916 case TYPE_ENCLOSURE: 1917 if (device->is_physical_device) { 1918 device->sas_address = 1919 get_unaligned_be64(&device->wwid); 1920 if (device->devtype == TYPE_DISK || 1921 device->devtype == TYPE_ZBC) { 1922 device->aio_handle = 1923 phys_lun_ext_entry->aio_handle; 1924 pqi_get_physical_disk_info(ctrl_info, 1925 device, id_phys); 1926 } 1927 } 1928 break; 1929 } 1930 1931 new_device_list[num_valid_devices++] = device; 1932 } 1933 1934 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 1935 1936 out: 1937 list_for_each_entry_safe(device, next, &new_device_list_head, 1938 new_device_list_entry) { 1939 if (device->keep_device) 1940 continue; 1941 list_del(&device->new_device_list_entry); 1942 pqi_free_device(device); 1943 } 1944 1945 kfree(new_device_list); 1946 kfree(physdev_list); 1947 kfree(logdev_list); 1948 kfree(id_phys); 1949 1950 return rc; 1951 } 1952 1953 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1954 { 1955 unsigned long flags; 1956 struct pqi_scsi_dev *device; 1957 1958 while (1) { 1959 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1960 1961 device = list_first_entry_or_null(&ctrl_info->scsi_device_list, 1962 struct pqi_scsi_dev, scsi_device_list_entry); 1963 if (device) 1964 list_del(&device->scsi_device_list_entry); 1965 1966 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 1967 flags); 1968 1969 if (!device) 1970 break; 1971 1972 if (device->sdev) 1973 pqi_remove_device(ctrl_info, device); 1974 pqi_free_device(device); 1975 } 1976 } 1977 1978 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1979 { 1980 int rc; 1981 1982 if (pqi_ctrl_offline(ctrl_info)) 1983 return -ENXIO; 1984 1985 mutex_lock(&ctrl_info->scan_mutex); 1986 1987 rc = pqi_update_scsi_devices(ctrl_info); 1988 if (rc) 1989 pqi_schedule_rescan_worker_delayed(ctrl_info); 1990 1991 mutex_unlock(&ctrl_info->scan_mutex); 1992 1993 return rc; 1994 } 1995 1996 static void pqi_scan_start(struct Scsi_Host *shost) 1997 { 1998 pqi_scan_scsi_devices(shost_to_hba(shost)); 1999 } 2000 2001 /* Returns TRUE if scan is finished. 
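 * pqi_scan_scsi_devices() holds scan_mutex for the duration of the
 * device update, so mutex_is_locked() is a cheap proxy for
 * scan-in-progress.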
*/ 2002 2003 static int pqi_scan_finished(struct Scsi_Host *shost, 2004 unsigned long elapsed_time) 2005 { 2006 struct pqi_ctrl_info *ctrl_info; 2007 2008 ctrl_info = shost_priv(shost); 2009 2010 return !mutex_is_locked(&ctrl_info->scan_mutex); 2011 } 2012 2013 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2014 { 2015 mutex_lock(&ctrl_info->scan_mutex); 2016 mutex_unlock(&ctrl_info->scan_mutex); 2017 } 2018 2019 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2020 { 2021 mutex_lock(&ctrl_info->lun_reset_mutex); 2022 mutex_unlock(&ctrl_info->lun_reset_mutex); 2023 } 2024 2025 static inline void pqi_set_encryption_info( 2026 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2027 u64 first_block) 2028 { 2029 u32 volume_blk_size; 2030 2031 /* 2032 * Set the encryption tweak values based on logical block address. 2033 * If the block size is 512, the tweak value is equal to the LBA. 2034 * For other block sizes, tweak value is (LBA * block size) / 512. 2035 */ 2036 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2037 if (volume_blk_size != 512) 2038 first_block = (first_block * volume_blk_size) / 512; 2039 2040 encryption_info->data_encryption_key_index = 2041 get_unaligned_le16(&raid_map->data_encryption_key_index); 2042 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2043 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2044 } 2045 2046 /* 2047 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2048 */ 2049 2050 #define PQI_RAID_BYPASS_INELIGIBLE 1 2051 2052 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2053 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2054 struct pqi_queue_group *queue_group) 2055 { 2056 struct raid_map *raid_map; 2057 bool is_write = false; 2058 u32 map_index; 2059 u64 first_block; 2060 u64 last_block; 2061 u32 block_cnt; 2062 u32 blocks_per_row; 2063 u64 first_row; 2064 u64 last_row; 2065 u32 first_row_offset; 2066 u32 last_row_offset; 2067 u32 first_column; 2068 u32 last_column; 2069 u64 r0_first_row; 2070 u64 r0_last_row; 2071 u32 r5or6_blocks_per_row; 2072 u64 r5or6_first_row; 2073 u64 r5or6_last_row; 2074 u32 r5or6_first_row_offset; 2075 u32 r5or6_last_row_offset; 2076 u32 r5or6_first_column; 2077 u32 r5or6_last_column; 2078 u16 data_disks_per_row; 2079 u32 total_disks_per_row; 2080 u16 layout_map_count; 2081 u32 stripesize; 2082 u16 strip_size; 2083 u32 first_group; 2084 u32 last_group; 2085 u32 current_group; 2086 u32 map_row; 2087 u32 aio_handle; 2088 u64 disk_block; 2089 u32 disk_block_cnt; 2090 u8 cdb[16]; 2091 u8 cdb_length; 2092 int offload_to_mirror; 2093 struct pqi_encryption_info *encryption_info_ptr; 2094 struct pqi_encryption_info encryption_info; 2095 #if BITS_PER_LONG == 32 2096 u64 tmpdiv; 2097 #endif 2098 2099 /* Check for valid opcode, get LBA and block count. 
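 * Only simple READ/WRITE (6/10/12/16) CDBs are bypass candidates;
 * every other opcode is sent down the normal RAID path.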
*/ 2100 switch (scmd->cmnd[0]) { 2101 case WRITE_6: 2102 is_write = true; 2103 /* fall through */ 2104 case READ_6: 2105 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2106 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2107 block_cnt = (u32)scmd->cmnd[4]; 2108 if (block_cnt == 0) 2109 block_cnt = 256; 2110 break; 2111 case WRITE_10: 2112 is_write = true; 2113 /* fall through */ 2114 case READ_10: 2115 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2116 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2117 break; 2118 case WRITE_12: 2119 is_write = true; 2120 /* fall through */ 2121 case READ_12: 2122 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2123 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2124 break; 2125 case WRITE_16: 2126 is_write = true; 2127 /* fall through */ 2128 case READ_16: 2129 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2130 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2131 break; 2132 default: 2133 /* Process via normal I/O path. */ 2134 return PQI_RAID_BYPASS_INELIGIBLE; 2135 } 2136 2137 /* Check for write to non-RAID-0. */ 2138 if (is_write && device->raid_level != SA_RAID_0) 2139 return PQI_RAID_BYPASS_INELIGIBLE; 2140 2141 if (unlikely(block_cnt == 0)) 2142 return PQI_RAID_BYPASS_INELIGIBLE; 2143 2144 last_block = first_block + block_cnt - 1; 2145 raid_map = device->raid_map; 2146 2147 /* Check for invalid block or wraparound. */ 2148 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2149 last_block < first_block) 2150 return PQI_RAID_BYPASS_INELIGIBLE; 2151 2152 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2153 strip_size = get_unaligned_le16(&raid_map->strip_size); 2154 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2155 2156 /* Calculate stripe information for the request. */ 2157 blocks_per_row = data_disks_per_row * strip_size; 2158 #if BITS_PER_LONG == 32 2159 tmpdiv = first_block; 2160 do_div(tmpdiv, blocks_per_row); 2161 first_row = tmpdiv; 2162 tmpdiv = last_block; 2163 do_div(tmpdiv, blocks_per_row); 2164 last_row = tmpdiv; 2165 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2166 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2167 tmpdiv = first_row_offset; 2168 do_div(tmpdiv, strip_size); 2169 first_column = tmpdiv; 2170 tmpdiv = last_row_offset; 2171 do_div(tmpdiv, strip_size); 2172 last_column = tmpdiv; 2173 #else 2174 first_row = first_block / blocks_per_row; 2175 last_row = last_block / blocks_per_row; 2176 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2177 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2178 first_column = first_row_offset / strip_size; 2179 last_column = last_row_offset / strip_size; 2180 #endif 2181 2182 /* If this isn't a single row/column then give to the controller. */ 2183 if (first_row != last_row || first_column != last_column) 2184 return PQI_RAID_BYPASS_INELIGIBLE; 2185 2186 /* Proceeding with driver mapping. 
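 * The request is confined to a single row and column (one strip), so
 * look up the owning RAID map entry and translate it to a physical
 * disk and AIO handle.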
*/ 2187 total_disks_per_row = data_disks_per_row + 2188 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2189 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2190 get_unaligned_le16(&raid_map->row_cnt); 2191 map_index = (map_row * total_disks_per_row) + first_column; 2192 2193 /* RAID 1 */ 2194 if (device->raid_level == SA_RAID_1) { 2195 if (device->offload_to_mirror) 2196 map_index += data_disks_per_row; 2197 device->offload_to_mirror = !device->offload_to_mirror; 2198 } else if (device->raid_level == SA_RAID_ADM) { 2199 /* RAID ADM */ 2200 /* 2201 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2202 * divisible by 3. 2203 */ 2204 offload_to_mirror = device->offload_to_mirror; 2205 if (offload_to_mirror == 0) { 2206 /* use physical disk in the first mirrored group. */ 2207 map_index %= data_disks_per_row; 2208 } else { 2209 do { 2210 /* 2211 * Determine mirror group that map_index 2212 * indicates. 2213 */ 2214 current_group = map_index / data_disks_per_row; 2215 2216 if (offload_to_mirror != current_group) { 2217 if (current_group < 2218 layout_map_count - 1) { 2219 /* 2220 * Select raid index from 2221 * next group. 2222 */ 2223 map_index += data_disks_per_row; 2224 current_group++; 2225 } else { 2226 /* 2227 * Select raid index from first 2228 * group. 2229 */ 2230 map_index %= data_disks_per_row; 2231 current_group = 0; 2232 } 2233 } 2234 } while (offload_to_mirror != current_group); 2235 } 2236 2237 /* Set mirror group to use next time. */ 2238 offload_to_mirror = 2239 (offload_to_mirror >= layout_map_count - 1) ? 2240 0 : offload_to_mirror + 1; 2241 WARN_ON(offload_to_mirror >= layout_map_count); 2242 device->offload_to_mirror = offload_to_mirror; 2243 /* 2244 * Avoid direct use of device->offload_to_mirror within this 2245 * function since multiple threads might simultaneously 2246 * increment it beyond the range of device->layout_map_count -1. 
2247 */ 2248 } else if ((device->raid_level == SA_RAID_5 || 2249 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2250 /* RAID 50/60 */ 2251 /* Verify first and last block are in same RAID group */ 2252 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2253 stripesize = r5or6_blocks_per_row * layout_map_count; 2254 #if BITS_PER_LONG == 32 2255 tmpdiv = first_block; 2256 first_group = do_div(tmpdiv, stripesize); 2257 tmpdiv = first_group; 2258 do_div(tmpdiv, r5or6_blocks_per_row); 2259 first_group = tmpdiv; 2260 tmpdiv = last_block; 2261 last_group = do_div(tmpdiv, stripesize); 2262 tmpdiv = last_group; 2263 do_div(tmpdiv, r5or6_blocks_per_row); 2264 last_group = tmpdiv; 2265 #else 2266 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2267 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2268 #endif 2269 if (first_group != last_group) 2270 return PQI_RAID_BYPASS_INELIGIBLE; 2271 2272 /* Verify request is in a single row of RAID 5/6 */ 2273 #if BITS_PER_LONG == 32 2274 tmpdiv = first_block; 2275 do_div(tmpdiv, stripesize); 2276 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2277 tmpdiv = last_block; 2278 do_div(tmpdiv, stripesize); 2279 r5or6_last_row = r0_last_row = tmpdiv; 2280 #else 2281 first_row = r5or6_first_row = r0_first_row = 2282 first_block / stripesize; 2283 r5or6_last_row = r0_last_row = last_block / stripesize; 2284 #endif 2285 if (r5or6_first_row != r5or6_last_row) 2286 return PQI_RAID_BYPASS_INELIGIBLE; 2287 2288 /* Verify request is in a single column */ 2289 #if BITS_PER_LONG == 32 2290 tmpdiv = first_block; 2291 first_row_offset = do_div(tmpdiv, stripesize); 2292 tmpdiv = first_row_offset; 2293 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2294 r5or6_first_row_offset = first_row_offset; 2295 tmpdiv = last_block; 2296 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2297 tmpdiv = r5or6_last_row_offset; 2298 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2299 tmpdiv = r5or6_first_row_offset; 2300 do_div(tmpdiv, strip_size); 2301 first_column = r5or6_first_column = tmpdiv; 2302 tmpdiv = r5or6_last_row_offset; 2303 do_div(tmpdiv, strip_size); 2304 r5or6_last_column = tmpdiv; 2305 #else 2306 first_row_offset = r5or6_first_row_offset = 2307 (u32)((first_block % stripesize) % 2308 r5or6_blocks_per_row); 2309 2310 r5or6_last_row_offset = 2311 (u32)((last_block % stripesize) % 2312 r5or6_blocks_per_row); 2313 2314 first_column = r5or6_first_row_offset / strip_size; 2315 r5or6_first_column = first_column; 2316 r5or6_last_column = r5or6_last_row_offset / strip_size; 2317 #endif 2318 if (r5or6_first_column != r5or6_last_column) 2319 return PQI_RAID_BYPASS_INELIGIBLE; 2320 2321 /* Request is eligible */ 2322 map_row = 2323 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2324 get_unaligned_le16(&raid_map->row_cnt); 2325 2326 map_index = (first_group * 2327 (get_unaligned_le16(&raid_map->row_cnt) * 2328 total_disks_per_row)) + 2329 (map_row * total_disks_per_row) + first_column; 2330 } 2331 2332 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) 2333 return PQI_RAID_BYPASS_INELIGIBLE; 2334 2335 aio_handle = raid_map->disk_data[map_index].aio_handle; 2336 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2337 first_row * strip_size + 2338 (first_row_offset - first_column * strip_size); 2339 disk_block_cnt = block_cnt; 2340 2341 /* Handle differing logical/physical block sizes. 
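 * When phys_blk_shift is non-zero, the starting disk block and block
 * count are left-shifted so they are expressed in the drive's
 * physical block units.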
*/ 2342 if (raid_map->phys_blk_shift) { 2343 disk_block <<= raid_map->phys_blk_shift; 2344 disk_block_cnt <<= raid_map->phys_blk_shift; 2345 } 2346 2347 if (unlikely(disk_block_cnt > 0xffff)) 2348 return PQI_RAID_BYPASS_INELIGIBLE; 2349 2350 /* Build the new CDB for the physical disk I/O. */ 2351 if (disk_block > 0xffffffff) { 2352 cdb[0] = is_write ? WRITE_16 : READ_16; 2353 cdb[1] = 0; 2354 put_unaligned_be64(disk_block, &cdb[2]); 2355 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2356 cdb[14] = 0; 2357 cdb[15] = 0; 2358 cdb_length = 16; 2359 } else { 2360 cdb[0] = is_write ? WRITE_10 : READ_10; 2361 cdb[1] = 0; 2362 put_unaligned_be32((u32)disk_block, &cdb[2]); 2363 cdb[6] = 0; 2364 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2365 cdb[9] = 0; 2366 cdb_length = 10; 2367 } 2368 2369 if (get_unaligned_le16(&raid_map->flags) & 2370 RAID_MAP_ENCRYPTION_ENABLED) { 2371 pqi_set_encryption_info(&encryption_info, raid_map, 2372 first_block); 2373 encryption_info_ptr = &encryption_info; 2374 } else { 2375 encryption_info_ptr = NULL; 2376 } 2377 2378 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2379 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2380 } 2381 2382 #define PQI_STATUS_IDLE 0x0 2383 2384 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2385 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2386 2387 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2388 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2389 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2390 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2391 #define PQI_DEVICE_STATE_ERROR 0x4 2392 2393 #define PQI_MODE_READY_TIMEOUT_SECS 30 2394 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2395 2396 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2397 { 2398 struct pqi_device_registers __iomem *pqi_registers; 2399 unsigned long timeout; 2400 u64 signature; 2401 u8 status; 2402 2403 pqi_registers = ctrl_info->pqi_registers; 2404 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 2405 2406 while (1) { 2407 signature = readq(&pqi_registers->signature); 2408 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2409 sizeof(signature)) == 0) 2410 break; 2411 if (time_after(jiffies, timeout)) { 2412 dev_err(&ctrl_info->pci_dev->dev, 2413 "timed out waiting for PQI signature\n"); 2414 return -ETIMEDOUT; 2415 } 2416 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2417 } 2418 2419 while (1) { 2420 status = readb(&pqi_registers->function_and_status_code); 2421 if (status == PQI_STATUS_IDLE) 2422 break; 2423 if (time_after(jiffies, timeout)) { 2424 dev_err(&ctrl_info->pci_dev->dev, 2425 "timed out waiting for PQI IDLE\n"); 2426 return -ETIMEDOUT; 2427 } 2428 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2429 } 2430 2431 while (1) { 2432 if (readl(&pqi_registers->device_status) == 2433 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2434 break; 2435 if (time_after(jiffies, timeout)) { 2436 dev_err(&ctrl_info->pci_dev->dev, 2437 "timed out waiting for PQI all registers ready\n"); 2438 return -ETIMEDOUT; 2439 } 2440 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2441 } 2442 2443 return 0; 2444 } 2445 2446 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2447 { 2448 struct pqi_scsi_dev *device; 2449 2450 device = io_request->scmd->device->hostdata; 2451 device->raid_bypass_enabled = false; 2452 device->aio_enabled = false; 2453 } 2454 2455 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2456 { 2457 struct pqi_ctrl_info *ctrl_info; 2458 struct pqi_scsi_dev *device; 2459 2460 device = 
sdev->hostdata; 2461 if (device->device_offline) 2462 return; 2463 2464 device->device_offline = true; 2465 scsi_device_set_state(sdev, SDEV_OFFLINE); 2466 ctrl_info = shost_to_hba(sdev->host); 2467 pqi_schedule_rescan_worker(ctrl_info); 2468 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", 2469 path, ctrl_info->scsi_host->host_no, device->bus, 2470 device->target, device->lun); 2471 } 2472 2473 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2474 { 2475 u8 scsi_status; 2476 u8 host_byte; 2477 struct scsi_cmnd *scmd; 2478 struct pqi_raid_error_info *error_info; 2479 size_t sense_data_length; 2480 int residual_count; 2481 int xfer_count; 2482 struct scsi_sense_hdr sshdr; 2483 2484 scmd = io_request->scmd; 2485 if (!scmd) 2486 return; 2487 2488 error_info = io_request->error_info; 2489 scsi_status = error_info->status; 2490 host_byte = DID_OK; 2491 2492 switch (error_info->data_out_result) { 2493 case PQI_DATA_IN_OUT_GOOD: 2494 break; 2495 case PQI_DATA_IN_OUT_UNDERFLOW: 2496 xfer_count = 2497 get_unaligned_le32(&error_info->data_out_transferred); 2498 residual_count = scsi_bufflen(scmd) - xfer_count; 2499 scsi_set_resid(scmd, residual_count); 2500 if (xfer_count < scmd->underflow) 2501 host_byte = DID_SOFT_ERROR; 2502 break; 2503 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2504 case PQI_DATA_IN_OUT_ABORTED: 2505 host_byte = DID_ABORT; 2506 break; 2507 case PQI_DATA_IN_OUT_TIMEOUT: 2508 host_byte = DID_TIME_OUT; 2509 break; 2510 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2511 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2512 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2513 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2514 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2515 case PQI_DATA_IN_OUT_ERROR: 2516 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2517 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2518 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2519 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2520 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2521 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2522 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2523 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2524 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2525 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2526 default: 2527 host_byte = DID_ERROR; 2528 break; 2529 } 2530 2531 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2532 if (sense_data_length == 0) 2533 sense_data_length = 2534 get_unaligned_le16(&error_info->response_data_length); 2535 if (sense_data_length) { 2536 if (sense_data_length > sizeof(error_info->data)) 2537 sense_data_length = sizeof(error_info->data); 2538 2539 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2540 scsi_normalize_sense(error_info->data, 2541 sense_data_length, &sshdr) && 2542 sshdr.sense_key == HARDWARE_ERROR && 2543 sshdr.asc == 0x3e && 2544 sshdr.ascq == 0x1) { 2545 pqi_take_device_offline(scmd->device, "RAID"); 2546 host_byte = DID_NO_CONNECT; 2547 } 2548 2549 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2550 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2551 memcpy(scmd->sense_buffer, error_info->data, 2552 sense_data_length); 2553 } 2554 2555 scmd->result = scsi_status; 2556 set_host_byte(scmd, host_byte); 2557 } 2558 2559 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2560 { 2561 u8 scsi_status; 2562 u8 host_byte; 2563 struct scsi_cmnd *scmd; 2564 struct pqi_aio_error_info *error_info; 2565 size_t sense_data_length; 2566 int residual_count; 2567 int xfer_count; 2568 bool device_offline; 
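	/*
	 * Translate the AIO service response and status into a SCSI
	 * status and host byte, copying back any returned sense data.
	 */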
2569 2570 scmd = io_request->scmd; 2571 error_info = io_request->error_info; 2572 host_byte = DID_OK; 2573 sense_data_length = 0; 2574 device_offline = false; 2575 2576 switch (error_info->service_response) { 2577 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2578 scsi_status = error_info->status; 2579 break; 2580 case PQI_AIO_SERV_RESPONSE_FAILURE: 2581 switch (error_info->status) { 2582 case PQI_AIO_STATUS_IO_ABORTED: 2583 scsi_status = SAM_STAT_TASK_ABORTED; 2584 break; 2585 case PQI_AIO_STATUS_UNDERRUN: 2586 scsi_status = SAM_STAT_GOOD; 2587 residual_count = get_unaligned_le32( 2588 &error_info->residual_count); 2589 scsi_set_resid(scmd, residual_count); 2590 xfer_count = scsi_bufflen(scmd) - residual_count; 2591 if (xfer_count < scmd->underflow) 2592 host_byte = DID_SOFT_ERROR; 2593 break; 2594 case PQI_AIO_STATUS_OVERRUN: 2595 scsi_status = SAM_STAT_GOOD; 2596 break; 2597 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2598 pqi_aio_path_disabled(io_request); 2599 scsi_status = SAM_STAT_GOOD; 2600 io_request->status = -EAGAIN; 2601 break; 2602 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2603 case PQI_AIO_STATUS_INVALID_DEVICE: 2604 if (!io_request->raid_bypass) { 2605 device_offline = true; 2606 pqi_take_device_offline(scmd->device, "AIO"); 2607 host_byte = DID_NO_CONNECT; 2608 } 2609 scsi_status = SAM_STAT_CHECK_CONDITION; 2610 break; 2611 case PQI_AIO_STATUS_IO_ERROR: 2612 default: 2613 scsi_status = SAM_STAT_CHECK_CONDITION; 2614 break; 2615 } 2616 break; 2617 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2618 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2619 scsi_status = SAM_STAT_GOOD; 2620 break; 2621 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2622 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2623 default: 2624 scsi_status = SAM_STAT_CHECK_CONDITION; 2625 break; 2626 } 2627 2628 if (error_info->data_present) { 2629 sense_data_length = 2630 get_unaligned_le16(&error_info->data_length); 2631 if (sense_data_length) { 2632 if (sense_data_length > sizeof(error_info->data)) 2633 sense_data_length = sizeof(error_info->data); 2634 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2635 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2636 memcpy(scmd->sense_buffer, error_info->data, 2637 sense_data_length); 2638 } 2639 } 2640 2641 if (device_offline && sense_data_length == 0) 2642 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2643 0x3e, 0x1); 2644 2645 scmd->result = scsi_status; 2646 set_host_byte(scmd, host_byte); 2647 } 2648 2649 static void pqi_process_io_error(unsigned int iu_type, 2650 struct pqi_io_request *io_request) 2651 { 2652 switch (iu_type) { 2653 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2654 pqi_process_raid_io_error(io_request); 2655 break; 2656 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2657 pqi_process_aio_io_error(io_request); 2658 break; 2659 } 2660 } 2661 2662 static int pqi_interpret_task_management_response( 2663 struct pqi_task_management_response *response) 2664 { 2665 int rc; 2666 2667 switch (response->response_code) { 2668 case SOP_TMF_COMPLETE: 2669 case SOP_TMF_FUNCTION_SUCCEEDED: 2670 rc = 0; 2671 break; 2672 default: 2673 rc = -EIO; 2674 break; 2675 } 2676 2677 return rc; 2678 } 2679 2680 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2681 struct pqi_queue_group *queue_group) 2682 { 2683 unsigned int num_responses; 2684 pqi_index_t oq_pi; 2685 pqi_index_t oq_ci; 2686 struct pqi_io_request *io_request; 2687 struct pqi_io_response *response; 2688 u16 request_id; 2689 2690 num_responses = 0; 2691 oq_ci = queue_group->oq_ci_copy; 2692 2693 while (1) { 2694 
oq_pi = *queue_group->oq_pi; 2695 if (oq_pi == oq_ci) 2696 break; 2697 2698 num_responses++; 2699 response = queue_group->oq_element_array + 2700 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2701 2702 request_id = get_unaligned_le16(&response->request_id); 2703 WARN_ON(request_id >= ctrl_info->max_io_slots); 2704 2705 io_request = &ctrl_info->io_request_pool[request_id]; 2706 WARN_ON(atomic_read(&io_request->refcount) == 0); 2707 2708 switch (response->header.iu_type) { 2709 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2710 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2711 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2712 break; 2713 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2714 io_request->status = 2715 pqi_interpret_task_management_response( 2716 (void *)response); 2717 break; 2718 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2719 pqi_aio_path_disabled(io_request); 2720 io_request->status = -EAGAIN; 2721 break; 2722 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2723 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2724 io_request->error_info = ctrl_info->error_buffer + 2725 (get_unaligned_le16(&response->error_index) * 2726 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2727 pqi_process_io_error(response->header.iu_type, 2728 io_request); 2729 break; 2730 default: 2731 dev_err(&ctrl_info->pci_dev->dev, 2732 "unexpected IU type: 0x%x\n", 2733 response->header.iu_type); 2734 break; 2735 } 2736 2737 io_request->io_complete_callback(io_request, 2738 io_request->context); 2739 2740 /* 2741 * Note that the I/O request structure CANNOT BE TOUCHED after 2742 * returning from the I/O completion callback! 2743 */ 2744 2745 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2746 } 2747 2748 if (num_responses) { 2749 queue_group->oq_ci_copy = oq_ci; 2750 writel(oq_ci, queue_group->oq_ci); 2751 } 2752 2753 return num_responses; 2754 } 2755 2756 static inline unsigned int pqi_num_elements_free(unsigned int pi, 2757 unsigned int ci, unsigned int elements_in_queue) 2758 { 2759 unsigned int num_elements_used; 2760 2761 if (pi >= ci) 2762 num_elements_used = pi - ci; 2763 else 2764 num_elements_used = elements_in_queue - ci + pi; 2765 2766 return elements_in_queue - num_elements_used - 1; 2767 } 2768 2769 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 2770 struct pqi_event_acknowledge_request *iu, size_t iu_length) 2771 { 2772 pqi_index_t iq_pi; 2773 pqi_index_t iq_ci; 2774 unsigned long flags; 2775 void *next_element; 2776 struct pqi_queue_group *queue_group; 2777 2778 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 2779 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 2780 2781 while (1) { 2782 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 2783 2784 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 2785 iq_ci = *queue_group->iq_ci[RAID_PATH]; 2786 2787 if (pqi_num_elements_free(iq_pi, iq_ci, 2788 ctrl_info->num_elements_per_iq)) 2789 break; 2790 2791 spin_unlock_irqrestore( 2792 &queue_group->submit_lock[RAID_PATH], flags); 2793 2794 if (pqi_ctrl_offline(ctrl_info)) 2795 return; 2796 } 2797 2798 next_element = queue_group->iq_element_array[RAID_PATH] + 2799 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 2800 2801 memcpy(next_element, iu, iu_length); 2802 2803 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 2804 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 2805 2806 /* 2807 * This write notifies the controller that an IU is available to be 2808 * processed. 
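 * The submit_lock is still held across this doorbell write, so
 * iq_pi_copy and the hardware PI register stay consistent.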
2809 */ 2810 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 2811 2812 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 2813 } 2814 2815 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 2816 struct pqi_event *event) 2817 { 2818 struct pqi_event_acknowledge_request request; 2819 2820 memset(&request, 0, sizeof(request)); 2821 2822 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 2823 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 2824 &request.header.iu_length); 2825 request.event_type = event->event_type; 2826 request.event_id = event->event_id; 2827 request.additional_event_id = event->additional_event_id; 2828 2829 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 2830 } 2831 2832 static void pqi_event_worker(struct work_struct *work) 2833 { 2834 unsigned int i; 2835 struct pqi_ctrl_info *ctrl_info; 2836 struct pqi_event *event; 2837 2838 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 2839 2840 pqi_ctrl_busy(ctrl_info); 2841 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 2842 if (pqi_ctrl_offline(ctrl_info)) 2843 goto out; 2844 2845 pqi_schedule_rescan_worker_delayed(ctrl_info); 2846 2847 event = ctrl_info->events; 2848 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 2849 if (event->pending) { 2850 event->pending = false; 2851 pqi_acknowledge_event(ctrl_info, event); 2852 } 2853 event++; 2854 } 2855 2856 out: 2857 pqi_ctrl_unbusy(ctrl_info); 2858 } 2859 2860 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 2861 2862 static void pqi_heartbeat_timer_handler(unsigned long data) 2863 { 2864 int num_interrupts; 2865 u32 heartbeat_count; 2866 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data; 2867 2868 pqi_check_ctrl_health(ctrl_info); 2869 if (pqi_ctrl_offline(ctrl_info)) 2870 return; 2871 2872 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 2873 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 2874 2875 if (num_interrupts == ctrl_info->previous_num_interrupts) { 2876 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 2877 dev_err(&ctrl_info->pci_dev->dev, 2878 "no heartbeat detected - last heartbeat count: %u\n", 2879 heartbeat_count); 2880 pqi_take_ctrl_offline(ctrl_info); 2881 return; 2882 } 2883 } else { 2884 ctrl_info->previous_num_interrupts = num_interrupts; 2885 } 2886 2887 ctrl_info->previous_heartbeat_count = heartbeat_count; 2888 mod_timer(&ctrl_info->heartbeat_timer, 2889 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 2890 } 2891 2892 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2893 { 2894 if (!ctrl_info->heartbeat_counter) 2895 return; 2896 2897 ctrl_info->previous_num_interrupts = 2898 atomic_read(&ctrl_info->num_interrupts); 2899 ctrl_info->previous_heartbeat_count = 2900 pqi_read_heartbeat_counter(ctrl_info); 2901 2902 ctrl_info->heartbeat_timer.expires = 2903 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 2904 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info; 2905 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler; 2906 add_timer(&ctrl_info->heartbeat_timer); 2907 } 2908 2909 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2910 { 2911 del_timer_sync(&ctrl_info->heartbeat_timer); 2912 } 2913 2914 static inline int pqi_event_type_to_event_index(unsigned int event_type) 2915 { 2916 int index; 2917 2918 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 2919 if (event_type == pqi_supported_event_types[index]) 2920 return index; 2921 2922 return -1; 2923 } 
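/*
 * Event handling flow: pqi_process_event_intr() marks each supported
 * event as pending and schedules event_work; pqi_event_worker() later
 * acknowledges every pending event and schedules a delayed rescan.
 */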
2924 2925 static inline bool pqi_is_supported_event(unsigned int event_type) 2926 { 2927 return pqi_event_type_to_event_index(event_type) != -1; 2928 } 2929 2930 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 2931 { 2932 unsigned int num_events; 2933 pqi_index_t oq_pi; 2934 pqi_index_t oq_ci; 2935 struct pqi_event_queue *event_queue; 2936 struct pqi_event_response *response; 2937 struct pqi_event *event; 2938 int event_index; 2939 2940 event_queue = &ctrl_info->event_queue; 2941 num_events = 0; 2942 oq_ci = event_queue->oq_ci_copy; 2943 2944 while (1) { 2945 oq_pi = *event_queue->oq_pi; 2946 if (oq_pi == oq_ci) 2947 break; 2948 2949 num_events++; 2950 response = event_queue->oq_element_array + 2951 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 2952 2953 event_index = 2954 pqi_event_type_to_event_index(response->event_type); 2955 2956 if (event_index >= 0) { 2957 if (response->request_acknowlege) { 2958 event = &ctrl_info->events[event_index]; 2959 event->pending = true; 2960 event->event_type = response->event_type; 2961 event->event_id = response->event_id; 2962 event->additional_event_id = 2963 response->additional_event_id; 2964 } 2965 } 2966 2967 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 2968 } 2969 2970 if (num_events) { 2971 event_queue->oq_ci_copy = oq_ci; 2972 writel(oq_ci, event_queue->oq_ci); 2973 schedule_work(&ctrl_info->event_work); 2974 } 2975 2976 return num_events; 2977 } 2978 2979 #define PQI_LEGACY_INTX_MASK 0x1 2980 2981 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 2982 bool enable_intx) 2983 { 2984 u32 intx_mask; 2985 struct pqi_device_registers __iomem *pqi_registers; 2986 volatile void __iomem *register_addr; 2987 2988 pqi_registers = ctrl_info->pqi_registers; 2989 2990 if (enable_intx) 2991 register_addr = &pqi_registers->legacy_intx_mask_clear; 2992 else 2993 register_addr = &pqi_registers->legacy_intx_mask_set; 2994 2995 intx_mask = readl(register_addr); 2996 intx_mask |= PQI_LEGACY_INTX_MASK; 2997 writel(intx_mask, register_addr); 2998 } 2999 3000 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3001 enum pqi_irq_mode new_mode) 3002 { 3003 switch (ctrl_info->irq_mode) { 3004 case IRQ_MODE_MSIX: 3005 switch (new_mode) { 3006 case IRQ_MODE_MSIX: 3007 break; 3008 case IRQ_MODE_INTX: 3009 pqi_configure_legacy_intx(ctrl_info, true); 3010 sis_disable_msix(ctrl_info); 3011 sis_enable_intx(ctrl_info); 3012 break; 3013 case IRQ_MODE_NONE: 3014 sis_disable_msix(ctrl_info); 3015 break; 3016 } 3017 break; 3018 case IRQ_MODE_INTX: 3019 switch (new_mode) { 3020 case IRQ_MODE_MSIX: 3021 pqi_configure_legacy_intx(ctrl_info, false); 3022 sis_disable_intx(ctrl_info); 3023 sis_enable_msix(ctrl_info); 3024 break; 3025 case IRQ_MODE_INTX: 3026 break; 3027 case IRQ_MODE_NONE: 3028 pqi_configure_legacy_intx(ctrl_info, false); 3029 sis_disable_intx(ctrl_info); 3030 break; 3031 } 3032 break; 3033 case IRQ_MODE_NONE: 3034 switch (new_mode) { 3035 case IRQ_MODE_MSIX: 3036 sis_enable_msix(ctrl_info); 3037 break; 3038 case IRQ_MODE_INTX: 3039 pqi_configure_legacy_intx(ctrl_info, true); 3040 sis_enable_intx(ctrl_info); 3041 break; 3042 case IRQ_MODE_NONE: 3043 break; 3044 } 3045 break; 3046 } 3047 3048 ctrl_info->irq_mode = new_mode; 3049 } 3050 3051 #define PQI_LEGACY_INTX_PENDING 0x1 3052 3053 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3054 { 3055 bool valid_irq; 3056 u32 intx_status; 3057 3058 switch (ctrl_info->irq_mode) { 3059 case IRQ_MODE_MSIX: 3060 valid_irq = true; 3061 break; 
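	/*
	 * In INTx mode, the pending bit in legacy_intx_status indicates
	 * whether this controller actually raised the interrupt.
	 */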
3062 case IRQ_MODE_INTX: 3063 intx_status = 3064 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3065 if (intx_status & PQI_LEGACY_INTX_PENDING) 3066 valid_irq = true; 3067 else 3068 valid_irq = false; 3069 break; 3070 case IRQ_MODE_NONE: 3071 default: 3072 valid_irq = false; 3073 break; 3074 } 3075 3076 return valid_irq; 3077 } 3078 3079 static irqreturn_t pqi_irq_handler(int irq, void *data) 3080 { 3081 struct pqi_ctrl_info *ctrl_info; 3082 struct pqi_queue_group *queue_group; 3083 unsigned int num_responses_handled; 3084 3085 queue_group = data; 3086 ctrl_info = queue_group->ctrl_info; 3087 3088 if (!pqi_is_valid_irq(ctrl_info)) 3089 return IRQ_NONE; 3090 3091 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3092 3093 if (irq == ctrl_info->event_irq) 3094 num_responses_handled += pqi_process_event_intr(ctrl_info); 3095 3096 if (num_responses_handled) 3097 atomic_inc(&ctrl_info->num_interrupts); 3098 3099 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3100 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3101 3102 return IRQ_HANDLED; 3103 } 3104 3105 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3106 { 3107 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3108 int i; 3109 int rc; 3110 3111 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3112 3113 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3114 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3115 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3116 if (rc) { 3117 dev_err(&pci_dev->dev, 3118 "irq %u init failed with error %d\n", 3119 pci_irq_vector(pci_dev, i), rc); 3120 return rc; 3121 } 3122 ctrl_info->num_msix_vectors_initialized++; 3123 } 3124 3125 return 0; 3126 } 3127 3128 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3129 { 3130 int i; 3131 3132 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3133 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3134 &ctrl_info->queue_groups[i]); 3135 3136 ctrl_info->num_msix_vectors_initialized = 0; 3137 } 3138 3139 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3140 { 3141 int num_vectors_enabled; 3142 3143 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3144 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3145 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3146 if (num_vectors_enabled < 0) { 3147 dev_err(&ctrl_info->pci_dev->dev, 3148 "MSI-X init failed with error %d\n", 3149 num_vectors_enabled); 3150 return num_vectors_enabled; 3151 } 3152 3153 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3154 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3155 return 0; 3156 } 3157 3158 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3159 { 3160 if (ctrl_info->num_msix_vectors_enabled) { 3161 pci_free_irq_vectors(ctrl_info->pci_dev); 3162 ctrl_info->num_msix_vectors_enabled = 0; 3163 } 3164 } 3165 3166 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3167 { 3168 unsigned int i; 3169 size_t alloc_length; 3170 size_t element_array_length_per_iq; 3171 size_t element_array_length_per_oq; 3172 void *element_array; 3173 void *next_queue_index; 3174 void *aligned_pointer; 3175 unsigned int num_inbound_queues; 3176 unsigned int num_outbound_queues; 3177 unsigned int num_queue_indexes; 3178 struct pqi_queue_group *queue_group; 3179 3180 element_array_length_per_iq = 3181 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3182 ctrl_info->num_elements_per_iq; 3183 element_array_length_per_oq = 3184 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3185 
ctrl_info->num_elements_per_oq; 3186 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3187 num_outbound_queues = ctrl_info->num_queue_groups; 3188 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3189 3190 aligned_pointer = NULL; 3191 3192 for (i = 0; i < num_inbound_queues; i++) { 3193 aligned_pointer = PTR_ALIGN(aligned_pointer, 3194 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3195 aligned_pointer += element_array_length_per_iq; 3196 } 3197 3198 for (i = 0; i < num_outbound_queues; i++) { 3199 aligned_pointer = PTR_ALIGN(aligned_pointer, 3200 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3201 aligned_pointer += element_array_length_per_oq; 3202 } 3203 3204 aligned_pointer = PTR_ALIGN(aligned_pointer, 3205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3206 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3207 PQI_EVENT_OQ_ELEMENT_LENGTH; 3208 3209 for (i = 0; i < num_queue_indexes; i++) { 3210 aligned_pointer = PTR_ALIGN(aligned_pointer, 3211 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3212 aligned_pointer += sizeof(pqi_index_t); 3213 } 3214 3215 alloc_length = (size_t)aligned_pointer + 3216 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3217 3218 alloc_length += PQI_EXTRA_SGL_MEMORY; 3219 3220 ctrl_info->queue_memory_base = 3221 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3222 alloc_length, 3223 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3224 3225 if (!ctrl_info->queue_memory_base) 3226 return -ENOMEM; 3227 3228 ctrl_info->queue_memory_length = alloc_length; 3229 3230 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3231 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3232 3233 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3234 queue_group = &ctrl_info->queue_groups[i]; 3235 queue_group->iq_element_array[RAID_PATH] = element_array; 3236 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3237 ctrl_info->queue_memory_base_dma_handle + 3238 (element_array - ctrl_info->queue_memory_base); 3239 element_array += element_array_length_per_iq; 3240 element_array = PTR_ALIGN(element_array, 3241 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3242 queue_group->iq_element_array[AIO_PATH] = element_array; 3243 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3244 ctrl_info->queue_memory_base_dma_handle + 3245 (element_array - ctrl_info->queue_memory_base); 3246 element_array += element_array_length_per_iq; 3247 element_array = PTR_ALIGN(element_array, 3248 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3249 } 3250 3251 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3252 queue_group = &ctrl_info->queue_groups[i]; 3253 queue_group->oq_element_array = element_array; 3254 queue_group->oq_element_array_bus_addr = 3255 ctrl_info->queue_memory_base_dma_handle + 3256 (element_array - ctrl_info->queue_memory_base); 3257 element_array += element_array_length_per_oq; 3258 element_array = PTR_ALIGN(element_array, 3259 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3260 } 3261 3262 ctrl_info->event_queue.oq_element_array = element_array; 3263 ctrl_info->event_queue.oq_element_array_bus_addr = 3264 ctrl_info->queue_memory_base_dma_handle + 3265 (element_array - ctrl_info->queue_memory_base); 3266 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3267 PQI_EVENT_OQ_ELEMENT_LENGTH; 3268 3269 next_queue_index = PTR_ALIGN(element_array, 3270 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3271 3272 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3273 queue_group = &ctrl_info->queue_groups[i]; 3274 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3275 queue_group->iq_ci_bus_addr[RAID_PATH] = 3276 ctrl_info->queue_memory_base_dma_handle + 3277 (next_queue_index - 
ctrl_info->queue_memory_base); 3278 next_queue_index += sizeof(pqi_index_t); 3279 next_queue_index = PTR_ALIGN(next_queue_index, 3280 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3281 queue_group->iq_ci[AIO_PATH] = next_queue_index; 3282 queue_group->iq_ci_bus_addr[AIO_PATH] = 3283 ctrl_info->queue_memory_base_dma_handle + 3284 (next_queue_index - ctrl_info->queue_memory_base); 3285 next_queue_index += sizeof(pqi_index_t); 3286 next_queue_index = PTR_ALIGN(next_queue_index, 3287 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3288 queue_group->oq_pi = next_queue_index; 3289 queue_group->oq_pi_bus_addr = 3290 ctrl_info->queue_memory_base_dma_handle + 3291 (next_queue_index - ctrl_info->queue_memory_base); 3292 next_queue_index += sizeof(pqi_index_t); 3293 next_queue_index = PTR_ALIGN(next_queue_index, 3294 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3295 } 3296 3297 ctrl_info->event_queue.oq_pi = next_queue_index; 3298 ctrl_info->event_queue.oq_pi_bus_addr = 3299 ctrl_info->queue_memory_base_dma_handle + 3300 (next_queue_index - ctrl_info->queue_memory_base); 3301 3302 return 0; 3303 } 3304 3305 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3306 { 3307 unsigned int i; 3308 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3309 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3310 3311 /* 3312 * Initialize the backpointers to the controller structure in 3313 * each operational queue group structure. 3314 */ 3315 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3316 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3317 3318 /* 3319 * Assign IDs to all operational queues. Note that the IDs 3320 * assigned to operational IQs are independent of the IDs 3321 * assigned to operational OQs. 3322 */ 3323 ctrl_info->event_queue.oq_id = next_oq_id++; 3324 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3325 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3326 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3327 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3328 } 3329 3330 /* 3331 * Assign MSI-X table entry indexes to all queues. Note that the 3332 * interrupt for the event queue is shared with the first queue group. 
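 * (MSI-X vector 0 therefore services both the event queue and queue
 * group 0; pqi_irq_handler() drains event responses only when it is
 * invoked for event_irq.)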
3333 */ 3334 ctrl_info->event_queue.int_msg_num = 0; 3335 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3336 ctrl_info->queue_groups[i].int_msg_num = i; 3337 3338 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3339 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3340 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3341 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3342 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3343 } 3344 } 3345 3346 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3347 { 3348 size_t alloc_length; 3349 struct pqi_admin_queues_aligned *admin_queues_aligned; 3350 struct pqi_admin_queues *admin_queues; 3351 3352 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3353 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3354 3355 ctrl_info->admin_queue_memory_base = 3356 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3357 alloc_length, 3358 &ctrl_info->admin_queue_memory_base_dma_handle, 3359 GFP_KERNEL); 3360 3361 if (!ctrl_info->admin_queue_memory_base) 3362 return -ENOMEM; 3363 3364 ctrl_info->admin_queue_memory_length = alloc_length; 3365 3366 admin_queues = &ctrl_info->admin_queues; 3367 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3368 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3369 admin_queues->iq_element_array = 3370 &admin_queues_aligned->iq_element_array; 3371 admin_queues->oq_element_array = 3372 &admin_queues_aligned->oq_element_array; 3373 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3374 admin_queues->oq_pi = &admin_queues_aligned->oq_pi; 3375 3376 admin_queues->iq_element_array_bus_addr = 3377 ctrl_info->admin_queue_memory_base_dma_handle + 3378 (admin_queues->iq_element_array - 3379 ctrl_info->admin_queue_memory_base); 3380 admin_queues->oq_element_array_bus_addr = 3381 ctrl_info->admin_queue_memory_base_dma_handle + 3382 (admin_queues->oq_element_array - 3383 ctrl_info->admin_queue_memory_base); 3384 admin_queues->iq_ci_bus_addr = 3385 ctrl_info->admin_queue_memory_base_dma_handle + 3386 ((void *)admin_queues->iq_ci - 3387 ctrl_info->admin_queue_memory_base); 3388 admin_queues->oq_pi_bus_addr = 3389 ctrl_info->admin_queue_memory_base_dma_handle + 3390 ((void *)admin_queues->oq_pi - 3391 ctrl_info->admin_queue_memory_base); 3392 3393 return 0; 3394 } 3395 3396 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 3397 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3398 3399 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3400 { 3401 struct pqi_device_registers __iomem *pqi_registers; 3402 struct pqi_admin_queues *admin_queues; 3403 unsigned long timeout; 3404 u8 status; 3405 u32 reg; 3406 3407 pqi_registers = ctrl_info->pqi_registers; 3408 admin_queues = &ctrl_info->admin_queues; 3409 3410 writeq((u64)admin_queues->iq_element_array_bus_addr, 3411 &pqi_registers->admin_iq_element_array_addr); 3412 writeq((u64)admin_queues->oq_element_array_bus_addr, 3413 &pqi_registers->admin_oq_element_array_addr); 3414 writeq((u64)admin_queues->iq_ci_bus_addr, 3415 &pqi_registers->admin_iq_ci_addr); 3416 writeq((u64)admin_queues->oq_pi_bus_addr, 3417 &pqi_registers->admin_oq_pi_addr); 3418 3419 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3420 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | 3421 (admin_queues->int_msg_num << 16); 3422 writel(reg, &pqi_registers->admin_iq_num_elements); 3423 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3424 &pqi_registers->function_and_status_code); 3425 3426 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 3427 while (1) { 3428 status = 
readb(&pqi_registers->function_and_status_code); 3429 if (status == PQI_STATUS_IDLE) 3430 break; 3431 if (time_after(jiffies, timeout)) 3432 return -ETIMEDOUT; 3433 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3434 } 3435 3436 /* 3437 * The offset registers are not initialized to the correct 3438 * offsets until *after* the create admin queue pair command 3439 * completes successfully. 3440 */ 3441 admin_queues->iq_pi = ctrl_info->iomem_base + 3442 PQI_DEVICE_REGISTERS_OFFSET + 3443 readq(&pqi_registers->admin_iq_pi_offset); 3444 admin_queues->oq_ci = ctrl_info->iomem_base + 3445 PQI_DEVICE_REGISTERS_OFFSET + 3446 readq(&pqi_registers->admin_oq_ci_offset); 3447 3448 return 0; 3449 } 3450 3451 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3452 struct pqi_general_admin_request *request) 3453 { 3454 struct pqi_admin_queues *admin_queues; 3455 void *next_element; 3456 pqi_index_t iq_pi; 3457 3458 admin_queues = &ctrl_info->admin_queues; 3459 iq_pi = admin_queues->iq_pi_copy; 3460 3461 next_element = admin_queues->iq_element_array + 3462 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3463 3464 memcpy(next_element, request, sizeof(*request)); 3465 3466 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3467 admin_queues->iq_pi_copy = iq_pi; 3468 3469 /* 3470 * This write notifies the controller that an IU is available to be 3471 * processed. 3472 */ 3473 writel(iq_pi, admin_queues->iq_pi); 3474 } 3475 3476 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3477 3478 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3479 struct pqi_general_admin_response *response) 3480 { 3481 struct pqi_admin_queues *admin_queues; 3482 pqi_index_t oq_pi; 3483 pqi_index_t oq_ci; 3484 unsigned long timeout; 3485 3486 admin_queues = &ctrl_info->admin_queues; 3487 oq_ci = admin_queues->oq_ci_copy; 3488 3489 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 3490 3491 while (1) { 3492 oq_pi = *admin_queues->oq_pi; 3493 if (oq_pi != oq_ci) 3494 break; 3495 if (time_after(jiffies, timeout)) { 3496 dev_err(&ctrl_info->pci_dev->dev, 3497 "timed out waiting for admin response\n"); 3498 return -ETIMEDOUT; 3499 } 3500 if (!sis_is_firmware_running(ctrl_info)) 3501 return -ENXIO; 3502 usleep_range(1000, 2000); 3503 } 3504 3505 memcpy(response, admin_queues->oq_element_array + 3506 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3507 3508 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3509 admin_queues->oq_ci_copy = oq_ci; 3510 writel(oq_ci, admin_queues->oq_ci); 3511 3512 return 0; 3513 } 3514 3515 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3516 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3517 struct pqi_io_request *io_request) 3518 { 3519 struct pqi_io_request *next; 3520 void *next_element; 3521 pqi_index_t iq_pi; 3522 pqi_index_t iq_ci; 3523 size_t iu_length; 3524 unsigned long flags; 3525 unsigned int num_elements_needed; 3526 unsigned int num_elements_to_end_of_queue; 3527 size_t copy_count; 3528 struct pqi_iu_header *request; 3529 3530 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3531 3532 if (io_request) { 3533 io_request->queue_group = queue_group; 3534 list_add_tail(&io_request->request_list_entry, 3535 &queue_group->request_list[path]); 3536 } 3537 3538 iq_pi = queue_group->iq_pi_copy[path]; 3539 3540 list_for_each_entry_safe(io_request, next, 3541 &queue_group->request_list[path], request_list_entry) { 3542 3543 request = io_request->iu; 3544 3545 iu_length = get_unaligned_le16(&request->iu_length) + 3546 
PQI_REQUEST_HEADER_LENGTH; 3547 num_elements_needed = 3548 DIV_ROUND_UP(iu_length, 3549 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3550 3551 iq_ci = *queue_group->iq_ci[path]; 3552 3553 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3554 ctrl_info->num_elements_per_iq)) 3555 break; 3556 3557 put_unaligned_le16(queue_group->oq_id, 3558 &request->response_queue_id); 3559 3560 next_element = queue_group->iq_element_array[path] + 3561 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3562 3563 num_elements_to_end_of_queue = 3564 ctrl_info->num_elements_per_iq - iq_pi; 3565 3566 if (num_elements_needed <= num_elements_to_end_of_queue) { 3567 memcpy(next_element, request, iu_length); 3568 } else { 3569 copy_count = num_elements_to_end_of_queue * 3570 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3571 memcpy(next_element, request, copy_count); 3572 memcpy(queue_group->iq_element_array[path], 3573 (u8 *)request + copy_count, 3574 iu_length - copy_count); 3575 } 3576 3577 iq_pi = (iq_pi + num_elements_needed) % 3578 ctrl_info->num_elements_per_iq; 3579 3580 list_del(&io_request->request_list_entry); 3581 } 3582 3583 if (iq_pi != queue_group->iq_pi_copy[path]) { 3584 queue_group->iq_pi_copy[path] = iq_pi; 3585 /* 3586 * This write notifies the controller that one or more IUs are 3587 * available to be processed. 3588 */ 3589 writel(iq_pi, queue_group->iq_pi[path]); 3590 } 3591 3592 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3593 } 3594 3595 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3596 3597 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3598 struct completion *wait) 3599 { 3600 int rc; 3601 3602 while (1) { 3603 if (wait_for_completion_io_timeout(wait, 3604 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 3605 rc = 0; 3606 break; 3607 } 3608 3609 pqi_check_ctrl_health(ctrl_info); 3610 if (pqi_ctrl_offline(ctrl_info)) { 3611 rc = -ENXIO; 3612 break; 3613 } 3614 } 3615 3616 return rc; 3617 } 3618 3619 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3620 void *context) 3621 { 3622 struct completion *waiting = context; 3623 3624 complete(waiting); 3625 } 3626 3627 static int pqi_submit_raid_request_synchronous_with_io_request( 3628 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 3629 unsigned long timeout_msecs) 3630 { 3631 int rc = 0; 3632 DECLARE_COMPLETION_ONSTACK(wait); 3633 3634 io_request->io_complete_callback = pqi_raid_synchronous_complete; 3635 io_request->context = &wait; 3636 3637 pqi_start_io(ctrl_info, 3638 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 3639 io_request); 3640 3641 if (timeout_msecs == NO_TIMEOUT) { 3642 pqi_wait_for_completion_io(ctrl_info, &wait); 3643 } else { 3644 if (!wait_for_completion_io_timeout(&wait, 3645 msecs_to_jiffies(timeout_msecs))) { 3646 dev_warn(&ctrl_info->pci_dev->dev, 3647 "command timed out\n"); 3648 rc = -ETIMEDOUT; 3649 } 3650 } 3651 3652 return rc; 3653 } 3654 3655 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 3656 struct pqi_iu_header *request, unsigned int flags, 3657 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 3658 { 3659 int rc; 3660 struct pqi_io_request *io_request; 3661 unsigned long start_jiffies; 3662 unsigned long msecs_blocked; 3663 size_t iu_length; 3664 3665 /* 3666 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 3667 * are mutually exclusive. 
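 * The interruptible path acquires sync_request_sem with
 * down_interruptible() and never applies a timeout; a bounded wait
 * requires the flag to be clear so that down_timeout() is used
 * instead.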
 */

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		if (timeout_msecs == NO_TIMEOUT) {
			down(&ctrl_info->sync_request_sem);
		} else {
			start_jiffies = jiffies;
			if (down_timeout(&ctrl_info->sync_request_sem,
				msecs_to_jiffies(timeout_msecs)))
				return -ETIMEDOUT;
			msecs_blocked =
				jiffies_to_msecs(jiffies - start_jiffies);
			if (msecs_blocked >= timeout_msecs)
				return -ETIMEDOUT;
			timeout_msecs -= msecs_blocked;
		}
	}

	pqi_ctrl_busy(ctrl_info);
	timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
	if (timeout_msecs == 0) {
		rc = -ETIMEDOUT;
		goto out;
	}

	if (pqi_ctrl_offline(ctrl_info)) {
		rc = -ENXIO;
		goto out;
	}

	io_request = pqi_alloc_io_request(ctrl_info);

	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
		io_request, timeout_msecs);

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info,
				sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		u8 scsi_status;
		struct pqi_raid_error_info *raid_error_info;

		raid_error_info = io_request->error_info;
		scsi_status = raid_error_info->status;

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			raid_error_info->data_out_result ==
			PQI_DATA_IN_OUT_UNDERFLOW)
			scsi_status = SAM_STAT_GOOD;

		if (scsi_status != SAM_STAT_GOOD)
			rc = -EIO;
	}

	pqi_free_io_request(io_request);

out:
	pqi_ctrl_unbusy(ctrl_info);
	up(&ctrl_info->sync_request_sem);

	return rc;
}

static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}

static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response,
			request->function_code);

	return rc;
}

static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}

static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}

static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
	bool inbound_queue, u16 queue_id)
{
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	if (inbound_queue)
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
	else
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
	put_unaligned_le16(queue_id,
		&request.data.delete_operational_queue.queue_id);

	return pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
}

static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}

static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
	unsigned int group_number)
{
	int rc;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	queue_group = &ctrl_info->queue_groups[group_number];

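	/*
	 * Each queue group is built from three operational queues: an inbound
	 * queue for the RAID path, an inbound queue for the AIO path, and a
	 * single outbound queue that both paths share for completions.  They
	 * are created in that order below; if a later step fails, the queues
	 * already created for this group are deleted before returning.
	 */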
	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64((u64)queue_group->
		iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		goto delete_inbound_queue_raid;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		goto delete_inbound_queue_aio;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		goto delete_inbound_queue_aio;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;

delete_inbound_queue_aio:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[AIO_PATH]);

delete_inbound_queue_raid:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[RAID_PATH]);

	return rc;
}

static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info, i);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	(offsetof(struct pqi_event_config, descriptors) + \
	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))

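/*
 * Event configuration is a two-step exchange with the firmware: the current
 * event configuration is read into a host buffer, each supported event
 * descriptor is pointed at the dedicated event OQ (or cleared when events
 * are being disabled), and the modified configuration is written back.
 */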
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
	bool enable_events)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_event_descriptor *event_descriptor;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++) {
		event_descriptor = &event_config->descriptors[i];
		if (enable_events &&
			pqi_is_supported_event(event_descriptor->event_type))
			put_unaligned_le16(ctrl_info->event_queue.oq_id,
				&event_descriptor->oq_id);
		else
			put_unaligned_le16(0, &event_descriptor->oq_id);
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_TODEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_TODEVICE);

out:
	kfree(event_config);

	return rc;
}

static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, true);
}

static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, false);
}

static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}

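/*
 * The error buffer and the I/O request pool below are sized from
 * max_io_slots, which pqi_calculate_io_resources() derives from the
 * controller's reported maximum number of outstanding requests; each I/O
 * request slot gets its own IU buffer plus a DMA-coherent scatter-gather
 * chain buffer.
 */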
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4271 { 4272 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4273 ctrl_info->error_buffer_length, 4274 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4275 4276 if (!ctrl_info->error_buffer) 4277 return -ENOMEM; 4278 4279 return 0; 4280 } 4281 4282 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4283 { 4284 unsigned int i; 4285 void *sg_chain_buffer; 4286 size_t sg_chain_buffer_length; 4287 dma_addr_t sg_chain_buffer_dma_handle; 4288 struct device *dev; 4289 struct pqi_io_request *io_request; 4290 4291 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots * 4292 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4293 4294 if (!ctrl_info->io_request_pool) { 4295 dev_err(&ctrl_info->pci_dev->dev, 4296 "failed to allocate I/O request pool\n"); 4297 goto error; 4298 } 4299 4300 dev = &ctrl_info->pci_dev->dev; 4301 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4302 io_request = ctrl_info->io_request_pool; 4303 4304 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4305 io_request->iu = 4306 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4307 4308 if (!io_request->iu) { 4309 dev_err(&ctrl_info->pci_dev->dev, 4310 "failed to allocate IU buffers\n"); 4311 goto error; 4312 } 4313 4314 sg_chain_buffer = dma_alloc_coherent(dev, 4315 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4316 GFP_KERNEL); 4317 4318 if (!sg_chain_buffer) { 4319 dev_err(&ctrl_info->pci_dev->dev, 4320 "failed to allocate PQI scatter-gather chain buffers\n"); 4321 goto error; 4322 } 4323 4324 io_request->index = i; 4325 io_request->sg_chain_buffer = sg_chain_buffer; 4326 io_request->sg_chain_buffer_dma_handle = 4327 sg_chain_buffer_dma_handle; 4328 io_request++; 4329 } 4330 4331 return 0; 4332 4333 error: 4334 pqi_free_all_io_requests(ctrl_info); 4335 4336 return -ENOMEM; 4337 } 4338 4339 /* 4340 * Calculate required resources that are sized based on max. outstanding 4341 * requests and max. transfer size. 4342 */ 4343 4344 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4345 { 4346 u32 max_transfer_size; 4347 u32 max_sg_entries; 4348 4349 ctrl_info->scsi_ml_can_queue = 4350 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4351 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4352 4353 ctrl_info->error_buffer_length = 4354 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4355 4356 if (reset_devices) 4357 max_transfer_size = min(ctrl_info->max_transfer_size, 4358 PQI_MAX_TRANSFER_SIZE_KDUMP); 4359 else 4360 max_transfer_size = min(ctrl_info->max_transfer_size, 4361 PQI_MAX_TRANSFER_SIZE); 4362 4363 max_sg_entries = max_transfer_size / PAGE_SIZE; 4364 4365 /* +1 to cover when the buffer is not page-aligned. 
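	 * As a purely illustrative example: with a hypothetical 1 MiB maximum
	 * transfer and 4 KiB pages, 1 MiB / 4 KiB = 256 entries, plus 1 for a
	 * misaligned buffer = 257 (before clamping to max_sg_entries below).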
*/ 4366 max_sg_entries++; 4367 4368 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4369 4370 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4371 4372 ctrl_info->sg_chain_buffer_length = 4373 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4374 PQI_EXTRA_SGL_MEMORY; 4375 ctrl_info->sg_tablesize = max_sg_entries; 4376 ctrl_info->max_sectors = max_transfer_size / 512; 4377 } 4378 4379 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4380 { 4381 int num_queue_groups; 4382 u16 num_elements_per_iq; 4383 u16 num_elements_per_oq; 4384 4385 if (reset_devices) { 4386 num_queue_groups = 1; 4387 } else { 4388 int num_cpus; 4389 int max_queue_groups; 4390 4391 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4392 ctrl_info->max_outbound_queues - 1); 4393 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4394 4395 num_cpus = num_online_cpus(); 4396 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4397 num_queue_groups = min(num_queue_groups, max_queue_groups); 4398 } 4399 4400 ctrl_info->num_queue_groups = num_queue_groups; 4401 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4402 4403 /* 4404 * Make sure that the max. inbound IU length is an even multiple 4405 * of our inbound element length. 4406 */ 4407 ctrl_info->max_inbound_iu_length = 4408 (ctrl_info->max_inbound_iu_length_per_firmware / 4409 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4410 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4411 4412 num_elements_per_iq = 4413 (ctrl_info->max_inbound_iu_length / 4414 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4415 4416 /* Add one because one element in each queue is unusable. */ 4417 num_elements_per_iq++; 4418 4419 num_elements_per_iq = min(num_elements_per_iq, 4420 ctrl_info->max_elements_per_iq); 4421 4422 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4423 num_elements_per_oq = min(num_elements_per_oq, 4424 ctrl_info->max_elements_per_oq); 4425 4426 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4427 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4428 4429 ctrl_info->max_sg_per_iu = 4430 ((ctrl_info->max_inbound_iu_length - 4431 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4432 sizeof(struct pqi_sg_descriptor)) + 4433 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4434 } 4435 4436 static inline void pqi_set_sg_descriptor( 4437 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4438 { 4439 u64 address = (u64)sg_dma_address(sg); 4440 unsigned int length = sg_dma_len(sg); 4441 4442 put_unaligned_le64(address, &sg_descriptor->address); 4443 put_unaligned_le32(length, &sg_descriptor->length); 4444 put_unaligned_le32(0, &sg_descriptor->flags); 4445 } 4446 4447 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4448 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4449 struct pqi_io_request *io_request) 4450 { 4451 int i; 4452 u16 iu_length; 4453 int sg_count; 4454 bool chained; 4455 unsigned int num_sg_in_iu; 4456 unsigned int max_sg_per_iu; 4457 struct scatterlist *sg; 4458 struct pqi_sg_descriptor *sg_descriptor; 4459 4460 sg_count = scsi_dma_map(scmd); 4461 if (sg_count < 0) 4462 return sg_count; 4463 4464 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4465 PQI_REQUEST_HEADER_LENGTH; 4466 4467 if (sg_count == 0) 4468 goto out; 4469 4470 sg = scsi_sglist(scmd); 4471 sg_descriptor = request->sg_descriptors; 4472 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4473 chained = false; 4474 num_sg_in_iu = 0; 4475 i = 0; 4476 4477 while (1) { 4478 
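		/*
		 * Fill in one embedded descriptor per mapped segment; once the
		 * embedded descriptors are exhausted, the last slot is turned
		 * into a CISS_SG_CHAIN descriptor that points at this
		 * request's preallocated chain buffer, and the remaining
		 * segments are written there.
		 */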
pqi_set_sg_descriptor(sg_descriptor, sg); 4479 if (!chained) 4480 num_sg_in_iu++; 4481 i++; 4482 if (i == sg_count) 4483 break; 4484 sg_descriptor++; 4485 if (i == max_sg_per_iu) { 4486 put_unaligned_le64( 4487 (u64)io_request->sg_chain_buffer_dma_handle, 4488 &sg_descriptor->address); 4489 put_unaligned_le32((sg_count - num_sg_in_iu) 4490 * sizeof(*sg_descriptor), 4491 &sg_descriptor->length); 4492 put_unaligned_le32(CISS_SG_CHAIN, 4493 &sg_descriptor->flags); 4494 chained = true; 4495 num_sg_in_iu++; 4496 sg_descriptor = io_request->sg_chain_buffer; 4497 } 4498 sg = sg_next(sg); 4499 } 4500 4501 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4502 request->partial = chained; 4503 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4504 4505 out: 4506 put_unaligned_le16(iu_length, &request->header.iu_length); 4507 4508 return 0; 4509 } 4510 4511 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4512 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4513 struct pqi_io_request *io_request) 4514 { 4515 int i; 4516 u16 iu_length; 4517 int sg_count; 4518 bool chained; 4519 unsigned int num_sg_in_iu; 4520 unsigned int max_sg_per_iu; 4521 struct scatterlist *sg; 4522 struct pqi_sg_descriptor *sg_descriptor; 4523 4524 sg_count = scsi_dma_map(scmd); 4525 if (sg_count < 0) 4526 return sg_count; 4527 4528 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4529 PQI_REQUEST_HEADER_LENGTH; 4530 num_sg_in_iu = 0; 4531 4532 if (sg_count == 0) 4533 goto out; 4534 4535 sg = scsi_sglist(scmd); 4536 sg_descriptor = request->sg_descriptors; 4537 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4538 chained = false; 4539 i = 0; 4540 4541 while (1) { 4542 pqi_set_sg_descriptor(sg_descriptor, sg); 4543 if (!chained) 4544 num_sg_in_iu++; 4545 i++; 4546 if (i == sg_count) 4547 break; 4548 sg_descriptor++; 4549 if (i == max_sg_per_iu) { 4550 put_unaligned_le64( 4551 (u64)io_request->sg_chain_buffer_dma_handle, 4552 &sg_descriptor->address); 4553 put_unaligned_le32((sg_count - num_sg_in_iu) 4554 * sizeof(*sg_descriptor), 4555 &sg_descriptor->length); 4556 put_unaligned_le32(CISS_SG_CHAIN, 4557 &sg_descriptor->flags); 4558 chained = true; 4559 num_sg_in_iu++; 4560 sg_descriptor = io_request->sg_chain_buffer; 4561 } 4562 sg = sg_next(sg); 4563 } 4564 4565 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4566 request->partial = chained; 4567 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4568 4569 out: 4570 put_unaligned_le16(iu_length, &request->header.iu_length); 4571 request->num_sg_descriptors = num_sg_in_iu; 4572 4573 return 0; 4574 } 4575 4576 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4577 void *context) 4578 { 4579 struct scsi_cmnd *scmd; 4580 4581 scmd = io_request->scmd; 4582 pqi_free_io_request(io_request); 4583 scsi_dma_unmap(scmd); 4584 pqi_scsi_done(scmd); 4585 } 4586 4587 static int pqi_raid_submit_scsi_cmd_with_io_request( 4588 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4589 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4590 struct pqi_queue_group *queue_group) 4591 { 4592 int rc; 4593 size_t cdb_length; 4594 struct pqi_raid_path_request *request; 4595 4596 io_request->io_complete_callback = pqi_raid_io_complete; 4597 io_request->scmd = scmd; 4598 4599 request = io_request->iu; 4600 memset(request, 0, 4601 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4602 4603 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4604 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4605 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4606 put_unaligned_le16(io_request->index, &request->request_id); 4607 request->error_index = request->request_id; 4608 memcpy(request->lun_number, device->scsi3addr, 4609 sizeof(request->lun_number)); 4610 4611 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4612 memcpy(request->cdb, scmd->cmnd, cdb_length); 4613 4614 switch (cdb_length) { 4615 case 6: 4616 case 10: 4617 case 12: 4618 case 16: 4619 /* No bytes in the Additional CDB bytes field */ 4620 request->additional_cdb_bytes_usage = 4621 SOP_ADDITIONAL_CDB_BYTES_0; 4622 break; 4623 case 20: 4624 /* 4 bytes in the Additional cdb field */ 4625 request->additional_cdb_bytes_usage = 4626 SOP_ADDITIONAL_CDB_BYTES_4; 4627 break; 4628 case 24: 4629 /* 8 bytes in the Additional cdb field */ 4630 request->additional_cdb_bytes_usage = 4631 SOP_ADDITIONAL_CDB_BYTES_8; 4632 break; 4633 case 28: 4634 /* 12 bytes in the Additional cdb field */ 4635 request->additional_cdb_bytes_usage = 4636 SOP_ADDITIONAL_CDB_BYTES_12; 4637 break; 4638 case 32: 4639 default: 4640 /* 16 bytes in the Additional cdb field */ 4641 request->additional_cdb_bytes_usage = 4642 SOP_ADDITIONAL_CDB_BYTES_16; 4643 break; 4644 } 4645 4646 switch (scmd->sc_data_direction) { 4647 case DMA_TO_DEVICE: 4648 request->data_direction = SOP_READ_FLAG; 4649 break; 4650 case DMA_FROM_DEVICE: 4651 request->data_direction = SOP_WRITE_FLAG; 4652 break; 4653 case DMA_NONE: 4654 request->data_direction = SOP_NO_DIRECTION_FLAG; 4655 break; 4656 case DMA_BIDIRECTIONAL: 4657 request->data_direction = SOP_BIDIRECTIONAL; 4658 break; 4659 default: 4660 dev_err(&ctrl_info->pci_dev->dev, 4661 "unknown data direction: %d\n", 4662 scmd->sc_data_direction); 4663 break; 4664 } 4665 4666 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 4667 if (rc) { 4668 pqi_free_io_request(io_request); 4669 return SCSI_MLQUEUE_HOST_BUSY; 4670 } 4671 4672 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 4673 4674 return 0; 4675 } 4676 4677 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4678 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4679 struct pqi_queue_group *queue_group) 4680 { 4681 struct pqi_io_request *io_request; 4682 4683 io_request = pqi_alloc_io_request(ctrl_info); 4684 4685 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4686 device, scmd, queue_group); 4687 } 4688 4689 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 4690 { 4691 if (!pqi_ctrl_blocked(ctrl_info)) 4692 schedule_work(&ctrl_info->raid_bypass_retry_work); 4693 } 4694 4695 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 4696 { 4697 struct scsi_cmnd *scmd; 4698 struct pqi_scsi_dev *device; 4699 struct pqi_ctrl_info *ctrl_info; 4700 4701 if (!io_request->raid_bypass) 4702 return false; 4703 4704 scmd = io_request->scmd; 4705 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 4706 return false; 4707 if (host_byte(scmd->result) == DID_NO_CONNECT) 4708 return false; 4709 4710 device = scmd->device->hostdata; 4711 if (pqi_device_offline(device)) 4712 return false; 4713 4714 ctrl_info = shost_to_hba(scmd->device->host); 4715 if (pqi_ctrl_offline(ctrl_info)) 4716 return false; 4717 4718 return true; 4719 } 4720 4721 static inline void pqi_add_to_raid_bypass_retry_list( 4722 struct pqi_ctrl_info *ctrl_info, 4723 struct pqi_io_request *io_request, bool at_head) 4724 { 4725 unsigned long flags; 4726 4727 
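	/*
	 * Failed RAID-bypass (accelerated) requests are parked on a
	 * per-controller list under raid_bypass_retry_list_lock and later
	 * resubmitted down the normal RAID path by the retry worker.
	 */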
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4728 if (at_head) 4729 list_add(&io_request->request_list_entry, 4730 &ctrl_info->raid_bypass_retry_list); 4731 else 4732 list_add_tail(&io_request->request_list_entry, 4733 &ctrl_info->raid_bypass_retry_list); 4734 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4735 } 4736 4737 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 4738 void *context) 4739 { 4740 struct scsi_cmnd *scmd; 4741 4742 scmd = io_request->scmd; 4743 pqi_free_io_request(io_request); 4744 pqi_scsi_done(scmd); 4745 } 4746 4747 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 4748 { 4749 struct scsi_cmnd *scmd; 4750 struct pqi_ctrl_info *ctrl_info; 4751 4752 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 4753 scmd = io_request->scmd; 4754 scmd->result = 0; 4755 ctrl_info = shost_to_hba(scmd->device->host); 4756 4757 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 4758 pqi_schedule_bypass_retry(ctrl_info); 4759 } 4760 4761 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 4762 { 4763 struct scsi_cmnd *scmd; 4764 struct pqi_scsi_dev *device; 4765 struct pqi_ctrl_info *ctrl_info; 4766 struct pqi_queue_group *queue_group; 4767 4768 scmd = io_request->scmd; 4769 device = scmd->device->hostdata; 4770 if (pqi_device_in_reset(device)) { 4771 pqi_free_io_request(io_request); 4772 set_host_byte(scmd, DID_RESET); 4773 pqi_scsi_done(scmd); 4774 return 0; 4775 } 4776 4777 ctrl_info = shost_to_hba(scmd->device->host); 4778 queue_group = io_request->queue_group; 4779 4780 pqi_reinit_io_request(io_request); 4781 4782 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4783 device, scmd, queue_group); 4784 } 4785 4786 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 4787 struct pqi_ctrl_info *ctrl_info) 4788 { 4789 unsigned long flags; 4790 struct pqi_io_request *io_request; 4791 4792 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4793 io_request = list_first_entry_or_null( 4794 &ctrl_info->raid_bypass_retry_list, 4795 struct pqi_io_request, request_list_entry); 4796 if (io_request) 4797 list_del(&io_request->request_list_entry); 4798 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4799 4800 return io_request; 4801 } 4802 4803 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 4804 { 4805 int rc; 4806 struct pqi_io_request *io_request; 4807 4808 pqi_ctrl_busy(ctrl_info); 4809 4810 while (1) { 4811 if (pqi_ctrl_blocked(ctrl_info)) 4812 break; 4813 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 4814 if (!io_request) 4815 break; 4816 rc = pqi_retry_raid_bypass(io_request); 4817 if (rc) { 4818 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 4819 true); 4820 pqi_schedule_bypass_retry(ctrl_info); 4821 break; 4822 } 4823 } 4824 4825 pqi_ctrl_unbusy(ctrl_info); 4826 } 4827 4828 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 4829 { 4830 struct pqi_ctrl_info *ctrl_info; 4831 4832 ctrl_info = container_of(work, struct pqi_ctrl_info, 4833 raid_bypass_retry_work); 4834 pqi_retry_raid_bypass_requests(ctrl_info); 4835 } 4836 4837 static void pqi_clear_all_queued_raid_bypass_retries( 4838 struct pqi_ctrl_info *ctrl_info) 4839 { 4840 unsigned long flags; 4841 4842 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4843 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 4844 
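	/*
	 * Re-initializing the list head under the lock discards every queued
	 * bypass retry in one step.
	 */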
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4845 } 4846 4847 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 4848 void *context) 4849 { 4850 struct scsi_cmnd *scmd; 4851 4852 scmd = io_request->scmd; 4853 scsi_dma_unmap(scmd); 4854 if (io_request->status == -EAGAIN) 4855 set_host_byte(scmd, DID_IMM_RETRY); 4856 else if (pqi_raid_bypass_retry_needed(io_request)) { 4857 pqi_queue_raid_bypass_retry(io_request); 4858 return; 4859 } 4860 pqi_free_io_request(io_request); 4861 pqi_scsi_done(scmd); 4862 } 4863 4864 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4865 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4866 struct pqi_queue_group *queue_group) 4867 { 4868 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 4869 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 4870 } 4871 4872 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 4873 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 4874 unsigned int cdb_length, struct pqi_queue_group *queue_group, 4875 struct pqi_encryption_info *encryption_info, bool raid_bypass) 4876 { 4877 int rc; 4878 struct pqi_io_request *io_request; 4879 struct pqi_aio_path_request *request; 4880 4881 io_request = pqi_alloc_io_request(ctrl_info); 4882 io_request->io_complete_callback = pqi_aio_io_complete; 4883 io_request->scmd = scmd; 4884 io_request->raid_bypass = raid_bypass; 4885 4886 request = io_request->iu; 4887 memset(request, 0, 4888 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4889 4890 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 4891 put_unaligned_le32(aio_handle, &request->nexus_id); 4892 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 4893 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4894 put_unaligned_le16(io_request->index, &request->request_id); 4895 request->error_index = request->request_id; 4896 if (cdb_length > sizeof(request->cdb)) 4897 cdb_length = sizeof(request->cdb); 4898 request->cdb_length = cdb_length; 4899 memcpy(request->cdb, cdb, cdb_length); 4900 4901 switch (scmd->sc_data_direction) { 4902 case DMA_TO_DEVICE: 4903 request->data_direction = SOP_READ_FLAG; 4904 break; 4905 case DMA_FROM_DEVICE: 4906 request->data_direction = SOP_WRITE_FLAG; 4907 break; 4908 case DMA_NONE: 4909 request->data_direction = SOP_NO_DIRECTION_FLAG; 4910 break; 4911 case DMA_BIDIRECTIONAL: 4912 request->data_direction = SOP_BIDIRECTIONAL; 4913 break; 4914 default: 4915 dev_err(&ctrl_info->pci_dev->dev, 4916 "unknown data direction: %d\n", 4917 scmd->sc_data_direction); 4918 break; 4919 } 4920 4921 if (encryption_info) { 4922 request->encryption_enable = true; 4923 put_unaligned_le16(encryption_info->data_encryption_key_index, 4924 &request->data_encryption_key_index); 4925 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 4926 &request->encrypt_tweak_lower); 4927 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 4928 &request->encrypt_tweak_upper); 4929 } 4930 4931 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 4932 if (rc) { 4933 pqi_free_io_request(io_request); 4934 return SCSI_MLQUEUE_HOST_BUSY; 4935 } 4936 4937 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 4938 4939 return 0; 4940 } 4941 4942 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 4943 struct scsi_cmnd *scmd) 4944 { 4945 u16 hw_queue; 4946 4947 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 4948 if (hw_queue > ctrl_info->max_hw_queue_index) 4949 hw_queue = 
0; 4950 4951 return hw_queue; 4952 } 4953 4954 /* 4955 * This function gets called just before we hand the completed SCSI request 4956 * back to the SML. 4957 */ 4958 4959 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 4960 { 4961 struct pqi_scsi_dev *device; 4962 4963 device = scmd->device->hostdata; 4964 atomic_dec(&device->scsi_cmds_outstanding); 4965 } 4966 4967 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 4968 struct scsi_cmnd *scmd) 4969 { 4970 int rc; 4971 struct pqi_ctrl_info *ctrl_info; 4972 struct pqi_scsi_dev *device; 4973 u16 hw_queue; 4974 struct pqi_queue_group *queue_group; 4975 bool raid_bypassed; 4976 4977 device = scmd->device->hostdata; 4978 ctrl_info = shost_to_hba(shost); 4979 4980 atomic_inc(&device->scsi_cmds_outstanding); 4981 4982 if (pqi_ctrl_offline(ctrl_info)) { 4983 set_host_byte(scmd, DID_NO_CONNECT); 4984 pqi_scsi_done(scmd); 4985 return 0; 4986 } 4987 4988 pqi_ctrl_busy(ctrl_info); 4989 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { 4990 rc = SCSI_MLQUEUE_HOST_BUSY; 4991 goto out; 4992 } 4993 4994 /* 4995 * This is necessary because the SML doesn't zero out this field during 4996 * error recovery. 4997 */ 4998 scmd->result = 0; 4999 5000 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 5001 queue_group = &ctrl_info->queue_groups[hw_queue]; 5002 5003 if (pqi_is_logical_device(device)) { 5004 raid_bypassed = false; 5005 if (device->raid_bypass_enabled && 5006 !blk_rq_is_passthrough(scmd->request)) { 5007 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 5008 scmd, queue_group); 5009 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 5010 raid_bypassed = true; 5011 } 5012 if (!raid_bypassed) 5013 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5014 queue_group); 5015 } else { 5016 if (device->aio_enabled) 5017 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 5018 queue_group); 5019 else 5020 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5021 queue_group); 5022 } 5023 5024 out: 5025 pqi_ctrl_unbusy(ctrl_info); 5026 if (rc) 5027 atomic_dec(&device->scsi_cmds_outstanding); 5028 5029 return rc; 5030 } 5031 5032 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5033 struct pqi_queue_group *queue_group) 5034 { 5035 unsigned int path; 5036 unsigned long flags; 5037 bool list_is_empty; 5038 5039 for (path = 0; path < 2; path++) { 5040 while (1) { 5041 spin_lock_irqsave( 5042 &queue_group->submit_lock[path], flags); 5043 list_is_empty = 5044 list_empty(&queue_group->request_list[path]); 5045 spin_unlock_irqrestore( 5046 &queue_group->submit_lock[path], flags); 5047 if (list_is_empty) 5048 break; 5049 pqi_check_ctrl_health(ctrl_info); 5050 if (pqi_ctrl_offline(ctrl_info)) 5051 return -ENXIO; 5052 usleep_range(1000, 2000); 5053 } 5054 } 5055 5056 return 0; 5057 } 5058 5059 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5060 { 5061 int rc; 5062 unsigned int i; 5063 unsigned int path; 5064 struct pqi_queue_group *queue_group; 5065 pqi_index_t iq_pi; 5066 pqi_index_t iq_ci; 5067 5068 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5069 queue_group = &ctrl_info->queue_groups[i]; 5070 5071 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5072 if (rc) 5073 return rc; 5074 5075 for (path = 0; path < 2; path++) { 5076 iq_pi = queue_group->iq_pi_copy[path]; 5077 5078 while (1) { 5079 iq_ci = *queue_group->iq_ci[path]; 5080 if (iq_ci == iq_pi) 5081 break; 5082 pqi_check_ctrl_health(ctrl_info); 5083 if (pqi_ctrl_offline(ctrl_info)) 5084 return -ENXIO; 
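				/*
				 * Poll every 1-2 milliseconds until the
				 * firmware's consumer index catches up with
				 * our producer index for this queue.
				 */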
5085 usleep_range(1000, 2000); 5086 } 5087 } 5088 } 5089 5090 return 0; 5091 } 5092 5093 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5094 struct pqi_scsi_dev *device) 5095 { 5096 unsigned int i; 5097 unsigned int path; 5098 struct pqi_queue_group *queue_group; 5099 unsigned long flags; 5100 struct pqi_io_request *io_request; 5101 struct pqi_io_request *next; 5102 struct scsi_cmnd *scmd; 5103 struct pqi_scsi_dev *scsi_device; 5104 5105 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5106 queue_group = &ctrl_info->queue_groups[i]; 5107 5108 for (path = 0; path < 2; path++) { 5109 spin_lock_irqsave( 5110 &queue_group->submit_lock[path], flags); 5111 5112 list_for_each_entry_safe(io_request, next, 5113 &queue_group->request_list[path], 5114 request_list_entry) { 5115 scmd = io_request->scmd; 5116 if (!scmd) 5117 continue; 5118 5119 scsi_device = scmd->device->hostdata; 5120 if (scsi_device != device) 5121 continue; 5122 5123 list_del(&io_request->request_list_entry); 5124 set_host_byte(scmd, DID_RESET); 5125 pqi_scsi_done(scmd); 5126 } 5127 5128 spin_unlock_irqrestore( 5129 &queue_group->submit_lock[path], flags); 5130 } 5131 } 5132 } 5133 5134 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5135 struct pqi_scsi_dev *device) 5136 { 5137 while (atomic_read(&device->scsi_cmds_outstanding)) { 5138 pqi_check_ctrl_health(ctrl_info); 5139 if (pqi_ctrl_offline(ctrl_info)) 5140 return -ENXIO; 5141 usleep_range(1000, 2000); 5142 } 5143 5144 return 0; 5145 } 5146 5147 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) 5148 { 5149 bool io_pending; 5150 unsigned long flags; 5151 struct pqi_scsi_dev *device; 5152 5153 while (1) { 5154 io_pending = false; 5155 5156 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5157 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5158 scsi_device_list_entry) { 5159 if (atomic_read(&device->scsi_cmds_outstanding)) { 5160 io_pending = true; 5161 break; 5162 } 5163 } 5164 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5165 flags); 5166 5167 if (!io_pending) 5168 break; 5169 5170 pqi_check_ctrl_health(ctrl_info); 5171 if (pqi_ctrl_offline(ctrl_info)) 5172 return -ENXIO; 5173 5174 usleep_range(1000, 2000); 5175 } 5176 5177 return 0; 5178 } 5179 5180 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5181 void *context) 5182 { 5183 struct completion *waiting = context; 5184 5185 complete(waiting); 5186 } 5187 5188 #define PQI_LUN_RESET_TIMEOUT_SECS 10 5189 5190 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5191 struct pqi_scsi_dev *device, struct completion *wait) 5192 { 5193 int rc; 5194 5195 while (1) { 5196 if (wait_for_completion_io_timeout(wait, 5197 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 5198 rc = 0; 5199 break; 5200 } 5201 5202 pqi_check_ctrl_health(ctrl_info); 5203 if (pqi_ctrl_offline(ctrl_info)) { 5204 rc = -ENXIO; 5205 break; 5206 } 5207 } 5208 5209 return rc; 5210 } 5211 5212 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5213 struct pqi_scsi_dev *device) 5214 { 5215 int rc; 5216 struct pqi_io_request *io_request; 5217 DECLARE_COMPLETION_ONSTACK(wait); 5218 struct pqi_task_management_request *request; 5219 5220 io_request = pqi_alloc_io_request(ctrl_info); 5221 io_request->io_complete_callback = pqi_lun_reset_complete; 5222 io_request->context = &wait; 5223 5224 request = io_request->iu; 5225 memset(request, 0, sizeof(*request)); 5226 5227 request->header.iu_type = 
PQI_REQUEST_IU_TASK_MANAGEMENT; 5228 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5229 &request->header.iu_length); 5230 put_unaligned_le16(io_request->index, &request->request_id); 5231 memcpy(request->lun_number, device->scsi3addr, 5232 sizeof(request->lun_number)); 5233 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5234 5235 pqi_start_io(ctrl_info, 5236 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5237 io_request); 5238 5239 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5240 if (rc == 0) 5241 rc = io_request->status; 5242 5243 pqi_free_io_request(io_request); 5244 5245 return rc; 5246 } 5247 5248 /* Performs a reset at the LUN level. */ 5249 5250 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5251 struct pqi_scsi_dev *device) 5252 { 5253 int rc; 5254 5255 rc = pqi_lun_reset(ctrl_info, device); 5256 if (rc == 0) 5257 rc = pqi_device_wait_for_pending_io(ctrl_info, device); 5258 5259 return rc == 0 ? SUCCESS : FAILED; 5260 } 5261 5262 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5263 { 5264 int rc; 5265 struct Scsi_Host *shost; 5266 struct pqi_ctrl_info *ctrl_info; 5267 struct pqi_scsi_dev *device; 5268 5269 shost = scmd->device->host; 5270 ctrl_info = shost_to_hba(shost); 5271 device = scmd->device->hostdata; 5272 5273 dev_err(&ctrl_info->pci_dev->dev, 5274 "resetting scsi %d:%d:%d:%d\n", 5275 shost->host_no, device->bus, device->target, device->lun); 5276 5277 pqi_check_ctrl_health(ctrl_info); 5278 if (pqi_ctrl_offline(ctrl_info)) { 5279 rc = FAILED; 5280 goto out; 5281 } 5282 5283 mutex_lock(&ctrl_info->lun_reset_mutex); 5284 5285 pqi_ctrl_block_requests(ctrl_info); 5286 pqi_ctrl_wait_until_quiesced(ctrl_info); 5287 pqi_fail_io_queued_for_device(ctrl_info, device); 5288 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5289 pqi_device_reset_start(device); 5290 pqi_ctrl_unblock_requests(ctrl_info); 5291 5292 if (rc) 5293 rc = FAILED; 5294 else 5295 rc = pqi_device_reset(ctrl_info, device); 5296 5297 pqi_device_reset_done(device); 5298 5299 mutex_unlock(&ctrl_info->lun_reset_mutex); 5300 5301 out: 5302 dev_err(&ctrl_info->pci_dev->dev, 5303 "reset of scsi %d:%d:%d:%d: %s\n", 5304 shost->host_no, device->bus, device->target, device->lun, 5305 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5306 5307 return rc; 5308 } 5309 5310 static int pqi_slave_alloc(struct scsi_device *sdev) 5311 { 5312 struct pqi_scsi_dev *device; 5313 unsigned long flags; 5314 struct pqi_ctrl_info *ctrl_info; 5315 struct scsi_target *starget; 5316 struct sas_rphy *rphy; 5317 5318 ctrl_info = shost_to_hba(sdev->host); 5319 5320 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5321 5322 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5323 starget = scsi_target(sdev); 5324 rphy = target_to_rphy(starget); 5325 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5326 if (device) { 5327 device->target = sdev_id(sdev); 5328 device->lun = sdev->lun; 5329 device->target_lun_valid = true; 5330 } 5331 } else { 5332 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5333 sdev_id(sdev), sdev->lun); 5334 } 5335 5336 if (device) { 5337 sdev->hostdata = device; 5338 device->sdev = sdev; 5339 if (device->queue_depth) { 5340 device->advertised_queue_depth = device->queue_depth; 5341 scsi_change_queue_depth(sdev, 5342 device->advertised_queue_depth); 5343 } 5344 } 5345 5346 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5347 5348 return 0; 5349 } 5350 5351 static int pqi_map_queues(struct Scsi_Host *shost) 5352 { 5353 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5354 5355 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev); 5356 } 5357 5358 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5359 void __user *arg) 5360 { 5361 struct pci_dev *pci_dev; 5362 u32 subsystem_vendor; 5363 u32 subsystem_device; 5364 cciss_pci_info_struct pciinfo; 5365 5366 if (!arg) 5367 return -EINVAL; 5368 5369 pci_dev = ctrl_info->pci_dev; 5370 5371 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5372 pciinfo.bus = pci_dev->bus->number; 5373 pciinfo.dev_fn = pci_dev->devfn; 5374 subsystem_vendor = pci_dev->subsystem_vendor; 5375 subsystem_device = pci_dev->subsystem_device; 5376 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5377 subsystem_vendor; 5378 5379 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5380 return -EFAULT; 5381 5382 return 0; 5383 } 5384 5385 static int pqi_getdrivver_ioctl(void __user *arg) 5386 { 5387 u32 version; 5388 5389 if (!arg) 5390 return -EINVAL; 5391 5392 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5393 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5394 5395 if (copy_to_user(arg, &version, sizeof(version))) 5396 return -EFAULT; 5397 5398 return 0; 5399 } 5400 5401 struct ciss_error_info { 5402 u8 scsi_status; 5403 int command_status; 5404 size_t sense_data_length; 5405 }; 5406 5407 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5408 struct ciss_error_info *ciss_error_info) 5409 { 5410 int ciss_cmd_status; 5411 size_t sense_data_length; 5412 5413 switch (pqi_error_info->data_out_result) { 5414 case PQI_DATA_IN_OUT_GOOD: 5415 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5416 break; 5417 case PQI_DATA_IN_OUT_UNDERFLOW: 5418 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5419 break; 5420 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5421 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5422 break; 5423 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5424 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5425 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5426 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5427 case PQI_DATA_IN_OUT_ERROR: 5428 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5429 break; 5430 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5431 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 
5432 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5433 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5434 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5435 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5436 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5437 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5438 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5439 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5440 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5441 break; 5442 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5443 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5444 break; 5445 case PQI_DATA_IN_OUT_ABORTED: 5446 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5447 break; 5448 case PQI_DATA_IN_OUT_TIMEOUT: 5449 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5450 break; 5451 default: 5452 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5453 break; 5454 } 5455 5456 sense_data_length = 5457 get_unaligned_le16(&pqi_error_info->sense_data_length); 5458 if (sense_data_length == 0) 5459 sense_data_length = 5460 get_unaligned_le16(&pqi_error_info->response_data_length); 5461 if (sense_data_length) 5462 if (sense_data_length > sizeof(pqi_error_info->data)) 5463 sense_data_length = sizeof(pqi_error_info->data); 5464 5465 ciss_error_info->scsi_status = pqi_error_info->status; 5466 ciss_error_info->command_status = ciss_cmd_status; 5467 ciss_error_info->sense_data_length = sense_data_length; 5468 } 5469 5470 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5471 { 5472 int rc; 5473 char *kernel_buffer = NULL; 5474 u16 iu_length; 5475 size_t sense_data_length; 5476 IOCTL_Command_struct iocommand; 5477 struct pqi_raid_path_request request; 5478 struct pqi_raid_error_info pqi_error_info; 5479 struct ciss_error_info ciss_error_info; 5480 5481 if (pqi_ctrl_offline(ctrl_info)) 5482 return -ENXIO; 5483 if (!arg) 5484 return -EINVAL; 5485 if (!capable(CAP_SYS_RAWIO)) 5486 return -EPERM; 5487 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5488 return -EFAULT; 5489 if (iocommand.buf_size < 1 && 5490 iocommand.Request.Type.Direction != XFER_NONE) 5491 return -EINVAL; 5492 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5493 return -EINVAL; 5494 if (iocommand.Request.Type.Type != TYPE_CMD) 5495 return -EINVAL; 5496 5497 switch (iocommand.Request.Type.Direction) { 5498 case XFER_NONE: 5499 case XFER_WRITE: 5500 case XFER_READ: 5501 break; 5502 default: 5503 return -EINVAL; 5504 } 5505 5506 if (iocommand.buf_size > 0) { 5507 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5508 if (!kernel_buffer) 5509 return -ENOMEM; 5510 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5511 if (copy_from_user(kernel_buffer, iocommand.buf, 5512 iocommand.buf_size)) { 5513 rc = -EFAULT; 5514 goto out; 5515 } 5516 } else { 5517 memset(kernel_buffer, 0, iocommand.buf_size); 5518 } 5519 } 5520 5521 memset(&request, 0, sizeof(request)); 5522 5523 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5524 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5525 PQI_REQUEST_HEADER_LENGTH; 5526 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 5527 sizeof(request.lun_number)); 5528 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 5529 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5530 5531 switch (iocommand.Request.Type.Direction) { 5532 case XFER_NONE: 5533 request.data_direction = SOP_NO_DIRECTION_FLAG; 5534 break; 5535 case XFER_WRITE: 5536 request.data_direction = SOP_WRITE_FLAG; 5537 break; 5538 
case XFER_READ: 5539 request.data_direction = SOP_READ_FLAG; 5540 break; 5541 } 5542 5543 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5544 5545 if (iocommand.buf_size > 0) { 5546 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 5547 5548 rc = pqi_map_single(ctrl_info->pci_dev, 5549 &request.sg_descriptors[0], kernel_buffer, 5550 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 5551 if (rc) 5552 goto out; 5553 5554 iu_length += sizeof(request.sg_descriptors[0]); 5555 } 5556 5557 put_unaligned_le16(iu_length, &request.header.iu_length); 5558 5559 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 5560 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 5561 5562 if (iocommand.buf_size > 0) 5563 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 5564 PCI_DMA_BIDIRECTIONAL); 5565 5566 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 5567 5568 if (rc == 0) { 5569 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 5570 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 5571 iocommand.error_info.CommandStatus = 5572 ciss_error_info.command_status; 5573 sense_data_length = ciss_error_info.sense_data_length; 5574 if (sense_data_length) { 5575 if (sense_data_length > 5576 sizeof(iocommand.error_info.SenseInfo)) 5577 sense_data_length = 5578 sizeof(iocommand.error_info.SenseInfo); 5579 memcpy(iocommand.error_info.SenseInfo, 5580 pqi_error_info.data, sense_data_length); 5581 iocommand.error_info.SenseLen = sense_data_length; 5582 } 5583 } 5584 5585 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 5586 rc = -EFAULT; 5587 goto out; 5588 } 5589 5590 if (rc == 0 && iocommand.buf_size > 0 && 5591 (iocommand.Request.Type.Direction & XFER_READ)) { 5592 if (copy_to_user(iocommand.buf, kernel_buffer, 5593 iocommand.buf_size)) { 5594 rc = -EFAULT; 5595 } 5596 } 5597 5598 out: 5599 kfree(kernel_buffer); 5600 5601 return rc; 5602 } 5603 5604 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 5605 { 5606 int rc; 5607 struct pqi_ctrl_info *ctrl_info; 5608 5609 ctrl_info = shost_to_hba(sdev->host); 5610 5611 switch (cmd) { 5612 case CCISS_DEREGDISK: 5613 case CCISS_REGNEWDISK: 5614 case CCISS_REGNEWD: 5615 rc = pqi_scan_scsi_devices(ctrl_info); 5616 break; 5617 case CCISS_GETPCIINFO: 5618 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 5619 break; 5620 case CCISS_GETDRIVVER: 5621 rc = pqi_getdrivver_ioctl(arg); 5622 break; 5623 case CCISS_PASSTHRU: 5624 rc = pqi_passthru_ioctl(ctrl_info, arg); 5625 break; 5626 default: 5627 rc = -EINVAL; 5628 break; 5629 } 5630 5631 return rc; 5632 } 5633 5634 static ssize_t pqi_version_show(struct device *dev, 5635 struct device_attribute *attr, char *buffer) 5636 { 5637 ssize_t count = 0; 5638 struct Scsi_Host *shost; 5639 struct pqi_ctrl_info *ctrl_info; 5640 5641 shost = class_to_shost(dev); 5642 ctrl_info = shost_to_hba(shost); 5643 5644 count += snprintf(buffer + count, PAGE_SIZE - count, 5645 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); 5646 5647 count += snprintf(buffer + count, PAGE_SIZE - count, 5648 "firmware: %s\n", ctrl_info->firmware_version); 5649 5650 return count; 5651 } 5652 5653 static ssize_t pqi_host_rescan_store(struct device *dev, 5654 struct device_attribute *attr, const char *buffer, size_t count) 5655 { 5656 struct Scsi_Host *shost = class_to_shost(dev); 5657 5658 pqi_scan_start(shost); 5659 5660 return count; 5661 } 5662 5663 static ssize_t pqi_lockup_action_show(struct device *dev, 5664 struct device_attribute *attr, char *buffer) 5665 { 
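	/*
	 * List every supported lockup action, with the currently selected
	 * action shown in square brackets.
	 */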
5666 int count = 0; 5667 unsigned int i; 5668 5669 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5670 if (pqi_lockup_actions[i].action == pqi_lockup_action) 5671 count += snprintf(buffer + count, PAGE_SIZE - count, 5672 "[%s] ", pqi_lockup_actions[i].name); 5673 else 5674 count += snprintf(buffer + count, PAGE_SIZE - count, 5675 "%s ", pqi_lockup_actions[i].name); 5676 } 5677 5678 count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); 5679 5680 return count; 5681 } 5682 5683 static ssize_t pqi_lockup_action_store(struct device *dev, 5684 struct device_attribute *attr, const char *buffer, size_t count) 5685 { 5686 unsigned int i; 5687 char *action_name; 5688 char action_name_buffer[32]; 5689 5690 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 5691 action_name = strstrip(action_name_buffer); 5692 5693 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5694 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 5695 pqi_lockup_action = pqi_lockup_actions[i].action; 5696 return count; 5697 } 5698 } 5699 5700 return -EINVAL; 5701 } 5702 5703 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); 5704 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 5705 static DEVICE_ATTR(lockup_action, 0644, 5706 pqi_lockup_action_show, pqi_lockup_action_store); 5707 5708 static struct device_attribute *pqi_shost_attrs[] = { 5709 &dev_attr_version, 5710 &dev_attr_rescan, 5711 &dev_attr_lockup_action, 5712 NULL 5713 }; 5714 5715 static ssize_t pqi_sas_address_show(struct device *dev, 5716 struct device_attribute *attr, char *buffer) 5717 { 5718 struct pqi_ctrl_info *ctrl_info; 5719 struct scsi_device *sdev; 5720 struct pqi_scsi_dev *device; 5721 unsigned long flags; 5722 u64 sas_address; 5723 5724 sdev = to_scsi_device(dev); 5725 ctrl_info = shost_to_hba(sdev->host); 5726 5727 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5728 5729 device = sdev->hostdata; 5730 if (pqi_is_logical_device(device)) { 5731 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5732 flags); 5733 return -ENODEV; 5734 } 5735 sas_address = device->sas_address; 5736 5737 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5738 5739 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 5740 } 5741 5742 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 5743 struct device_attribute *attr, char *buffer) 5744 { 5745 struct pqi_ctrl_info *ctrl_info; 5746 struct scsi_device *sdev; 5747 struct pqi_scsi_dev *device; 5748 unsigned long flags; 5749 5750 sdev = to_scsi_device(dev); 5751 ctrl_info = shost_to_hba(sdev->host); 5752 5753 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5754 5755 device = sdev->hostdata; 5756 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 5757 buffer[1] = '\n'; 5758 buffer[2] = '\0'; 5759 5760 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5761 5762 return 2; 5763 } 5764 5765 static ssize_t pqi_raid_level_show(struct device *dev, 5766 struct device_attribute *attr, char *buffer) 5767 { 5768 struct pqi_ctrl_info *ctrl_info; 5769 struct scsi_device *sdev; 5770 struct pqi_scsi_dev *device; 5771 unsigned long flags; 5772 char *raid_level; 5773 5774 sdev = to_scsi_device(dev); 5775 ctrl_info = shost_to_hba(sdev->host); 5776 5777 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5778 5779 device = sdev->hostdata; 5780 5781 if (pqi_is_logical_device(device)) 5782 raid_level = pqi_raid_level_to_string(device->raid_level); 5783 else 5784 raid_level = "N/A"; 5785 5786 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5787 5788 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 5789 } 5790 5791 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 5792 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 5793 pqi_ssd_smart_path_enabled_show, NULL); 5794 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 5795 5796 static struct device_attribute *pqi_sdev_attrs[] = { 5797 &dev_attr_sas_address, 5798 &dev_attr_ssd_smart_path_enabled, 5799 &dev_attr_raid_level, 5800 NULL 5801 }; 5802 5803 static struct scsi_host_template pqi_driver_template = { 5804 .module = THIS_MODULE, 5805 .name = DRIVER_NAME_SHORT, 5806 .proc_name = DRIVER_NAME_SHORT, 5807 .queuecommand = pqi_scsi_queue_command, 5808 .scan_start = pqi_scan_start, 5809 .scan_finished = pqi_scan_finished, 5810 .this_id = -1, 5811 .use_clustering = ENABLE_CLUSTERING, 5812 .eh_device_reset_handler = pqi_eh_device_reset_handler, 5813 .ioctl = pqi_ioctl, 5814 .slave_alloc = pqi_slave_alloc, 5815 .map_queues = pqi_map_queues, 5816 .sdev_attrs = pqi_sdev_attrs, 5817 .shost_attrs = pqi_shost_attrs, 5818 }; 5819 5820 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 5821 { 5822 int rc; 5823 struct Scsi_Host *shost; 5824 5825 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 5826 if (!shost) { 5827 dev_err(&ctrl_info->pci_dev->dev, 5828 "scsi_host_alloc failed for controller %u\n", 5829 ctrl_info->ctrl_id); 5830 return -ENOMEM; 5831 } 5832 5833 shost->io_port = 0; 5834 shost->n_io_port = 0; 5835 shost->this_id = -1; 5836 shost->max_channel = PQI_MAX_BUS; 5837 shost->max_cmd_len = MAX_COMMAND_SIZE; 5838 shost->max_lun = ~0; 5839 shost->max_id = ~0; 5840 shost->max_sectors = ctrl_info->max_sectors; 5841 shost->can_queue = ctrl_info->scsi_ml_can_queue; 5842 shost->cmd_per_lun = shost->can_queue; 5843 shost->sg_tablesize = ctrl_info->sg_tablesize; 5844 shost->transportt = pqi_sas_transport_template; 5845 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 5846 shost->unique_id = shost->irq; 5847 shost->nr_hw_queues = ctrl_info->num_queue_groups; 5848 shost->hostdata[0] = (unsigned long)ctrl_info; 5849 5850 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 5851 if (rc) { 5852 dev_err(&ctrl_info->pci_dev->dev, 5853 "scsi_add_host failed for controller %u\n", 5854 ctrl_info->ctrl_id); 5855 goto free_host; 5856 } 5857 5858 rc = pqi_add_sas_host(shost, ctrl_info); 5859 if (rc) { 5860 dev_err(&ctrl_info->pci_dev->dev, 5861 "add SAS host failed for controller %u\n", 5862 ctrl_info->ctrl_id); 5863 goto remove_host; 5864 } 5865 5866 ctrl_info->scsi_host = shost; 5867 5868 return 0; 5869 5870 remove_host: 5871 scsi_remove_host(shost); 5872 free_host: 5873 scsi_host_put(shost); 5874 5875 return 
rc; 5876 } 5877 5878 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 5879 { 5880 struct Scsi_Host *shost; 5881 5882 pqi_delete_sas_host(ctrl_info); 5883 5884 shost = ctrl_info->scsi_host; 5885 if (!shost) 5886 return; 5887 5888 scsi_remove_host(shost); 5889 scsi_host_put(shost); 5890 } 5891 5892 #define PQI_RESET_ACTION_RESET 0x1 5893 5894 #define PQI_RESET_TYPE_NO_RESET 0x0 5895 #define PQI_RESET_TYPE_SOFT_RESET 0x1 5896 #define PQI_RESET_TYPE_FIRM_RESET 0x2 5897 #define PQI_RESET_TYPE_HARD_RESET 0x3 5898 5899 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 5900 { 5901 int rc; 5902 u32 reset_params; 5903 5904 reset_params = (PQI_RESET_ACTION_RESET << 5) | 5905 PQI_RESET_TYPE_HARD_RESET; 5906 5907 writel(reset_params, 5908 &ctrl_info->pqi_registers->device_reset); 5909 5910 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 5911 if (rc) 5912 dev_err(&ctrl_info->pci_dev->dev, 5913 "PQI reset failed\n"); 5914 5915 return rc; 5916 } 5917 5918 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) 5919 { 5920 int rc; 5921 struct bmic_identify_controller *identify; 5922 5923 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 5924 if (!identify) 5925 return -ENOMEM; 5926 5927 rc = pqi_identify_controller(ctrl_info, identify); 5928 if (rc) 5929 goto out; 5930 5931 memcpy(ctrl_info->firmware_version, identify->firmware_version, 5932 sizeof(identify->firmware_version)); 5933 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 5934 snprintf(ctrl_info->firmware_version + 5935 strlen(ctrl_info->firmware_version), 5936 sizeof(ctrl_info->firmware_version), 5937 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 5938 5939 out: 5940 kfree(identify); 5941 5942 return rc; 5943 } 5944 5945 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 5946 { 5947 u32 table_length; 5948 u32 section_offset; 5949 void __iomem *table_iomem_addr; 5950 struct pqi_config_table *config_table; 5951 struct pqi_config_table_section_header *section; 5952 5953 table_length = ctrl_info->config_table_length; 5954 5955 config_table = kmalloc(table_length, GFP_KERNEL); 5956 if (!config_table) { 5957 dev_err(&ctrl_info->pci_dev->dev, 5958 "failed to allocate memory for PQI configuration table\n"); 5959 return -ENOMEM; 5960 } 5961 5962 /* 5963 * Copy the config table contents from I/O memory space into the 5964 * temporary buffer. 5965 */ 5966 table_iomem_addr = ctrl_info->iomem_base + 5967 ctrl_info->config_table_offset; 5968 memcpy_fromio(config_table, table_iomem_addr, table_length); 5969 5970 section_offset = 5971 get_unaligned_le32(&config_table->first_section_offset); 5972 5973 while (section_offset) { 5974 section = (void *)config_table + section_offset; 5975 5976 switch (get_unaligned_le16(&section->section_id)) { 5977 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 5978 if (pqi_disable_heartbeat) 5979 dev_warn(&ctrl_info->pci_dev->dev, 5980 "heartbeat disabled by module parameter\n"); 5981 else 5982 ctrl_info->heartbeat_counter = 5983 table_iomem_addr + 5984 section_offset + 5985 offsetof( 5986 struct pqi_config_table_heartbeat, 5987 heartbeat_counter); 5988 break; 5989 } 5990 5991 section_offset = 5992 get_unaligned_le16(&section->next_section_offset); 5993 } 5994 5995 kfree(config_table); 5996 5997 return 0; 5998 } 5999 6000 /* Switches the controller from PQI mode back into SIS mode.
*/ 6001 6002 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 6003 { 6004 int rc; 6005 6006 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 6007 rc = pqi_reset(ctrl_info); 6008 if (rc) 6009 return rc; 6010 sis_reenable_sis_mode(ctrl_info); 6011 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6012 6013 return 0; 6014 } 6015 6016 /* 6017 * If the controller isn't already in SIS mode, this function forces it into 6018 * SIS mode. 6019 */ 6020 6021 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 6022 { 6023 if (!sis_is_firmware_running(ctrl_info)) 6024 return -ENXIO; 6025 6026 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 6027 return 0; 6028 6029 if (sis_is_kernel_up(ctrl_info)) { 6030 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6031 return 0; 6032 } 6033 6034 return pqi_revert_to_sis_mode(ctrl_info); 6035 } 6036 6037 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 6038 { 6039 int rc; 6040 6041 rc = pqi_force_sis_mode(ctrl_info); 6042 if (rc) 6043 return rc; 6044 6045 /* 6046 * Wait until the controller is ready to start accepting SIS 6047 * commands. 6048 */ 6049 rc = sis_wait_for_ctrl_ready(ctrl_info); 6050 if (rc) 6051 return rc; 6052 6053 /* 6054 * Get the controller properties. This allows us to determine 6055 * whether or not it supports PQI mode. 6056 */ 6057 rc = sis_get_ctrl_properties(ctrl_info); 6058 if (rc) { 6059 dev_err(&ctrl_info->pci_dev->dev, 6060 "error obtaining controller properties\n"); 6061 return rc; 6062 } 6063 6064 rc = sis_get_pqi_capabilities(ctrl_info); 6065 if (rc) { 6066 dev_err(&ctrl_info->pci_dev->dev, 6067 "error obtaining controller capabilities\n"); 6068 return rc; 6069 } 6070 6071 if (reset_devices) { 6072 if (ctrl_info->max_outstanding_requests > 6073 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 6074 ctrl_info->max_outstanding_requests = 6075 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 6076 } else { 6077 if (ctrl_info->max_outstanding_requests > 6078 PQI_MAX_OUTSTANDING_REQUESTS) 6079 ctrl_info->max_outstanding_requests = 6080 PQI_MAX_OUTSTANDING_REQUESTS; 6081 } 6082 6083 pqi_calculate_io_resources(ctrl_info); 6084 6085 rc = pqi_alloc_error_buffer(ctrl_info); 6086 if (rc) { 6087 dev_err(&ctrl_info->pci_dev->dev, 6088 "failed to allocate PQI error buffer\n"); 6089 return rc; 6090 } 6091 6092 /* 6093 * If the function we are about to call succeeds, the 6094 * controller will transition from legacy SIS mode 6095 * into PQI mode. 6096 */ 6097 rc = sis_init_base_struct_addr(ctrl_info); 6098 if (rc) { 6099 dev_err(&ctrl_info->pci_dev->dev, 6100 "error initializing PQI mode\n"); 6101 return rc; 6102 } 6103 6104 /* Wait for the controller to complete the SIS -> PQI transition. */ 6105 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6106 if (rc) { 6107 dev_err(&ctrl_info->pci_dev->dev, 6108 "transition to PQI mode failed\n"); 6109 return rc; 6110 } 6111 6112 /* From here on, we are running in PQI mode. 
*/ 6113 ctrl_info->pqi_mode_enabled = true; 6114 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6115 6116 rc = pqi_process_config_table(ctrl_info); 6117 if (rc) 6118 return rc; 6119 6120 rc = pqi_alloc_admin_queues(ctrl_info); 6121 if (rc) { 6122 dev_err(&ctrl_info->pci_dev->dev, 6123 "failed to allocate admin queues\n"); 6124 return rc; 6125 } 6126 6127 rc = pqi_create_admin_queues(ctrl_info); 6128 if (rc) { 6129 dev_err(&ctrl_info->pci_dev->dev, 6130 "error creating admin queues\n"); 6131 return rc; 6132 } 6133 6134 rc = pqi_report_device_capability(ctrl_info); 6135 if (rc) { 6136 dev_err(&ctrl_info->pci_dev->dev, 6137 "obtaining device capability failed\n"); 6138 return rc; 6139 } 6140 6141 rc = pqi_validate_device_capability(ctrl_info); 6142 if (rc) 6143 return rc; 6144 6145 pqi_calculate_queue_resources(ctrl_info); 6146 6147 rc = pqi_enable_msix_interrupts(ctrl_info); 6148 if (rc) 6149 return rc; 6150 6151 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 6152 ctrl_info->max_msix_vectors = 6153 ctrl_info->num_msix_vectors_enabled; 6154 pqi_calculate_queue_resources(ctrl_info); 6155 } 6156 6157 rc = pqi_alloc_io_resources(ctrl_info); 6158 if (rc) 6159 return rc; 6160 6161 rc = pqi_alloc_operational_queues(ctrl_info); 6162 if (rc) { 6163 dev_err(&ctrl_info->pci_dev->dev, 6164 "failed to allocate operational queues\n"); 6165 return rc; 6166 } 6167 6168 pqi_init_operational_queues(ctrl_info); 6169 6170 rc = pqi_request_irqs(ctrl_info); 6171 if (rc) 6172 return rc; 6173 6174 rc = pqi_create_queues(ctrl_info); 6175 if (rc) 6176 return rc; 6177 6178 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6179 6180 ctrl_info->controller_online = true; 6181 pqi_start_heartbeat_timer(ctrl_info); 6182 6183 rc = pqi_enable_events(ctrl_info); 6184 if (rc) { 6185 dev_err(&ctrl_info->pci_dev->dev, 6186 "error enabling events\n"); 6187 return rc; 6188 } 6189 6190 /* Register with the SCSI subsystem. 
*/ 6191 rc = pqi_register_scsi(ctrl_info); 6192 if (rc) 6193 return rc; 6194 6195 rc = pqi_get_ctrl_firmware_version(ctrl_info); 6196 if (rc) { 6197 dev_err(&ctrl_info->pci_dev->dev, 6198 "error obtaining firmware version\n"); 6199 return rc; 6200 } 6201 6202 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6203 if (rc) { 6204 dev_err(&ctrl_info->pci_dev->dev, 6205 "error updating host wellness\n"); 6206 return rc; 6207 } 6208 6209 pqi_schedule_update_time_worker(ctrl_info); 6210 6211 pqi_scan_scsi_devices(ctrl_info); 6212 6213 return 0; 6214 } 6215 6216 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 6217 { 6218 unsigned int i; 6219 struct pqi_admin_queues *admin_queues; 6220 struct pqi_event_queue *event_queue; 6221 6222 admin_queues = &ctrl_info->admin_queues; 6223 admin_queues->iq_pi_copy = 0; 6224 admin_queues->oq_ci_copy = 0; 6225 *admin_queues->oq_pi = 0; 6226 6227 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6228 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 6229 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 6230 ctrl_info->queue_groups[i].oq_ci_copy = 0; 6231 6232 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0; 6233 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0; 6234 *ctrl_info->queue_groups[i].oq_pi = 0; 6235 } 6236 6237 event_queue = &ctrl_info->event_queue; 6238 *event_queue->oq_pi = 0; 6239 event_queue->oq_ci_copy = 0; 6240 } 6241 6242 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 6243 { 6244 int rc; 6245 6246 rc = pqi_force_sis_mode(ctrl_info); 6247 if (rc) 6248 return rc; 6249 6250 /* 6251 * Wait until the controller is ready to start accepting SIS 6252 * commands. 6253 */ 6254 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 6255 if (rc) 6256 return rc; 6257 6258 /* 6259 * If the function we are about to call succeeds, the 6260 * controller will transition from legacy SIS mode 6261 * into PQI mode. 6262 */ 6263 rc = sis_init_base_struct_addr(ctrl_info); 6264 if (rc) { 6265 dev_err(&ctrl_info->pci_dev->dev, 6266 "error initializing PQI mode\n"); 6267 return rc; 6268 } 6269 6270 /* Wait for the controller to complete the SIS -> PQI transition. */ 6271 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6272 if (rc) { 6273 dev_err(&ctrl_info->pci_dev->dev, 6274 "transition to PQI mode failed\n"); 6275 return rc; 6276 } 6277 6278 /* From here on, we are running in PQI mode. 
*/ 6279 ctrl_info->pqi_mode_enabled = true; 6280 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6281 6282 pqi_reinit_queues(ctrl_info); 6283 6284 rc = pqi_create_admin_queues(ctrl_info); 6285 if (rc) { 6286 dev_err(&ctrl_info->pci_dev->dev, 6287 "error creating admin queues\n"); 6288 return rc; 6289 } 6290 6291 rc = pqi_create_queues(ctrl_info); 6292 if (rc) 6293 return rc; 6294 6295 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6296 6297 ctrl_info->controller_online = true; 6298 pqi_start_heartbeat_timer(ctrl_info); 6299 pqi_ctrl_unblock_requests(ctrl_info); 6300 6301 rc = pqi_enable_events(ctrl_info); 6302 if (rc) { 6303 dev_err(&ctrl_info->pci_dev->dev, 6304 "error enabling events\n"); 6305 return rc; 6306 } 6307 6308 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6309 if (rc) { 6310 dev_err(&ctrl_info->pci_dev->dev, 6311 "error updating host wellness\n"); 6312 return rc; 6313 } 6314 6315 pqi_schedule_update_time_worker(ctrl_info); 6316 6317 pqi_scan_scsi_devices(ctrl_info); 6318 6319 return 0; 6320 } 6321 6322 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 6323 u16 timeout) 6324 { 6325 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 6326 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 6327 } 6328 6329 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 6330 { 6331 int rc; 6332 u64 mask; 6333 6334 rc = pci_enable_device(ctrl_info->pci_dev); 6335 if (rc) { 6336 dev_err(&ctrl_info->pci_dev->dev, 6337 "failed to enable PCI device\n"); 6338 return rc; 6339 } 6340 6341 if (sizeof(dma_addr_t) > 4) 6342 mask = DMA_BIT_MASK(64); 6343 else 6344 mask = DMA_BIT_MASK(32); 6345 6346 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); 6347 if (rc) { 6348 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 6349 goto disable_device; 6350 } 6351 6352 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 6353 if (rc) { 6354 dev_err(&ctrl_info->pci_dev->dev, 6355 "failed to obtain PCI resources\n"); 6356 goto disable_device; 6357 } 6358 6359 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( 6360 ctrl_info->pci_dev, 0), 6361 sizeof(struct pqi_ctrl_registers)); 6362 if (!ctrl_info->iomem_base) { 6363 dev_err(&ctrl_info->pci_dev->dev, 6364 "failed to map memory for controller registers\n"); 6365 rc = -ENOMEM; 6366 goto release_regions; 6367 } 6368 6369 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 6370 6371 /* Increase the PCIe completion timeout. */ 6372 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 6373 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 6374 if (rc) { 6375 dev_err(&ctrl_info->pci_dev->dev, 6376 "failed to set PCIe completion timeout\n"); 6377 goto release_regions; 6378 } 6379 6380 /* Enable bus mastering. 
*/ 6381 pci_set_master(ctrl_info->pci_dev); 6382 6383 ctrl_info->registers = ctrl_info->iomem_base; 6384 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 6385 6386 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 6387 6388 return 0; 6389 6390 release_regions: 6391 pci_release_regions(ctrl_info->pci_dev); 6392 disable_device: 6393 pci_disable_device(ctrl_info->pci_dev); 6394 6395 return rc; 6396 } 6397 6398 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 6399 { 6400 iounmap(ctrl_info->iomem_base); 6401 pci_release_regions(ctrl_info->pci_dev); 6402 if (pci_is_enabled(ctrl_info->pci_dev)) 6403 pci_disable_device(ctrl_info->pci_dev); 6404 pci_set_drvdata(ctrl_info->pci_dev, NULL); 6405 } 6406 6407 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 6408 { 6409 struct pqi_ctrl_info *ctrl_info; 6410 6411 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 6412 GFP_KERNEL, numa_node); 6413 if (!ctrl_info) 6414 return NULL; 6415 6416 mutex_init(&ctrl_info->scan_mutex); 6417 mutex_init(&ctrl_info->lun_reset_mutex); 6418 6419 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 6420 spin_lock_init(&ctrl_info->scsi_device_list_lock); 6421 6422 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 6423 atomic_set(&ctrl_info->num_interrupts, 0); 6424 6425 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 6426 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 6427 6428 init_timer(&ctrl_info->heartbeat_timer); 6429 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 6430 6431 sema_init(&ctrl_info->sync_request_sem, 6432 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 6433 init_waitqueue_head(&ctrl_info->block_requests_wait); 6434 6435 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 6436 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 6437 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 6438 pqi_raid_bypass_retry_worker); 6439 6440 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 6441 ctrl_info->irq_mode = IRQ_MODE_NONE; 6442 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 6443 6444 return ctrl_info; 6445 } 6446 6447 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 6448 { 6449 kfree(ctrl_info); 6450 } 6451 6452 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 6453 { 6454 pqi_free_irqs(ctrl_info); 6455 pqi_disable_msix_interrupts(ctrl_info); 6456 } 6457 6458 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 6459 { 6460 pqi_stop_heartbeat_timer(ctrl_info); 6461 pqi_free_interrupts(ctrl_info); 6462 if (ctrl_info->queue_memory_base) 6463 dma_free_coherent(&ctrl_info->pci_dev->dev, 6464 ctrl_info->queue_memory_length, 6465 ctrl_info->queue_memory_base, 6466 ctrl_info->queue_memory_base_dma_handle); 6467 if (ctrl_info->admin_queue_memory_base) 6468 dma_free_coherent(&ctrl_info->pci_dev->dev, 6469 ctrl_info->admin_queue_memory_length, 6470 ctrl_info->admin_queue_memory_base, 6471 ctrl_info->admin_queue_memory_base_dma_handle); 6472 pqi_free_all_io_requests(ctrl_info); 6473 if (ctrl_info->error_buffer) 6474 dma_free_coherent(&ctrl_info->pci_dev->dev, 6475 ctrl_info->error_buffer_length, 6476 ctrl_info->error_buffer, 6477 ctrl_info->error_buffer_dma_handle); 6478 if (ctrl_info->iomem_base) 6479 pqi_cleanup_pci_init(ctrl_info); 6480 pqi_free_ctrl_info(ctrl_info); 6481 } 6482 6483 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 6484 { 6485 pqi_cancel_rescan_worker(ctrl_info); 6486 pqi_cancel_update_time_worker(ctrl_info); 6487 
pqi_remove_all_scsi_devices(ctrl_info); 6488 pqi_unregister_scsi(ctrl_info); 6489 if (ctrl_info->pqi_mode_enabled) 6490 pqi_revert_to_sis_mode(ctrl_info); 6491 pqi_free_ctrl_resources(ctrl_info); 6492 } 6493 6494 static void pqi_perform_lockup_action(void) 6495 { 6496 switch (pqi_lockup_action) { 6497 case PANIC: 6498 panic("FATAL: Smart Family Controller lockup detected"); 6499 break; 6500 case REBOOT: 6501 emergency_restart(); 6502 break; 6503 case NONE: 6504 default: 6505 break; 6506 } 6507 } 6508 6509 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 6510 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 6511 .status = SAM_STAT_CHECK_CONDITION, 6512 }; 6513 6514 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 6515 { 6516 unsigned int i; 6517 struct pqi_io_request *io_request; 6518 struct scsi_cmnd *scmd; 6519 6520 for (i = 0; i < ctrl_info->max_io_slots; i++) { 6521 io_request = &ctrl_info->io_request_pool[i]; 6522 if (atomic_read(&io_request->refcount) == 0) 6523 continue; 6524 6525 scmd = io_request->scmd; 6526 if (scmd) { 6527 set_host_byte(scmd, DID_NO_CONNECT); 6528 } else { 6529 io_request->status = -ENXIO; 6530 io_request->error_info = 6531 &pqi_ctrl_offline_raid_error_info; 6532 } 6533 6534 io_request->io_complete_callback(io_request, 6535 io_request->context); 6536 } 6537 } 6538 6539 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 6540 { 6541 pqi_perform_lockup_action(); 6542 pqi_stop_heartbeat_timer(ctrl_info); 6543 pqi_free_interrupts(ctrl_info); 6544 pqi_cancel_rescan_worker(ctrl_info); 6545 pqi_cancel_update_time_worker(ctrl_info); 6546 pqi_ctrl_wait_until_quiesced(ctrl_info); 6547 pqi_fail_all_outstanding_requests(ctrl_info); 6548 pqi_clear_all_queued_raid_bypass_retries(ctrl_info); 6549 pqi_ctrl_unblock_requests(ctrl_info); 6550 } 6551 6552 static void pqi_ctrl_offline_worker(struct work_struct *work) 6553 { 6554 struct pqi_ctrl_info *ctrl_info; 6555 6556 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 6557 pqi_take_ctrl_offline_deferred(ctrl_info); 6558 } 6559 6560 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 6561 { 6562 if (!ctrl_info->controller_online) 6563 return; 6564 6565 ctrl_info->controller_online = false; 6566 ctrl_info->pqi_mode_enabled = false; 6567 pqi_ctrl_block_requests(ctrl_info); 6568 if (!pqi_disable_ctrl_shutdown) 6569 sis_shutdown_ctrl(ctrl_info); 6570 pci_disable_device(ctrl_info->pci_dev); 6571 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); 6572 schedule_work(&ctrl_info->ctrl_offline_work); 6573 } 6574 6575 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 6576 const struct pci_device_id *id) 6577 { 6578 char *ctrl_description; 6579 6580 if (id->driver_data) 6581 ctrl_description = (char *)id->driver_data; 6582 else 6583 ctrl_description = "Microsemi Smart Family Controller"; 6584 6585 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 6586 } 6587 6588 static int pqi_pci_probe(struct pci_dev *pci_dev, 6589 const struct pci_device_id *id) 6590 { 6591 int rc; 6592 int node; 6593 struct pqi_ctrl_info *ctrl_info; 6594 6595 pqi_print_ctrl_info(pci_dev, id); 6596 6597 if (pqi_disable_device_id_wildcards && 6598 id->subvendor == PCI_ANY_ID && 6599 id->subdevice == PCI_ANY_ID) { 6600 dev_warn(&pci_dev->dev, 6601 "controller not probed because device ID wildcards are disabled\n"); 6602 return -ENODEV; 6603 } 6604 6605 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) 6606 
dev_warn(&pci_dev->dev, 6607 "controller device ID matched using wildcards\n"); 6608 6609 node = dev_to_node(&pci_dev->dev); 6610 if (node == NUMA_NO_NODE) 6611 set_dev_node(&pci_dev->dev, 0); 6612 6613 ctrl_info = pqi_alloc_ctrl_info(node); 6614 if (!ctrl_info) { 6615 dev_err(&pci_dev->dev, 6616 "failed to allocate controller info block\n"); 6617 return -ENOMEM; 6618 } 6619 6620 ctrl_info->pci_dev = pci_dev; 6621 6622 rc = pqi_pci_init(ctrl_info); 6623 if (rc) 6624 goto error; 6625 6626 rc = pqi_ctrl_init(ctrl_info); 6627 if (rc) 6628 goto error; 6629 6630 return 0; 6631 6632 error: 6633 pqi_remove_ctrl(ctrl_info); 6634 6635 return rc; 6636 } 6637 6638 static void pqi_pci_remove(struct pci_dev *pci_dev) 6639 { 6640 struct pqi_ctrl_info *ctrl_info; 6641 6642 ctrl_info = pci_get_drvdata(pci_dev); 6643 if (!ctrl_info) 6644 return; 6645 6646 pqi_remove_ctrl(ctrl_info); 6647 } 6648 6649 static void pqi_shutdown(struct pci_dev *pci_dev) 6650 { 6651 int rc; 6652 struct pqi_ctrl_info *ctrl_info; 6653 6654 ctrl_info = pci_get_drvdata(pci_dev); 6655 if (!ctrl_info) 6656 goto error; 6657 6658 /* 6659 * Write all data in the controller's battery-backed cache to 6660 * storage. 6661 */ 6662 rc = pqi_flush_cache(ctrl_info); 6663 if (rc == 0) 6664 return; 6665 6666 error: 6667 dev_warn(&pci_dev->dev, 6668 "unable to flush controller cache\n"); 6669 } 6670 6671 static void pqi_process_lockup_action_param(void) 6672 { 6673 unsigned int i; 6674 6675 if (!pqi_lockup_action_param) 6676 return; 6677 6678 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6679 if (strcmp(pqi_lockup_action_param, 6680 pqi_lockup_actions[i].name) == 0) { 6681 pqi_lockup_action = pqi_lockup_actions[i].action; 6682 return; 6683 } 6684 } 6685 6686 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 6687 DRIVER_NAME_SHORT, pqi_lockup_action_param); 6688 } 6689 6690 static void pqi_process_module_params(void) 6691 { 6692 pqi_process_lockup_action_param(); 6693 } 6694 6695 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) 6696 { 6697 struct pqi_ctrl_info *ctrl_info; 6698 6699 ctrl_info = pci_get_drvdata(pci_dev); 6700 6701 pqi_disable_events(ctrl_info); 6702 pqi_cancel_update_time_worker(ctrl_info); 6703 pqi_cancel_rescan_worker(ctrl_info); 6704 pqi_wait_until_scan_finished(ctrl_info); 6705 pqi_wait_until_lun_reset_finished(ctrl_info); 6706 pqi_flush_cache(ctrl_info); 6707 pqi_ctrl_block_requests(ctrl_info); 6708 pqi_ctrl_wait_until_quiesced(ctrl_info); 6709 pqi_wait_until_inbound_queues_empty(ctrl_info); 6710 pqi_ctrl_wait_for_pending_io(ctrl_info); 6711 pqi_stop_heartbeat_timer(ctrl_info); 6712 6713 if (state.event == PM_EVENT_FREEZE) 6714 return 0; 6715 6716 pci_save_state(pci_dev); 6717 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 6718 6719 ctrl_info->controller_online = false; 6720 ctrl_info->pqi_mode_enabled = false; 6721 6722 return 0; 6723 } 6724 6725 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) 6726 { 6727 int rc; 6728 struct pqi_ctrl_info *ctrl_info; 6729 6730 ctrl_info = pci_get_drvdata(pci_dev); 6731 6732 if (pci_dev->current_state != PCI_D0) { 6733 ctrl_info->max_hw_queue_index = 0; 6734 pqi_free_interrupts(ctrl_info); 6735 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); 6736 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, 6737 IRQF_SHARED, DRIVER_NAME_SHORT, 6738 &ctrl_info->queue_groups[0]); 6739 if (rc) { 6740 dev_err(&ctrl_info->pci_dev->dev, 6741 "irq %u init failed with error %d\n", 6742 
pci_dev->irq, rc); 6743 return rc; 6744 } 6745 pqi_start_heartbeat_timer(ctrl_info); 6746 pqi_ctrl_unblock_requests(ctrl_info); 6747 return 0; 6748 } 6749 6750 pci_set_power_state(pci_dev, PCI_D0); 6751 pci_restore_state(pci_dev); 6752 6753 return pqi_ctrl_init_resume(ctrl_info); 6754 } 6755 6756 /* Define the PCI IDs for the controllers that we support. */ 6757 static const struct pci_device_id pqi_pci_id_table[] = { 6758 { 6759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6760 0x152d, 0x8a22) 6761 }, 6762 { 6763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6764 0x152d, 0x8a23) 6765 }, 6766 { 6767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6768 0x152d, 0x8a24) 6769 }, 6770 { 6771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6772 0x152d, 0x8a36) 6773 }, 6774 { 6775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6776 0x152d, 0x8a37) 6777 }, 6778 { 6779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6780 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 6781 }, 6782 { 6783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6784 PCI_VENDOR_ID_ADAPTEC2, 0x0605) 6785 }, 6786 { 6787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6788 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 6789 }, 6790 { 6791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6792 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 6793 }, 6794 { 6795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6796 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 6797 }, 6798 { 6799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6800 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 6801 }, 6802 { 6803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6804 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 6805 }, 6806 { 6807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6808 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 6809 }, 6810 { 6811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6812 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 6813 }, 6814 { 6815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6816 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 6817 }, 6818 { 6819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6820 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 6821 }, 6822 { 6823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6824 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 6825 }, 6826 { 6827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6828 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 6829 }, 6830 { 6831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6832 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 6833 }, 6834 { 6835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6836 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 6837 }, 6838 { 6839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6840 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 6841 }, 6842 { 6843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6844 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 6845 }, 6846 { 6847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6848 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 6849 }, 6850 { 6851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6852 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 6853 }, 6854 { 6855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6856 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 6857 }, 6858 { 6859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6860 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 6861 }, 6862 { 6863 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6864 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 6865 }, 6866 { 6867 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6868 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 6869 }, 6870 { 6871 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6872 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 6873 }, 6874 { 6875 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6876 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 6877 }, 6878 { 6879 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6880 PCI_VENDOR_ID_ADAPTEC2, 
0x1380) 6881 }, 6882 { 6883 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6884 PCI_VENDOR_ID_HP, 0x0600) 6885 }, 6886 { 6887 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6888 PCI_VENDOR_ID_HP, 0x0601) 6889 }, 6890 { 6891 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6892 PCI_VENDOR_ID_HP, 0x0602) 6893 }, 6894 { 6895 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6896 PCI_VENDOR_ID_HP, 0x0603) 6897 }, 6898 { 6899 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6900 PCI_VENDOR_ID_HP, 0x0604) 6901 }, 6902 { 6903 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6904 PCI_VENDOR_ID_HP, 0x0606) 6905 }, 6906 { 6907 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6908 PCI_VENDOR_ID_HP, 0x0650) 6909 }, 6910 { 6911 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6912 PCI_VENDOR_ID_HP, 0x0651) 6913 }, 6914 { 6915 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6916 PCI_VENDOR_ID_HP, 0x0652) 6917 }, 6918 { 6919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6920 PCI_VENDOR_ID_HP, 0x0653) 6921 }, 6922 { 6923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6924 PCI_VENDOR_ID_HP, 0x0654) 6925 }, 6926 { 6927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6928 PCI_VENDOR_ID_HP, 0x0655) 6929 }, 6930 { 6931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6932 PCI_VENDOR_ID_HP, 0x0656) 6933 }, 6934 { 6935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6936 PCI_VENDOR_ID_HP, 0x0657) 6937 }, 6938 { 6939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6940 PCI_VENDOR_ID_HP, 0x0700) 6941 }, 6942 { 6943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6944 PCI_VENDOR_ID_HP, 0x0701) 6945 }, 6946 { 6947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6948 PCI_VENDOR_ID_HP, 0x1001) 6949 }, 6950 { 6951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6952 PCI_VENDOR_ID_HP, 0x1100) 6953 }, 6954 { 6955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6956 PCI_VENDOR_ID_HP, 0x1101) 6957 }, 6958 { 6959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6960 PCI_VENDOR_ID_HP, 0x1102) 6961 }, 6962 { 6963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6964 PCI_VENDOR_ID_HP, 0x1150) 6965 }, 6966 { 6967 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6968 PCI_ANY_ID, PCI_ANY_ID) 6969 }, 6970 { 0 } 6971 }; 6972 6973 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 6974 6975 static struct pci_driver pqi_pci_driver = { 6976 .name = DRIVER_NAME_SHORT, 6977 .id_table = pqi_pci_id_table, 6978 .probe = pqi_pci_probe, 6979 .remove = pqi_pci_remove, 6980 .shutdown = pqi_shutdown, 6981 #if defined(CONFIG_PM) 6982 .suspend = pqi_suspend, 6983 .resume = pqi_resume, 6984 #endif 6985 }; 6986 6987 static int __init pqi_init(void) 6988 { 6989 int rc; 6990 6991 pr_info(DRIVER_NAME "\n"); 6992 6993 pqi_sas_transport_template = 6994 sas_attach_transport(&pqi_sas_transport_functions); 6995 if (!pqi_sas_transport_template) 6996 return -ENODEV; 6997 6998 pqi_process_module_params(); 6999 7000 rc = pci_register_driver(&pqi_pci_driver); 7001 if (rc) 7002 sas_release_transport(pqi_sas_transport_template); 7003 7004 return rc; 7005 } 7006 7007 static void __exit pqi_cleanup(void) 7008 { 7009 pci_unregister_driver(&pqi_pci_driver); 7010 sas_release_transport(pqi_sas_transport_template); 7011 } 7012 7013 module_init(pqi_init); 7014 module_exit(pqi_cleanup); 7015 7016 static void __attribute__((unused)) verify_structures(void) 7017 { 7018 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7019 sis_host_to_ctrl_doorbell) != 0x20); 7020 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7021 sis_interrupt_mask) != 0x34); 7022 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7023 
sis_ctrl_to_host_doorbell) != 0x9c); 7024 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7025 sis_ctrl_to_host_doorbell_clear) != 0xa0); 7026 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7027 sis_driver_scratch) != 0xb0); 7028 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7029 sis_firmware_status) != 0xbc); 7030 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7031 sis_mailbox) != 0x1000); 7032 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7033 pqi_registers) != 0x4000); 7034 7035 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7036 iu_type) != 0x0); 7037 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7038 iu_length) != 0x2); 7039 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7040 response_queue_id) != 0x4); 7041 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7042 work_area) != 0x6); 7043 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 7044 7045 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7046 status) != 0x0); 7047 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7048 service_response) != 0x1); 7049 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7050 data_present) != 0x2); 7051 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7052 reserved) != 0x3); 7053 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7054 residual_count) != 0x4); 7055 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7056 data_length) != 0x8); 7057 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7058 reserved1) != 0xa); 7059 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7060 data) != 0xc); 7061 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 7062 7063 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7064 data_in_result) != 0x0); 7065 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7066 data_out_result) != 0x1); 7067 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7068 reserved) != 0x2); 7069 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7070 status) != 0x5); 7071 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7072 status_qualifier) != 0x6); 7073 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7074 sense_data_length) != 0x8); 7075 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7076 response_data_length) != 0xa); 7077 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7078 data_in_transferred) != 0xc); 7079 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7080 data_out_transferred) != 0x10); 7081 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7082 data) != 0x14); 7083 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 7084 7085 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7086 signature) != 0x0); 7087 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7088 function_and_status_code) != 0x8); 7089 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7090 max_admin_iq_elements) != 0x10); 7091 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7092 max_admin_oq_elements) != 0x11); 7093 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7094 admin_iq_element_length) != 0x12); 7095 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7096 admin_oq_element_length) != 0x13); 7097 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7098 max_reset_timeout) != 0x14); 7099 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7100 legacy_intx_status) != 0x18); 7101 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7102 legacy_intx_mask_set) != 0x1c); 7103 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7104 legacy_intx_mask_clear) != 0x20); 7105 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7106 device_status) != 0x40); 7107 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 
7108 admin_iq_pi_offset) != 0x48); 7109 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7110 admin_oq_ci_offset) != 0x50); 7111 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7112 admin_iq_element_array_addr) != 0x58); 7113 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7114 admin_oq_element_array_addr) != 0x60); 7115 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7116 admin_iq_ci_addr) != 0x68); 7117 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7118 admin_oq_pi_addr) != 0x70); 7119 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7120 admin_iq_num_elements) != 0x78); 7121 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7122 admin_oq_num_elements) != 0x79); 7123 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7124 admin_queue_int_msg_num) != 0x7a); 7125 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7126 device_error) != 0x80); 7127 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7128 error_details) != 0x88); 7129 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7130 device_reset) != 0x90); 7131 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7132 power_action) != 0x94); 7133 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 7134 7135 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7136 header.iu_type) != 0); 7137 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7138 header.iu_length) != 2); 7139 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7140 header.work_area) != 6); 7141 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7142 request_id) != 8); 7143 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7144 function_code) != 10); 7145 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7146 data.report_device_capability.buffer_length) != 44); 7147 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7148 data.report_device_capability.sg_descriptor) != 48); 7149 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7150 data.create_operational_iq.queue_id) != 12); 7151 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7152 data.create_operational_iq.element_array_addr) != 16); 7153 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7154 data.create_operational_iq.ci_addr) != 24); 7155 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7156 data.create_operational_iq.num_elements) != 32); 7157 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7158 data.create_operational_iq.element_length) != 34); 7159 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7160 data.create_operational_iq.queue_protocol) != 36); 7161 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7162 data.create_operational_oq.queue_id) != 12); 7163 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7164 data.create_operational_oq.element_array_addr) != 16); 7165 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7166 data.create_operational_oq.pi_addr) != 24); 7167 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7168 data.create_operational_oq.num_elements) != 32); 7169 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7170 data.create_operational_oq.element_length) != 34); 7171 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7172 data.create_operational_oq.queue_protocol) != 36); 7173 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7174 data.create_operational_oq.int_msg_num) != 40); 7175 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7176 data.create_operational_oq.coalescing_count) != 42); 7177 BUILD_BUG_ON(offsetof(struct 
pqi_general_admin_request, 7178 data.create_operational_oq.min_coalescing_time) != 44); 7179 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7180 data.create_operational_oq.max_coalescing_time) != 48); 7181 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7182 data.delete_operational_queue.queue_id) != 12); 7183 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 7184 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7185 data.create_operational_iq) != 64 - 11); 7186 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7187 data.create_operational_oq) != 64 - 11); 7188 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7189 data.delete_operational_queue) != 64 - 11); 7190 7191 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7192 header.iu_type) != 0); 7193 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7194 header.iu_length) != 2); 7195 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7196 header.work_area) != 6); 7197 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7198 request_id) != 8); 7199 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7200 function_code) != 10); 7201 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7202 status) != 11); 7203 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7204 data.create_operational_iq.status_descriptor) != 12); 7205 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7206 data.create_operational_iq.iq_pi_offset) != 16); 7207 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7208 data.create_operational_oq.status_descriptor) != 12); 7209 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7210 data.create_operational_oq.oq_ci_offset) != 16); 7211 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 7212 7213 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7214 header.iu_type) != 0); 7215 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7216 header.iu_length) != 2); 7217 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7218 header.response_queue_id) != 4); 7219 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7220 header.work_area) != 6); 7221 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7222 request_id) != 8); 7223 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7224 nexus_id) != 10); 7225 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7226 buffer_length) != 12); 7227 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7228 lun_number) != 16); 7229 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7230 protocol_specific) != 24); 7231 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7232 error_index) != 27); 7233 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7234 cdb) != 32); 7235 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7236 sg_descriptors) != 64); 7237 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 7238 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7239 7240 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7241 header.iu_type) != 0); 7242 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7243 header.iu_length) != 2); 7244 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7245 header.response_queue_id) != 4); 7246 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7247 header.work_area) != 6); 7248 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7249 request_id) != 8); 7250 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7251 nexus_id) != 12); 7252 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7253 buffer_length) != 16); 7254 
BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7255 data_encryption_key_index) != 22); 7256 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7257 encrypt_tweak_lower) != 24); 7258 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7259 encrypt_tweak_upper) != 28); 7260 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7261 cdb) != 32); 7262 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7263 error_index) != 48); 7264 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7265 num_sg_descriptors) != 50); 7266 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7267 cdb_length) != 51); 7268 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7269 lun_number) != 52); 7270 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7271 sg_descriptors) != 64); 7272 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 7273 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7274 7275 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7276 header.iu_type) != 0); 7277 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7278 header.iu_length) != 2); 7279 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7280 request_id) != 8); 7281 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7282 error_index) != 10); 7283 7284 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7285 header.iu_type) != 0); 7286 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7287 header.iu_length) != 2); 7288 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7289 header.response_queue_id) != 4); 7290 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7291 request_id) != 8); 7292 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7293 data.report_event_configuration.buffer_length) != 12); 7294 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7295 data.report_event_configuration.sg_descriptors) != 16); 7296 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7297 data.set_event_configuration.global_event_oq_id) != 10); 7298 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7299 data.set_event_configuration.buffer_length) != 12); 7300 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7301 data.set_event_configuration.sg_descriptors) != 16); 7302 7303 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7304 max_inbound_iu_length) != 6); 7305 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7306 max_outbound_iu_length) != 14); 7307 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 7308 7309 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7310 data_length) != 0); 7311 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7312 iq_arbitration_priority_support_bitmask) != 8); 7313 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7314 maximum_aw_a) != 9); 7315 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7316 maximum_aw_b) != 10); 7317 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7318 maximum_aw_c) != 11); 7319 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7320 max_inbound_queues) != 16); 7321 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7322 max_elements_per_iq) != 18); 7323 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7324 max_iq_element_length) != 24); 7325 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7326 min_iq_element_length) != 26); 7327 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7328 max_outbound_queues) != 30); 7329 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7330 max_elements_per_oq) != 32); 7331 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7332 intr_coalescing_time_granularity) != 
34); 7333 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7334 max_oq_element_length) != 36); 7335 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7336 min_oq_element_length) != 38); 7337 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7338 iu_layer_descriptors) != 64); 7339 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 7340 7341 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7342 event_type) != 0); 7343 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7344 oq_id) != 2); 7345 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 7346 7347 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7348 num_event_descriptors) != 2); 7349 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7350 descriptors) != 4); 7351 7352 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 7353 ARRAY_SIZE(pqi_supported_event_types)); 7354 7355 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7356 header.iu_type) != 0); 7357 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7358 header.iu_length) != 2); 7359 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7360 event_type) != 8); 7361 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7362 event_id) != 10); 7363 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7364 additional_event_id) != 12); 7365 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7366 data) != 16); 7367 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 7368 7369 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7370 header.iu_type) != 0); 7371 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7372 header.iu_length) != 2); 7373 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7374 event_type) != 8); 7375 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7376 event_id) != 10); 7377 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7378 additional_event_id) != 12); 7379 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 7380 7381 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7382 header.iu_type) != 0); 7383 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7384 header.iu_length) != 2); 7385 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7386 request_id) != 8); 7387 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7388 nexus_id) != 10); 7389 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7390 lun_number) != 16); 7391 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7392 protocol_specific) != 24); 7393 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7394 outbound_queue_id_to_manage) != 26); 7395 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7396 request_id_to_manage) != 28); 7397 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7398 task_management_function) != 30); 7399 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 7400 7401 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7402 header.iu_type) != 0); 7403 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7404 header.iu_length) != 2); 7405 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7406 request_id) != 8); 7407 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7408 nexus_id) != 10); 7409 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7410 additional_response_info) != 12); 7411 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7412 response_code) != 15); 7413 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 7414 7415 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7416 
configured_logical_drive_count) != 0); 7417 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7418 configuration_signature) != 1); 7419 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7420 firmware_version) != 5); 7421 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7422 extended_logical_unit_count) != 154); 7423 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7424 firmware_build_number) != 190); 7425 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7426 controller_mode) != 292); 7427 7428 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7429 phys_bay_in_box) != 115); 7430 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7431 device_type) != 120); 7432 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7433 redundant_path_present_map) != 1736); 7434 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7435 active_path_number) != 1738); 7436 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7437 alternate_paths_phys_connector) != 1739); 7438 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7439 alternate_paths_phys_box_on_port) != 1755); 7440 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7441 current_queue_depth_limit) != 1796); 7442 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 7443 7444 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 7445 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 7446 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 7447 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7448 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 7449 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7450 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 7451 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 7452 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7453 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 7454 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 7455 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7456 7457 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 7458 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 7459 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 7460 } 7461
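
/*
 * Note: verify_structures() is never called at run time; it exists only so
 * that the BUILD_BUG_ON() checks above are evaluated at compile time and
 * fail the build if any of these structure layouts drift from the offsets
 * the controller interface expects.
 */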