/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.1.2-126"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		2
#define DRIVER_REVISION		126

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
106 .action = PANIC, 107 .name = "panic", 108 }, 109 }; 110 111 static unsigned int pqi_supported_event_types[] = { 112 PQI_EVENT_TYPE_HOTPLUG, 113 PQI_EVENT_TYPE_HARDWARE, 114 PQI_EVENT_TYPE_PHYSICAL_DEVICE, 115 PQI_EVENT_TYPE_LOGICAL_DEVICE, 116 PQI_EVENT_TYPE_AIO_STATE_CHANGE, 117 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, 118 }; 119 120 static int pqi_disable_device_id_wildcards; 121 module_param_named(disable_device_id_wildcards, 122 pqi_disable_device_id_wildcards, int, 0644); 123 MODULE_PARM_DESC(disable_device_id_wildcards, 124 "Disable device ID wildcards."); 125 126 static int pqi_disable_heartbeat; 127 module_param_named(disable_heartbeat, 128 pqi_disable_heartbeat, int, 0644); 129 MODULE_PARM_DESC(disable_heartbeat, 130 "Disable heartbeat."); 131 132 static int pqi_disable_ctrl_shutdown; 133 module_param_named(disable_ctrl_shutdown, 134 pqi_disable_ctrl_shutdown, int, 0644); 135 MODULE_PARM_DESC(disable_ctrl_shutdown, 136 "Disable controller shutdown when controller locked up."); 137 138 static char *pqi_lockup_action_param; 139 module_param_named(lockup_action, 140 pqi_lockup_action_param, charp, 0644); 141 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" 142 "\t\tSupported: none, reboot, panic\n" 143 "\t\tDefault: none"); 144 145 static char *raid_levels[] = { 146 "RAID-0", 147 "RAID-4", 148 "RAID-1(1+0)", 149 "RAID-5", 150 "RAID-5+1", 151 "RAID-ADG", 152 "RAID-1(ADM)", 153 }; 154 155 static char *pqi_raid_level_to_string(u8 raid_level) 156 { 157 if (raid_level < ARRAY_SIZE(raid_levels)) 158 return raid_levels[raid_level]; 159 160 return "RAID UNKNOWN"; 161 } 162 163 #define SA_RAID_0 0 164 #define SA_RAID_4 1 165 #define SA_RAID_1 2 /* also used for RAID 10 */ 166 #define SA_RAID_5 3 /* also used for RAID 50 */ 167 #define SA_RAID_51 4 168 #define SA_RAID_6 5 /* also used for RAID 60 */ 169 #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ 170 #define SA_RAID_MAX SA_RAID_ADM 171 #define SA_RAID_UNKNOWN 0xff 172 173 static inline void pqi_scsi_done(struct scsi_cmnd *scmd) 174 { 175 pqi_prep_for_scsi_done(scmd); 176 scmd->scsi_done(scmd); 177 } 178 179 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) 180 { 181 return memcmp(scsi3addr1, scsi3addr2, 8) == 0; 182 } 183 184 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost) 185 { 186 void *hostdata = shost_priv(shost); 187 188 return *((struct pqi_ctrl_info **)hostdata); 189 } 190 191 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) 192 { 193 return !device->is_physical_device; 194 } 195 196 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) 197 { 198 return scsi3addr[2] != 0; 199 } 200 201 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 202 { 203 return !ctrl_info->controller_online; 204 } 205 206 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) 207 { 208 if (ctrl_info->controller_online) 209 if (!sis_is_firmware_running(ctrl_info)) 210 pqi_take_ctrl_offline(ctrl_info); 211 } 212 213 static inline bool pqi_is_hba_lunid(u8 *scsi3addr) 214 { 215 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); 216 } 217 218 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode( 219 struct pqi_ctrl_info *ctrl_info) 220 { 221 return sis_read_driver_scratch(ctrl_info); 222 } 223 224 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, 225 enum pqi_ctrl_mode mode) 226 { 227 sis_write_driver_scratch(ctrl_info, mode); 228 } 229 230 static inline void pqi_ctrl_block_requests(struct 
pqi_ctrl_info *ctrl_info) 231 { 232 ctrl_info->block_requests = true; 233 scsi_block_requests(ctrl_info->scsi_host); 234 } 235 236 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) 237 { 238 ctrl_info->block_requests = false; 239 wake_up_all(&ctrl_info->block_requests_wait); 240 pqi_retry_raid_bypass_requests(ctrl_info); 241 scsi_unblock_requests(ctrl_info->scsi_host); 242 } 243 244 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 245 { 246 return ctrl_info->block_requests; 247 } 248 249 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, 250 unsigned long timeout_msecs) 251 { 252 unsigned long remaining_msecs; 253 254 if (!pqi_ctrl_blocked(ctrl_info)) 255 return timeout_msecs; 256 257 atomic_inc(&ctrl_info->num_blocked_threads); 258 259 if (timeout_msecs == NO_TIMEOUT) { 260 wait_event(ctrl_info->block_requests_wait, 261 !pqi_ctrl_blocked(ctrl_info)); 262 remaining_msecs = timeout_msecs; 263 } else { 264 unsigned long remaining_jiffies; 265 266 remaining_jiffies = 267 wait_event_timeout(ctrl_info->block_requests_wait, 268 !pqi_ctrl_blocked(ctrl_info), 269 msecs_to_jiffies(timeout_msecs)); 270 remaining_msecs = jiffies_to_msecs(remaining_jiffies); 271 } 272 273 atomic_dec(&ctrl_info->num_blocked_threads); 274 275 return remaining_msecs; 276 } 277 278 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) 279 { 280 atomic_inc(&ctrl_info->num_busy_threads); 281 } 282 283 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) 284 { 285 atomic_dec(&ctrl_info->num_busy_threads); 286 } 287 288 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) 289 { 290 while (atomic_read(&ctrl_info->num_busy_threads) > 291 atomic_read(&ctrl_info->num_blocked_threads)) 292 usleep_range(1000, 2000); 293 } 294 295 static inline bool pqi_device_offline(struct pqi_scsi_dev *device) 296 { 297 return device->device_offline; 298 } 299 300 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device) 301 { 302 device->in_reset = true; 303 } 304 305 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device) 306 { 307 device->in_reset = false; 308 } 309 310 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device) 311 { 312 return device->in_reset; 313 } 314 315 static inline void pqi_schedule_rescan_worker_with_delay( 316 struct pqi_ctrl_info *ctrl_info, unsigned long delay) 317 { 318 if (pqi_ctrl_offline(ctrl_info)) 319 return; 320 321 schedule_delayed_work(&ctrl_info->rescan_work, delay); 322 } 323 324 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) 325 { 326 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 327 } 328 329 #define PQI_RESCAN_WORK_DELAY (10 * HZ) 330 331 static inline void pqi_schedule_rescan_worker_delayed( 332 struct pqi_ctrl_info *ctrl_info) 333 { 334 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); 335 } 336 337 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) 338 { 339 cancel_delayed_work_sync(&ctrl_info->rescan_work); 340 } 341 342 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) 343 { 344 if (!ctrl_info->heartbeat_counter) 345 return 0; 346 347 return readl(ctrl_info->heartbeat_counter); 348 } 349 350 static int pqi_map_single(struct pci_dev *pci_dev, 351 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 352 size_t buffer_length, int data_direction) 353 { 354 dma_addr_t bus_address; 355 356 if (!buffer || buffer_length == 0 
	    data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
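	/*
	 * On success the caller owns the single DMA mapping created above and
	 * must release it with pqi_pci_unmap(), passing back the direction
	 * returned through *pci_direction (see pqi_identify_controller() and
	 * the other callers below).
	 */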
} 479 480 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 481 { 482 io_request->scmd = NULL; 483 io_request->status = 0; 484 io_request->error_info = NULL; 485 io_request->raid_bypass = false; 486 } 487 488 static struct pqi_io_request *pqi_alloc_io_request( 489 struct pqi_ctrl_info *ctrl_info) 490 { 491 struct pqi_io_request *io_request; 492 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ 493 494 while (1) { 495 io_request = &ctrl_info->io_request_pool[i]; 496 if (atomic_inc_return(&io_request->refcount) == 1) 497 break; 498 atomic_dec(&io_request->refcount); 499 i = (i + 1) % ctrl_info->max_io_slots; 500 } 501 502 /* benignly racy */ 503 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; 504 505 pqi_reinit_io_request(io_request); 506 507 return io_request; 508 } 509 510 static void pqi_free_io_request(struct pqi_io_request *io_request) 511 { 512 atomic_dec(&io_request->refcount); 513 } 514 515 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 516 struct bmic_identify_controller *buffer) 517 { 518 int rc; 519 int pci_direction; 520 struct pqi_raid_path_request request; 521 522 rc = pqi_build_raid_path_request(ctrl_info, &request, 523 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer, 524 sizeof(*buffer), 0, &pci_direction); 525 if (rc) 526 return rc; 527 528 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 529 NULL, NO_TIMEOUT); 530 531 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 532 pci_direction); 533 534 return rc; 535 } 536 537 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 538 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 539 { 540 int rc; 541 int pci_direction; 542 struct pqi_raid_path_request request; 543 544 rc = pqi_build_raid_path_request(ctrl_info, &request, 545 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page, 546 &pci_direction); 547 if (rc) 548 return rc; 549 550 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 551 NULL, NO_TIMEOUT); 552 553 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 554 pci_direction); 555 556 return rc; 557 } 558 559 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 560 struct pqi_scsi_dev *device, 561 struct bmic_identify_physical_device *buffer, 562 size_t buffer_length) 563 { 564 int rc; 565 int pci_direction; 566 u16 bmic_device_index; 567 struct pqi_raid_path_request request; 568 569 rc = pqi_build_raid_path_request(ctrl_info, &request, 570 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 571 buffer_length, 0, &pci_direction); 572 if (rc) 573 return rc; 574 575 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 576 request.cdb[2] = (u8)bmic_device_index; 577 request.cdb[9] = (u8)(bmic_device_index >> 8); 578 579 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 580 0, NULL, NO_TIMEOUT); 581 582 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 583 pci_direction); 584 585 return rc; 586 } 587 588 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 589 enum bmic_flush_cache_shutdown_event shutdown_event) 590 { 591 int rc; 592 struct pqi_raid_path_request request; 593 int pci_direction; 594 struct bmic_flush_cache *flush_cache; 595 596 /* 597 * Don't bother trying to flush the cache if the controller is 598 * locked up. 
599 */ 600 if (pqi_ctrl_offline(ctrl_info)) 601 return -ENXIO; 602 603 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 604 if (!flush_cache) 605 return -ENOMEM; 606 607 flush_cache->shutdown_event = shutdown_event; 608 609 rc = pqi_build_raid_path_request(ctrl_info, &request, 610 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache, 611 sizeof(*flush_cache), 0, &pci_direction); 612 if (rc) 613 goto out; 614 615 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 616 0, NULL, NO_TIMEOUT); 617 618 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 619 pci_direction); 620 621 out: 622 kfree(flush_cache); 623 624 return rc; 625 } 626 627 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 628 void *buffer, size_t buffer_length) 629 { 630 int rc; 631 struct pqi_raid_path_request request; 632 int pci_direction; 633 634 rc = pqi_build_raid_path_request(ctrl_info, &request, 635 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer, 636 buffer_length, 0, &pci_direction); 637 if (rc) 638 return rc; 639 640 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 641 0, NULL, NO_TIMEOUT); 642 643 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 644 pci_direction); 645 646 return rc; 647 } 648 649 #pragma pack(1) 650 651 struct bmic_host_wellness_driver_version { 652 u8 start_tag[4]; 653 u8 driver_version_tag[2]; 654 __le16 driver_version_length; 655 char driver_version[32]; 656 u8 end_tag[2]; 657 }; 658 659 #pragma pack() 660 661 static int pqi_write_driver_version_to_host_wellness( 662 struct pqi_ctrl_info *ctrl_info) 663 { 664 int rc; 665 struct bmic_host_wellness_driver_version *buffer; 666 size_t buffer_length; 667 668 buffer_length = sizeof(*buffer); 669 670 buffer = kmalloc(buffer_length, GFP_KERNEL); 671 if (!buffer) 672 return -ENOMEM; 673 674 buffer->start_tag[0] = '<'; 675 buffer->start_tag[1] = 'H'; 676 buffer->start_tag[2] = 'W'; 677 buffer->start_tag[3] = '>'; 678 buffer->driver_version_tag[0] = 'D'; 679 buffer->driver_version_tag[1] = 'V'; 680 put_unaligned_le16(sizeof(buffer->driver_version), 681 &buffer->driver_version_length); 682 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, 683 sizeof(buffer->driver_version) - 1); 684 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; 685 buffer->end_tag[0] = 'Z'; 686 buffer->end_tag[1] = 'Z'; 687 688 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 689 690 kfree(buffer); 691 692 return rc; 693 } 694 695 #pragma pack(1) 696 697 struct bmic_host_wellness_time { 698 u8 start_tag[4]; 699 u8 time_tag[2]; 700 __le16 time_length; 701 u8 time[8]; 702 u8 dont_write_tag[2]; 703 u8 end_tag[2]; 704 }; 705 706 #pragma pack() 707 708 static int pqi_write_current_time_to_host_wellness( 709 struct pqi_ctrl_info *ctrl_info) 710 { 711 int rc; 712 struct bmic_host_wellness_time *buffer; 713 size_t buffer_length; 714 time64_t local_time; 715 unsigned int year; 716 struct tm tm; 717 718 buffer_length = sizeof(*buffer); 719 720 buffer = kmalloc(buffer_length, GFP_KERNEL); 721 if (!buffer) 722 return -ENOMEM; 723 724 buffer->start_tag[0] = '<'; 725 buffer->start_tag[1] = 'H'; 726 buffer->start_tag[2] = 'W'; 727 buffer->start_tag[3] = '>'; 728 buffer->time_tag[0] = 'T'; 729 buffer->time_tag[1] = 'D'; 730 put_unaligned_le16(sizeof(buffer->time), 731 &buffer->time_length); 732 733 local_time = ktime_get_real_seconds(); 734 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 735 year = tm.tm_year + 1900; 736 737 buffer->time[0] = bin2bcd(tm.tm_hour); 738 
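	/*
	 * The remaining wellness time bytes are also BCD-encoded: minutes,
	 * seconds, a reserved zero byte, month, day of month, then century
	 * and two-digit year.
	 */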
buffer->time[1] = bin2bcd(tm.tm_min); 739 buffer->time[2] = bin2bcd(tm.tm_sec); 740 buffer->time[3] = 0; 741 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 742 buffer->time[5] = bin2bcd(tm.tm_mday); 743 buffer->time[6] = bin2bcd(year / 100); 744 buffer->time[7] = bin2bcd(year % 100); 745 746 buffer->dont_write_tag[0] = 'D'; 747 buffer->dont_write_tag[1] = 'W'; 748 buffer->end_tag[0] = 'Z'; 749 buffer->end_tag[1] = 'Z'; 750 751 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 752 753 kfree(buffer); 754 755 return rc; 756 } 757 758 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 759 760 static void pqi_update_time_worker(struct work_struct *work) 761 { 762 int rc; 763 struct pqi_ctrl_info *ctrl_info; 764 765 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 766 update_time_work); 767 768 if (pqi_ctrl_offline(ctrl_info)) 769 return; 770 771 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 772 if (rc) 773 dev_warn(&ctrl_info->pci_dev->dev, 774 "error updating time on controller\n"); 775 776 schedule_delayed_work(&ctrl_info->update_time_work, 777 PQI_UPDATE_TIME_WORK_INTERVAL); 778 } 779 780 static inline void pqi_schedule_update_time_worker( 781 struct pqi_ctrl_info *ctrl_info) 782 { 783 schedule_delayed_work(&ctrl_info->update_time_work, 0); 784 } 785 786 static inline void pqi_cancel_update_time_worker( 787 struct pqi_ctrl_info *ctrl_info) 788 { 789 cancel_delayed_work_sync(&ctrl_info->update_time_work); 790 } 791 792 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 793 void *buffer, size_t buffer_length) 794 { 795 int rc; 796 int pci_direction; 797 struct pqi_raid_path_request request; 798 799 rc = pqi_build_raid_path_request(ctrl_info, &request, 800 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction); 801 if (rc) 802 return rc; 803 804 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 805 NULL, NO_TIMEOUT); 806 807 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 808 pci_direction); 809 810 return rc; 811 } 812 813 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 814 void **buffer) 815 { 816 int rc; 817 size_t lun_list_length; 818 size_t lun_data_length; 819 size_t new_lun_list_length; 820 void *lun_data = NULL; 821 struct report_lun_header *report_lun_header; 822 823 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 824 if (!report_lun_header) { 825 rc = -ENOMEM; 826 goto out; 827 } 828 829 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, 830 sizeof(*report_lun_header)); 831 if (rc) 832 goto out; 833 834 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 835 836 again: 837 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 838 839 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 840 if (!lun_data) { 841 rc = -ENOMEM; 842 goto out; 843 } 844 845 if (lun_list_length == 0) { 846 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 847 goto out; 848 } 849 850 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 851 if (rc) 852 goto out; 853 854 new_lun_list_length = get_unaligned_be32( 855 &((struct report_lun_header *)lun_data)->list_length); 856 857 if (new_lun_list_length > lun_list_length) { 858 lun_list_length = new_lun_list_length; 859 kfree(lun_data); 860 goto again; 861 } 862 863 out: 864 kfree(report_lun_header); 865 866 if (rc) { 867 kfree(lun_data); 868 lun_data = NULL; 869 } 870 871 *buffer = lun_data; 872 873 return rc; 874 } 875 876 static inline 
int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, 877 void **buffer) 878 { 879 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, 880 buffer); 881 } 882 883 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, 884 void **buffer) 885 { 886 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 887 } 888 889 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 890 struct report_phys_lun_extended **physdev_list, 891 struct report_log_lun_extended **logdev_list) 892 { 893 int rc; 894 size_t logdev_list_length; 895 size_t logdev_data_length; 896 struct report_log_lun_extended *internal_logdev_list; 897 struct report_log_lun_extended *logdev_data; 898 struct report_lun_header report_lun_header; 899 900 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 901 if (rc) 902 dev_err(&ctrl_info->pci_dev->dev, 903 "report physical LUNs failed\n"); 904 905 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 906 if (rc) 907 dev_err(&ctrl_info->pci_dev->dev, 908 "report logical LUNs failed\n"); 909 910 /* 911 * Tack the controller itself onto the end of the logical device list. 912 */ 913 914 logdev_data = *logdev_list; 915 916 if (logdev_data) { 917 logdev_list_length = 918 get_unaligned_be32(&logdev_data->header.list_length); 919 } else { 920 memset(&report_lun_header, 0, sizeof(report_lun_header)); 921 logdev_data = 922 (struct report_log_lun_extended *)&report_lun_header; 923 logdev_list_length = 0; 924 } 925 926 logdev_data_length = sizeof(struct report_lun_header) + 927 logdev_list_length; 928 929 internal_logdev_list = kmalloc(logdev_data_length + 930 sizeof(struct report_log_lun_extended), GFP_KERNEL); 931 if (!internal_logdev_list) { 932 kfree(*logdev_list); 933 *logdev_list = NULL; 934 return -ENOMEM; 935 } 936 937 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 938 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 939 sizeof(struct report_log_lun_extended_entry)); 940 put_unaligned_be32(logdev_list_length + 941 sizeof(struct report_log_lun_extended_entry), 942 &internal_logdev_list->header.list_length); 943 944 kfree(*logdev_list); 945 *logdev_list = internal_logdev_list; 946 947 return 0; 948 } 949 950 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 951 int bus, int target, int lun) 952 { 953 device->bus = bus; 954 device->target = target; 955 device->lun = lun; 956 } 957 958 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 959 { 960 u8 *scsi3addr; 961 u32 lunid; 962 int bus; 963 int target; 964 int lun; 965 966 scsi3addr = device->scsi3addr; 967 lunid = get_unaligned_le32(scsi3addr); 968 969 if (pqi_is_hba_lunid(scsi3addr)) { 970 /* The specified device is the controller. */ 971 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 972 device->target_lun_valid = true; 973 return; 974 } 975 976 if (pqi_is_logical_device(device)) { 977 if (device->is_external_raid_device) { 978 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 979 target = (lunid >> 16) & 0x3fff; 980 lun = lunid & 0xff; 981 } else { 982 bus = PQI_RAID_VOLUME_BUS; 983 target = 0; 984 lun = lunid & 0x3fff; 985 } 986 pqi_set_bus_target_lun(device, bus, target, lun); 987 device->target_lun_valid = true; 988 return; 989 } 990 991 /* 992 * Defer target and LUN assignment for non-controller physical devices 993 * because the SAS transport layer will make these assignments later. 
994 */ 995 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 996 } 997 998 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 999 struct pqi_scsi_dev *device) 1000 { 1001 int rc; 1002 u8 raid_level; 1003 u8 *buffer; 1004 1005 raid_level = SA_RAID_UNKNOWN; 1006 1007 buffer = kmalloc(64, GFP_KERNEL); 1008 if (buffer) { 1009 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1010 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 1011 if (rc == 0) { 1012 raid_level = buffer[8]; 1013 if (raid_level > SA_RAID_MAX) 1014 raid_level = SA_RAID_UNKNOWN; 1015 } 1016 kfree(buffer); 1017 } 1018 1019 device->raid_level = raid_level; 1020 } 1021 1022 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1023 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1024 { 1025 char *err_msg; 1026 u32 raid_map_size; 1027 u32 r5or6_blocks_per_row; 1028 unsigned int num_phys_disks; 1029 unsigned int num_raid_map_entries; 1030 1031 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1032 1033 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1034 err_msg = "RAID map too small"; 1035 goto bad_raid_map; 1036 } 1037 1038 if (raid_map_size > sizeof(*raid_map)) { 1039 err_msg = "RAID map too large"; 1040 goto bad_raid_map; 1041 } 1042 1043 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) * 1044 (get_unaligned_le16(&raid_map->data_disks_per_row) + 1045 get_unaligned_le16(&raid_map->metadata_disks_per_row)); 1046 num_raid_map_entries = num_phys_disks * 1047 get_unaligned_le16(&raid_map->row_cnt); 1048 1049 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) { 1050 err_msg = "invalid number of map entries in RAID map"; 1051 goto bad_raid_map; 1052 } 1053 1054 if (device->raid_level == SA_RAID_1) { 1055 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1056 err_msg = "invalid RAID-1 map"; 1057 goto bad_raid_map; 1058 } 1059 } else if (device->raid_level == SA_RAID_ADM) { 1060 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1061 err_msg = "invalid RAID-1(ADM) map"; 1062 goto bad_raid_map; 1063 } 1064 } else if ((device->raid_level == SA_RAID_5 || 1065 device->raid_level == SA_RAID_6) && 1066 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1067 /* RAID 50/60 */ 1068 r5or6_blocks_per_row = 1069 get_unaligned_le16(&raid_map->strip_size) * 1070 get_unaligned_le16(&raid_map->data_disks_per_row); 1071 if (r5or6_blocks_per_row == 0) { 1072 err_msg = "invalid RAID-5 or RAID-6 map"; 1073 goto bad_raid_map; 1074 } 1075 } 1076 1077 return 0; 1078 1079 bad_raid_map: 1080 dev_warn(&ctrl_info->pci_dev->dev, 1081 "logical device %08x%08x %s\n", 1082 *((u32 *)&device->scsi3addr), 1083 *((u32 *)&device->scsi3addr[4]), err_msg); 1084 1085 return -EINVAL; 1086 } 1087 1088 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1089 struct pqi_scsi_dev *device) 1090 { 1091 int rc; 1092 int pci_direction; 1093 struct pqi_raid_path_request request; 1094 struct raid_map *raid_map; 1095 1096 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1097 if (!raid_map) 1098 return -ENOMEM; 1099 1100 rc = pqi_build_raid_path_request(ctrl_info, &request, 1101 CISS_GET_RAID_MAP, device->scsi3addr, raid_map, 1102 sizeof(*raid_map), 0, &pci_direction); 1103 if (rc) 1104 goto error; 1105 1106 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 1107 NULL, NO_TIMEOUT); 1108 1109 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 1110 pci_direction); 1111 1112 if (rc) 1113 goto error; 1114 1115 rc = 
pqi_validate_raid_map(ctrl_info, device, raid_map); 1116 if (rc) 1117 goto error; 1118 1119 device->raid_map = raid_map; 1120 1121 return 0; 1122 1123 error: 1124 kfree(raid_map); 1125 1126 return rc; 1127 } 1128 1129 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1130 struct pqi_scsi_dev *device) 1131 { 1132 int rc; 1133 u8 *buffer; 1134 u8 bypass_status; 1135 1136 buffer = kmalloc(64, GFP_KERNEL); 1137 if (!buffer) 1138 return; 1139 1140 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1141 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1142 if (rc) 1143 goto out; 1144 1145 #define RAID_BYPASS_STATUS 4 1146 #define RAID_BYPASS_CONFIGURED 0x1 1147 #define RAID_BYPASS_ENABLED 0x2 1148 1149 bypass_status = buffer[RAID_BYPASS_STATUS]; 1150 device->raid_bypass_configured = 1151 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1152 if (device->raid_bypass_configured && 1153 (bypass_status & RAID_BYPASS_ENABLED) && 1154 pqi_get_raid_map(ctrl_info, device) == 0) 1155 device->raid_bypass_enabled = true; 1156 1157 out: 1158 kfree(buffer); 1159 } 1160 1161 /* 1162 * Use vendor-specific VPD to determine online/offline status of a volume. 1163 */ 1164 1165 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1166 struct pqi_scsi_dev *device) 1167 { 1168 int rc; 1169 size_t page_length; 1170 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1171 bool volume_offline = true; 1172 u32 volume_flags; 1173 struct ciss_vpd_logical_volume_status *vpd; 1174 1175 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1176 if (!vpd) 1177 goto no_buffer; 1178 1179 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1180 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1181 if (rc) 1182 goto out; 1183 1184 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1185 volume_status) + vpd->page_length; 1186 if (page_length < sizeof(*vpd)) 1187 goto out; 1188 1189 volume_status = vpd->volume_status; 1190 volume_flags = get_unaligned_be32(&vpd->flags); 1191 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1192 1193 out: 1194 kfree(vpd); 1195 no_buffer: 1196 device->volume_status = volume_status; 1197 device->volume_offline = volume_offline; 1198 } 1199 1200 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1201 struct pqi_scsi_dev *device) 1202 { 1203 int rc; 1204 u8 *buffer; 1205 1206 buffer = kmalloc(64, GFP_KERNEL); 1207 if (!buffer) 1208 return -ENOMEM; 1209 1210 /* Send an inquiry to the device to see what it is. 
*/ 1211 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); 1212 if (rc) 1213 goto out; 1214 1215 scsi_sanitize_inquiry_string(&buffer[8], 8); 1216 scsi_sanitize_inquiry_string(&buffer[16], 16); 1217 1218 device->devtype = buffer[0] & 0x1f; 1219 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1220 memcpy(device->model, &buffer[16], sizeof(device->model)); 1221 1222 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) { 1223 if (device->is_external_raid_device) { 1224 device->raid_level = SA_RAID_UNKNOWN; 1225 device->volume_status = CISS_LV_OK; 1226 device->volume_offline = false; 1227 } else { 1228 pqi_get_raid_level(ctrl_info, device); 1229 pqi_get_raid_bypass_status(ctrl_info, device); 1230 pqi_get_volume_status(ctrl_info, device); 1231 } 1232 } 1233 1234 out: 1235 kfree(buffer); 1236 1237 return rc; 1238 } 1239 1240 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, 1241 struct pqi_scsi_dev *device, 1242 struct bmic_identify_physical_device *id_phys) 1243 { 1244 int rc; 1245 1246 memset(id_phys, 0, sizeof(*id_phys)); 1247 1248 rc = pqi_identify_physical_device(ctrl_info, device, 1249 id_phys, sizeof(*id_phys)); 1250 if (rc) { 1251 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1252 return; 1253 } 1254 1255 device->queue_depth = 1256 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1257 device->device_type = id_phys->device_type; 1258 device->active_path_index = id_phys->active_path_number; 1259 device->path_map = id_phys->redundant_path_present_map; 1260 memcpy(&device->box, 1261 &id_phys->alternate_paths_phys_box_on_port, 1262 sizeof(device->box)); 1263 memcpy(&device->phys_connector, 1264 &id_phys->alternate_paths_phys_connector, 1265 sizeof(device->phys_connector)); 1266 device->bay = id_phys->phys_bay_in_box; 1267 } 1268 1269 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1270 struct pqi_scsi_dev *device) 1271 { 1272 char *status; 1273 static const char unknown_state_str[] = 1274 "Volume is in an unknown state (%u)"; 1275 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1276 1277 switch (device->volume_status) { 1278 case CISS_LV_OK: 1279 status = "Volume online"; 1280 break; 1281 case CISS_LV_FAILED: 1282 status = "Volume failed"; 1283 break; 1284 case CISS_LV_NOT_CONFIGURED: 1285 status = "Volume not configured"; 1286 break; 1287 case CISS_LV_DEGRADED: 1288 status = "Volume degraded"; 1289 break; 1290 case CISS_LV_READY_FOR_RECOVERY: 1291 status = "Volume ready for recovery operation"; 1292 break; 1293 case CISS_LV_UNDERGOING_RECOVERY: 1294 status = "Volume undergoing recovery"; 1295 break; 1296 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1297 status = "Wrong physical drive was replaced"; 1298 break; 1299 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1300 status = "A physical drive not properly connected"; 1301 break; 1302 case CISS_LV_HARDWARE_OVERHEATING: 1303 status = "Hardware is overheating"; 1304 break; 1305 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1306 status = "Hardware has overheated"; 1307 break; 1308 case CISS_LV_UNDERGOING_EXPANSION: 1309 status = "Volume undergoing expansion"; 1310 break; 1311 case CISS_LV_NOT_AVAILABLE: 1312 status = "Volume waiting for transforming volume"; 1313 break; 1314 case CISS_LV_QUEUED_FOR_EXPANSION: 1315 status = "Volume queued for expansion"; 1316 break; 1317 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1318 status = "Volume disabled due to SCSI ID conflict"; 1319 break; 1320 case CISS_LV_EJECTED: 1321 status = "Volume has 
been ejected"; 1322 break; 1323 case CISS_LV_UNDERGOING_ERASE: 1324 status = "Volume undergoing background erase"; 1325 break; 1326 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1327 status = "Volume ready for predictive spare rebuild"; 1328 break; 1329 case CISS_LV_UNDERGOING_RPI: 1330 status = "Volume undergoing rapid parity initialization"; 1331 break; 1332 case CISS_LV_PENDING_RPI: 1333 status = "Volume queued for rapid parity initialization"; 1334 break; 1335 case CISS_LV_ENCRYPTED_NO_KEY: 1336 status = "Encrypted volume inaccessible - key not present"; 1337 break; 1338 case CISS_LV_UNDERGOING_ENCRYPTION: 1339 status = "Volume undergoing encryption process"; 1340 break; 1341 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1342 status = "Volume undergoing encryption re-keying process"; 1343 break; 1344 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1345 status = "Volume encrypted but encryption is disabled"; 1346 break; 1347 case CISS_LV_PENDING_ENCRYPTION: 1348 status = "Volume pending migration to encrypted state"; 1349 break; 1350 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1351 status = "Volume pending encryption rekeying"; 1352 break; 1353 case CISS_LV_NOT_SUPPORTED: 1354 status = "Volume not supported on this controller"; 1355 break; 1356 case CISS_LV_STATUS_UNAVAILABLE: 1357 status = "Volume status not available"; 1358 break; 1359 default: 1360 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1361 unknown_state_str, device->volume_status); 1362 status = unknown_state_buffer; 1363 break; 1364 } 1365 1366 dev_info(&ctrl_info->pci_dev->dev, 1367 "scsi %d:%d:%d:%d %s\n", 1368 ctrl_info->scsi_host->host_no, 1369 device->bus, device->target, device->lun, status); 1370 } 1371 1372 static void pqi_rescan_worker(struct work_struct *work) 1373 { 1374 struct pqi_ctrl_info *ctrl_info; 1375 1376 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1377 rescan_work); 1378 1379 pqi_scan_scsi_devices(ctrl_info); 1380 } 1381 1382 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1383 struct pqi_scsi_dev *device) 1384 { 1385 int rc; 1386 1387 if (pqi_is_logical_device(device)) 1388 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1389 device->target, device->lun); 1390 else 1391 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1392 1393 return rc; 1394 } 1395 1396 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, 1397 struct pqi_scsi_dev *device) 1398 { 1399 if (pqi_is_logical_device(device)) 1400 scsi_remove_device(device->sdev); 1401 else 1402 pqi_remove_sas_device(device); 1403 } 1404 1405 /* Assumes the SCSI device list lock is held. 
*/ 1406 1407 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1408 int bus, int target, int lun) 1409 { 1410 struct pqi_scsi_dev *device; 1411 1412 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1413 scsi_device_list_entry) 1414 if (device->bus == bus && device->target == target && 1415 device->lun == lun) 1416 return device; 1417 1418 return NULL; 1419 } 1420 1421 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, 1422 struct pqi_scsi_dev *dev2) 1423 { 1424 if (dev1->is_physical_device != dev2->is_physical_device) 1425 return false; 1426 1427 if (dev1->is_physical_device) 1428 return dev1->wwid == dev2->wwid; 1429 1430 return memcmp(dev1->volume_id, dev2->volume_id, 1431 sizeof(dev1->volume_id)) == 0; 1432 } 1433 1434 enum pqi_find_result { 1435 DEVICE_NOT_FOUND, 1436 DEVICE_CHANGED, 1437 DEVICE_SAME, 1438 }; 1439 1440 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1441 struct pqi_scsi_dev *device_to_find, 1442 struct pqi_scsi_dev **matching_device) 1443 { 1444 struct pqi_scsi_dev *device; 1445 1446 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1447 scsi_device_list_entry) { 1448 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, 1449 device->scsi3addr)) { 1450 *matching_device = device; 1451 if (pqi_device_equal(device_to_find, device)) { 1452 if (device_to_find->volume_offline) 1453 return DEVICE_CHANGED; 1454 return DEVICE_SAME; 1455 } 1456 return DEVICE_CHANGED; 1457 } 1458 } 1459 1460 return DEVICE_NOT_FOUND; 1461 } 1462 1463 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1464 1465 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1466 char *action, struct pqi_scsi_dev *device) 1467 { 1468 ssize_t count; 1469 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1470 1471 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 1472 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 1473 1474 if (device->target_lun_valid) 1475 count += snprintf(buffer + count, 1476 PQI_DEV_INFO_BUFFER_LENGTH - count, 1477 "%d:%d", 1478 device->target, 1479 device->lun); 1480 else 1481 count += snprintf(buffer + count, 1482 PQI_DEV_INFO_BUFFER_LENGTH - count, 1483 "-:-"); 1484 1485 if (pqi_is_logical_device(device)) 1486 count += snprintf(buffer + count, 1487 PQI_DEV_INFO_BUFFER_LENGTH - count, 1488 " %08x%08x", 1489 *((u32 *)&device->scsi3addr), 1490 *((u32 *)&device->scsi3addr[4])); 1491 else 1492 count += snprintf(buffer + count, 1493 PQI_DEV_INFO_BUFFER_LENGTH - count, 1494 " %016llx", device->sas_address); 1495 1496 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 1497 " %s %.8s %.16s ", 1498 scsi_device_type(device->devtype), 1499 device->vendor, 1500 device->model); 1501 1502 if (pqi_is_logical_device(device)) { 1503 if (device->devtype == TYPE_DISK) 1504 count += snprintf(buffer + count, 1505 PQI_DEV_INFO_BUFFER_LENGTH - count, 1506 "SSDSmartPathCap%c En%c %-12s", 1507 device->raid_bypass_configured ? '+' : '-', 1508 device->raid_bypass_enabled ? '+' : '-', 1509 pqi_raid_level_to_string(device->raid_level)); 1510 } else { 1511 count += snprintf(buffer + count, 1512 PQI_DEV_INFO_BUFFER_LENGTH - count, 1513 "AIO%c", device->aio_enabled ? '+' : '-'); 1514 if (device->devtype == TYPE_DISK || 1515 device->devtype == TYPE_ZBC) 1516 count += snprintf(buffer + count, 1517 PQI_DEV_INFO_BUFFER_LENGTH - count, 1518 " qd=%-6d", device->queue_depth); 1519 } 1520 1521 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 1522 } 1523 1524 /* Assumes the SCSI device list lock is held. 
*/ 1525 1526 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, 1527 struct pqi_scsi_dev *new_device) 1528 { 1529 existing_device->devtype = new_device->devtype; 1530 existing_device->device_type = new_device->device_type; 1531 existing_device->bus = new_device->bus; 1532 if (new_device->target_lun_valid) { 1533 existing_device->target = new_device->target; 1534 existing_device->lun = new_device->lun; 1535 existing_device->target_lun_valid = true; 1536 } 1537 1538 /* By definition, the scsi3addr and wwid fields are already the same. */ 1539 1540 existing_device->is_physical_device = new_device->is_physical_device; 1541 existing_device->is_external_raid_device = 1542 new_device->is_external_raid_device; 1543 existing_device->aio_enabled = new_device->aio_enabled; 1544 memcpy(existing_device->vendor, new_device->vendor, 1545 sizeof(existing_device->vendor)); 1546 memcpy(existing_device->model, new_device->model, 1547 sizeof(existing_device->model)); 1548 existing_device->sas_address = new_device->sas_address; 1549 existing_device->raid_level = new_device->raid_level; 1550 existing_device->queue_depth = new_device->queue_depth; 1551 existing_device->aio_handle = new_device->aio_handle; 1552 existing_device->volume_status = new_device->volume_status; 1553 existing_device->active_path_index = new_device->active_path_index; 1554 existing_device->path_map = new_device->path_map; 1555 existing_device->bay = new_device->bay; 1556 memcpy(existing_device->box, new_device->box, 1557 sizeof(existing_device->box)); 1558 memcpy(existing_device->phys_connector, new_device->phys_connector, 1559 sizeof(existing_device->phys_connector)); 1560 existing_device->offload_to_mirror = 0; 1561 kfree(existing_device->raid_map); 1562 existing_device->raid_map = new_device->raid_map; 1563 existing_device->raid_bypass_configured = 1564 new_device->raid_bypass_configured; 1565 existing_device->raid_bypass_enabled = 1566 new_device->raid_bypass_enabled; 1567 1568 /* To prevent this from being freed later. */ 1569 new_device->raid_map = NULL; 1570 } 1571 1572 static inline void pqi_free_device(struct pqi_scsi_dev *device) 1573 { 1574 if (device) { 1575 kfree(device->raid_map); 1576 kfree(device); 1577 } 1578 } 1579 1580 /* 1581 * Called when exposing a new device to the OS fails in order to re-adjust 1582 * our internal SCSI device list to match the SCSI ML's view. 1583 */ 1584 1585 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 1586 struct pqi_scsi_dev *device) 1587 { 1588 unsigned long flags; 1589 1590 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1591 list_del(&device->scsi_device_list_entry); 1592 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1593 1594 /* Allow the device structure to be freed later. */ 1595 device->keep_device = false; 1596 } 1597 1598 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 1599 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 1600 { 1601 int rc; 1602 unsigned int i; 1603 unsigned long flags; 1604 enum pqi_find_result find_result; 1605 struct pqi_scsi_dev *device; 1606 struct pqi_scsi_dev *next; 1607 struct pqi_scsi_dev *matching_device; 1608 LIST_HEAD(add_list); 1609 LIST_HEAD(delete_list); 1610 1611 /* 1612 * The idea here is to do as little work as possible while holding the 1613 * spinlock. That's why we go to great pains to defer anything other 1614 * than updating the internal device list until after we release the 1615 * spinlock. 
1616 */ 1617 1618 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1619 1620 /* Assume that all devices in the existing list have gone away. */ 1621 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1622 scsi_device_list_entry) 1623 device->device_gone = true; 1624 1625 for (i = 0; i < num_new_devices; i++) { 1626 device = new_device_list[i]; 1627 1628 find_result = pqi_scsi_find_entry(ctrl_info, device, 1629 &matching_device); 1630 1631 switch (find_result) { 1632 case DEVICE_SAME: 1633 /* 1634 * The newly found device is already in the existing 1635 * device list. 1636 */ 1637 device->new_device = false; 1638 matching_device->device_gone = false; 1639 pqi_scsi_update_device(matching_device, device); 1640 break; 1641 case DEVICE_NOT_FOUND: 1642 /* 1643 * The newly found device is NOT in the existing device 1644 * list. 1645 */ 1646 device->new_device = true; 1647 break; 1648 case DEVICE_CHANGED: 1649 /* 1650 * The original device has gone away and we need to add 1651 * the new device. 1652 */ 1653 device->new_device = true; 1654 break; 1655 } 1656 } 1657 1658 /* Process all devices that have gone away. */ 1659 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 1660 scsi_device_list_entry) { 1661 if (device->device_gone) { 1662 list_del(&device->scsi_device_list_entry); 1663 list_add_tail(&device->delete_list_entry, &delete_list); 1664 } 1665 } 1666 1667 /* Process all new devices. */ 1668 for (i = 0; i < num_new_devices; i++) { 1669 device = new_device_list[i]; 1670 if (!device->new_device) 1671 continue; 1672 if (device->volume_offline) 1673 continue; 1674 list_add_tail(&device->scsi_device_list_entry, 1675 &ctrl_info->scsi_device_list); 1676 list_add_tail(&device->add_list_entry, &add_list); 1677 /* To prevent this device structure from being freed later. */ 1678 device->keep_device = true; 1679 } 1680 1681 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1682 1683 /* Remove all devices that have gone away. */ 1684 list_for_each_entry_safe(device, next, &delete_list, 1685 delete_list_entry) { 1686 if (device->volume_offline) { 1687 pqi_dev_info(ctrl_info, "offline", device); 1688 pqi_show_volume_status(ctrl_info, device); 1689 } else { 1690 pqi_dev_info(ctrl_info, "removed", device); 1691 } 1692 if (device->sdev) 1693 pqi_remove_device(ctrl_info, device); 1694 list_del(&device->delete_list_entry); 1695 pqi_free_device(device); 1696 } 1697 1698 /* 1699 * Notify the SCSI ML if the queue depth of any existing device has 1700 * changed. 1701 */ 1702 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1703 scsi_device_list_entry) { 1704 if (device->sdev && device->queue_depth != 1705 device->advertised_queue_depth) { 1706 device->advertised_queue_depth = device->queue_depth; 1707 scsi_change_queue_depth(device->sdev, 1708 device->advertised_queue_depth); 1709 } 1710 } 1711 1712 /* Expose any new devices. 
*/ 1713 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 1714 if (!device->sdev) { 1715 pqi_dev_info(ctrl_info, "added", device); 1716 rc = pqi_add_device(ctrl_info, device); 1717 if (rc) { 1718 dev_warn(&ctrl_info->pci_dev->dev, 1719 "scsi %d:%d:%d:%d addition failed, device not added\n", 1720 ctrl_info->scsi_host->host_no, 1721 device->bus, device->target, 1722 device->lun); 1723 pqi_fixup_botched_add(ctrl_info, device); 1724 } 1725 } 1726 } 1727 } 1728 1729 static bool pqi_is_supported_device(struct pqi_scsi_dev *device) 1730 { 1731 bool is_supported = false; 1732 1733 switch (device->devtype) { 1734 case TYPE_DISK: 1735 case TYPE_ZBC: 1736 case TYPE_TAPE: 1737 case TYPE_MEDIUM_CHANGER: 1738 case TYPE_ENCLOSURE: 1739 is_supported = true; 1740 break; 1741 case TYPE_RAID: 1742 /* 1743 * Only support the HBA controller itself as a RAID 1744 * controller. If it's a RAID controller other than 1745 * the HBA itself (an external RAID controller, for 1746 * example), we don't support it. 1747 */ 1748 if (pqi_is_hba_lunid(device->scsi3addr)) 1749 is_supported = true; 1750 break; 1751 } 1752 1753 return is_supported; 1754 } 1755 1756 static inline bool pqi_skip_device(u8 *scsi3addr) 1757 { 1758 /* Ignore all masked devices. */ 1759 if (MASKED_DEVICE(scsi3addr)) 1760 return true; 1761 1762 return false; 1763 } 1764 1765 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1766 { 1767 int i; 1768 int rc; 1769 LIST_HEAD(new_device_list_head); 1770 struct report_phys_lun_extended *physdev_list = NULL; 1771 struct report_log_lun_extended *logdev_list = NULL; 1772 struct report_phys_lun_extended_entry *phys_lun_ext_entry; 1773 struct report_log_lun_extended_entry *log_lun_ext_entry; 1774 struct bmic_identify_physical_device *id_phys = NULL; 1775 u32 num_physicals; 1776 u32 num_logicals; 1777 struct pqi_scsi_dev **new_device_list = NULL; 1778 struct pqi_scsi_dev *device; 1779 struct pqi_scsi_dev *next; 1780 unsigned int num_new_devices; 1781 unsigned int num_valid_devices; 1782 bool is_physical_device; 1783 u8 *scsi3addr; 1784 static char *out_of_memory_msg = 1785 "failed to allocate memory, device discovery stopped"; 1786 1787 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 1788 if (rc) 1789 goto out; 1790 1791 if (physdev_list) 1792 num_physicals = 1793 get_unaligned_be32(&physdev_list->header.list_length) 1794 / sizeof(physdev_list->lun_entries[0]); 1795 else 1796 num_physicals = 0; 1797 1798 if (logdev_list) 1799 num_logicals = 1800 get_unaligned_be32(&logdev_list->header.list_length) 1801 / sizeof(logdev_list->lun_entries[0]); 1802 else 1803 num_logicals = 0; 1804 1805 if (num_physicals) { 1806 /* 1807 * We need this buffer for calls to pqi_get_physical_disk_info() 1808 * below. We allocate it here instead of inside 1809 * pqi_get_physical_disk_info() because it's a fairly large 1810 * buffer. 
1811 */ 1812 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 1813 if (!id_phys) { 1814 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1815 out_of_memory_msg); 1816 rc = -ENOMEM; 1817 goto out; 1818 } 1819 } 1820 1821 num_new_devices = num_physicals + num_logicals; 1822 1823 new_device_list = kmalloc(sizeof(*new_device_list) * 1824 num_new_devices, GFP_KERNEL); 1825 if (!new_device_list) { 1826 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 1827 rc = -ENOMEM; 1828 goto out; 1829 } 1830 1831 for (i = 0; i < num_new_devices; i++) { 1832 device = kzalloc(sizeof(*device), GFP_KERNEL); 1833 if (!device) { 1834 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1835 out_of_memory_msg); 1836 rc = -ENOMEM; 1837 goto out; 1838 } 1839 list_add_tail(&device->new_device_list_entry, 1840 &new_device_list_head); 1841 } 1842 1843 device = NULL; 1844 num_valid_devices = 0; 1845 1846 for (i = 0; i < num_new_devices; i++) { 1847 1848 if (i < num_physicals) { 1849 is_physical_device = true; 1850 phys_lun_ext_entry = &physdev_list->lun_entries[i]; 1851 log_lun_ext_entry = NULL; 1852 scsi3addr = phys_lun_ext_entry->lunid; 1853 } else { 1854 is_physical_device = false; 1855 phys_lun_ext_entry = NULL; 1856 log_lun_ext_entry = 1857 &logdev_list->lun_entries[i - num_physicals]; 1858 scsi3addr = log_lun_ext_entry->lunid; 1859 } 1860 1861 if (is_physical_device && pqi_skip_device(scsi3addr)) 1862 continue; 1863 1864 if (device) 1865 device = list_next_entry(device, new_device_list_entry); 1866 else 1867 device = list_first_entry(&new_device_list_head, 1868 struct pqi_scsi_dev, new_device_list_entry); 1869 1870 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 1871 device->is_physical_device = is_physical_device; 1872 if (!is_physical_device) 1873 device->is_external_raid_device = 1874 pqi_is_external_raid_addr(scsi3addr); 1875 1876 /* Gather information about the device. 
*/ 1877 rc = pqi_get_device_info(ctrl_info, device); 1878 if (rc == -ENOMEM) { 1879 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1880 out_of_memory_msg); 1881 goto out; 1882 } 1883 if (rc) { 1884 if (device->is_physical_device) 1885 dev_warn(&ctrl_info->pci_dev->dev, 1886 "obtaining device info failed, skipping physical device %016llx\n", 1887 get_unaligned_be64( 1888 &phys_lun_ext_entry->wwid)); 1889 else 1890 dev_warn(&ctrl_info->pci_dev->dev, 1891 "obtaining device info failed, skipping logical device %08x%08x\n", 1892 *((u32 *)&device->scsi3addr), 1893 *((u32 *)&device->scsi3addr[4])); 1894 rc = 0; 1895 continue; 1896 } 1897 1898 if (!pqi_is_supported_device(device)) 1899 continue; 1900 1901 pqi_assign_bus_target_lun(device); 1902 1903 if (device->is_physical_device) { 1904 device->wwid = phys_lun_ext_entry->wwid; 1905 if ((phys_lun_ext_entry->device_flags & 1906 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && 1907 phys_lun_ext_entry->aio_handle) 1908 device->aio_enabled = true; 1909 } else { 1910 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 1911 sizeof(device->volume_id)); 1912 } 1913 1914 switch (device->devtype) { 1915 case TYPE_DISK: 1916 case TYPE_ZBC: 1917 case TYPE_ENCLOSURE: 1918 if (device->is_physical_device) { 1919 device->sas_address = 1920 get_unaligned_be64(&device->wwid); 1921 if (device->devtype == TYPE_DISK || 1922 device->devtype == TYPE_ZBC) { 1923 device->aio_handle = 1924 phys_lun_ext_entry->aio_handle; 1925 pqi_get_physical_disk_info(ctrl_info, 1926 device, id_phys); 1927 } 1928 } 1929 break; 1930 } 1931 1932 new_device_list[num_valid_devices++] = device; 1933 } 1934 1935 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 1936 1937 out: 1938 list_for_each_entry_safe(device, next, &new_device_list_head, 1939 new_device_list_entry) { 1940 if (device->keep_device) 1941 continue; 1942 list_del(&device->new_device_list_entry); 1943 pqi_free_device(device); 1944 } 1945 1946 kfree(new_device_list); 1947 kfree(physdev_list); 1948 kfree(logdev_list); 1949 kfree(id_phys); 1950 1951 return rc; 1952 } 1953 1954 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1955 { 1956 unsigned long flags; 1957 struct pqi_scsi_dev *device; 1958 1959 while (1) { 1960 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1961 1962 device = list_first_entry_or_null(&ctrl_info->scsi_device_list, 1963 struct pqi_scsi_dev, scsi_device_list_entry); 1964 if (device) 1965 list_del(&device->scsi_device_list_entry); 1966 1967 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 1968 flags); 1969 1970 if (!device) 1971 break; 1972 1973 if (device->sdev) 1974 pqi_remove_device(ctrl_info, device); 1975 pqi_free_device(device); 1976 } 1977 } 1978 1979 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1980 { 1981 int rc; 1982 1983 if (pqi_ctrl_offline(ctrl_info)) 1984 return -ENXIO; 1985 1986 mutex_lock(&ctrl_info->scan_mutex); 1987 1988 rc = pqi_update_scsi_devices(ctrl_info); 1989 if (rc) 1990 pqi_schedule_rescan_worker_delayed(ctrl_info); 1991 1992 mutex_unlock(&ctrl_info->scan_mutex); 1993 1994 return rc; 1995 } 1996 1997 static void pqi_scan_start(struct Scsi_Host *shost) 1998 { 1999 pqi_scan_scsi_devices(shost_to_hba(shost)); 2000 } 2001 2002 /* Returns TRUE if scan is finished. 
*/ 2003 2004 static int pqi_scan_finished(struct Scsi_Host *shost, 2005 unsigned long elapsed_time) 2006 { 2007 struct pqi_ctrl_info *ctrl_info; 2008 2009 ctrl_info = shost_priv(shost); 2010 2011 return !mutex_is_locked(&ctrl_info->scan_mutex); 2012 } 2013 2014 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2015 { 2016 mutex_lock(&ctrl_info->scan_mutex); 2017 mutex_unlock(&ctrl_info->scan_mutex); 2018 } 2019 2020 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2021 { 2022 mutex_lock(&ctrl_info->lun_reset_mutex); 2023 mutex_unlock(&ctrl_info->lun_reset_mutex); 2024 } 2025 2026 static inline void pqi_set_encryption_info( 2027 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2028 u64 first_block) 2029 { 2030 u32 volume_blk_size; 2031 2032 /* 2033 * Set the encryption tweak values based on logical block address. 2034 * If the block size is 512, the tweak value is equal to the LBA. 2035 * For other block sizes, tweak value is (LBA * block size) / 512. 2036 */ 2037 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2038 if (volume_blk_size != 512) 2039 first_block = (first_block * volume_blk_size) / 512; 2040 2041 encryption_info->data_encryption_key_index = 2042 get_unaligned_le16(&raid_map->data_encryption_key_index); 2043 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2044 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2045 } 2046 2047 /* 2048 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2049 */ 2050 2051 #define PQI_RAID_BYPASS_INELIGIBLE 1 2052 2053 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2054 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2055 struct pqi_queue_group *queue_group) 2056 { 2057 struct raid_map *raid_map; 2058 bool is_write = false; 2059 u32 map_index; 2060 u64 first_block; 2061 u64 last_block; 2062 u32 block_cnt; 2063 u32 blocks_per_row; 2064 u64 first_row; 2065 u64 last_row; 2066 u32 first_row_offset; 2067 u32 last_row_offset; 2068 u32 first_column; 2069 u32 last_column; 2070 u64 r0_first_row; 2071 u64 r0_last_row; 2072 u32 r5or6_blocks_per_row; 2073 u64 r5or6_first_row; 2074 u64 r5or6_last_row; 2075 u32 r5or6_first_row_offset; 2076 u32 r5or6_last_row_offset; 2077 u32 r5or6_first_column; 2078 u32 r5or6_last_column; 2079 u16 data_disks_per_row; 2080 u32 total_disks_per_row; 2081 u16 layout_map_count; 2082 u32 stripesize; 2083 u16 strip_size; 2084 u32 first_group; 2085 u32 last_group; 2086 u32 current_group; 2087 u32 map_row; 2088 u32 aio_handle; 2089 u64 disk_block; 2090 u32 disk_block_cnt; 2091 u8 cdb[16]; 2092 u8 cdb_length; 2093 int offload_to_mirror; 2094 struct pqi_encryption_info *encryption_info_ptr; 2095 struct pqi_encryption_info encryption_info; 2096 #if BITS_PER_LONG == 32 2097 u64 tmpdiv; 2098 #endif 2099 2100 /* Check for valid opcode, get LBA and block count. 
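 * Worked example (standard SCSI CDB layouts): a READ_10 such as
 * 28 00 00 00 10 00 00 00 08 00 carries a big-endian 32-bit LBA in bytes
 * 2-5 and a 16-bit transfer length in bytes 7-8, so first_block = 0x1000
 * and block_cnt = 8.  READ_6 packs a 21-bit LBA into bytes 1-3 and uses a
 * single length byte where 0 means 256 blocks, which is why block_cnt is
 * fixed up below.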
*/ 2101 switch (scmd->cmnd[0]) { 2102 case WRITE_6: 2103 is_write = true; 2104 /* fall through */ 2105 case READ_6: 2106 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2107 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2108 block_cnt = (u32)scmd->cmnd[4]; 2109 if (block_cnt == 0) 2110 block_cnt = 256; 2111 break; 2112 case WRITE_10: 2113 is_write = true; 2114 /* fall through */ 2115 case READ_10: 2116 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2117 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2118 break; 2119 case WRITE_12: 2120 is_write = true; 2121 /* fall through */ 2122 case READ_12: 2123 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2124 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2125 break; 2126 case WRITE_16: 2127 is_write = true; 2128 /* fall through */ 2129 case READ_16: 2130 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2131 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2132 break; 2133 default: 2134 /* Process via normal I/O path. */ 2135 return PQI_RAID_BYPASS_INELIGIBLE; 2136 } 2137 2138 /* Check for write to non-RAID-0. */ 2139 if (is_write && device->raid_level != SA_RAID_0) 2140 return PQI_RAID_BYPASS_INELIGIBLE; 2141 2142 if (unlikely(block_cnt == 0)) 2143 return PQI_RAID_BYPASS_INELIGIBLE; 2144 2145 last_block = first_block + block_cnt - 1; 2146 raid_map = device->raid_map; 2147 2148 /* Check for invalid block or wraparound. */ 2149 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2150 last_block < first_block) 2151 return PQI_RAID_BYPASS_INELIGIBLE; 2152 2153 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2154 strip_size = get_unaligned_le16(&raid_map->strip_size); 2155 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2156 2157 /* Calculate stripe information for the request. */ 2158 blocks_per_row = data_disks_per_row * strip_size; 2159 #if BITS_PER_LONG == 32 2160 tmpdiv = first_block; 2161 do_div(tmpdiv, blocks_per_row); 2162 first_row = tmpdiv; 2163 tmpdiv = last_block; 2164 do_div(tmpdiv, blocks_per_row); 2165 last_row = tmpdiv; 2166 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2167 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2168 tmpdiv = first_row_offset; 2169 do_div(tmpdiv, strip_size); 2170 first_column = tmpdiv; 2171 tmpdiv = last_row_offset; 2172 do_div(tmpdiv, strip_size); 2173 last_column = tmpdiv; 2174 #else 2175 first_row = first_block / blocks_per_row; 2176 last_row = last_block / blocks_per_row; 2177 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2178 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2179 first_column = first_row_offset / strip_size; 2180 last_column = last_row_offset / strip_size; 2181 #endif 2182 2183 /* If this isn't a single row/column then give to the controller. */ 2184 if (first_row != last_row || first_column != last_column) 2185 return PQI_RAID_BYPASS_INELIGIBLE; 2186 2187 /* Proceeding with driver mapping. 
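 * Illustrative numbers (not from any particular controller): with
 * strip_size = 128 blocks and data_disks_per_row = 3, blocks_per_row is
 * 384, so a request at first_block = 1000 maps to first_row = 2,
 * first_row_offset = 232 and first_column = 1, i.e. the second data drive
 * in the third row of the RAID map.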
*/ 2188 total_disks_per_row = data_disks_per_row + 2189 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2190 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2191 get_unaligned_le16(&raid_map->row_cnt); 2192 map_index = (map_row * total_disks_per_row) + first_column; 2193 2194 /* RAID 1 */ 2195 if (device->raid_level == SA_RAID_1) { 2196 if (device->offload_to_mirror) 2197 map_index += data_disks_per_row; 2198 device->offload_to_mirror = !device->offload_to_mirror; 2199 } else if (device->raid_level == SA_RAID_ADM) { 2200 /* RAID ADM */ 2201 /* 2202 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2203 * divisible by 3. 2204 */ 2205 offload_to_mirror = device->offload_to_mirror; 2206 if (offload_to_mirror == 0) { 2207 /* use physical disk in the first mirrored group. */ 2208 map_index %= data_disks_per_row; 2209 } else { 2210 do { 2211 /* 2212 * Determine mirror group that map_index 2213 * indicates. 2214 */ 2215 current_group = map_index / data_disks_per_row; 2216 2217 if (offload_to_mirror != current_group) { 2218 if (current_group < 2219 layout_map_count - 1) { 2220 /* 2221 * Select raid index from 2222 * next group. 2223 */ 2224 map_index += data_disks_per_row; 2225 current_group++; 2226 } else { 2227 /* 2228 * Select raid index from first 2229 * group. 2230 */ 2231 map_index %= data_disks_per_row; 2232 current_group = 0; 2233 } 2234 } 2235 } while (offload_to_mirror != current_group); 2236 } 2237 2238 /* Set mirror group to use next time. */ 2239 offload_to_mirror = 2240 (offload_to_mirror >= layout_map_count - 1) ? 2241 0 : offload_to_mirror + 1; 2242 WARN_ON(offload_to_mirror >= layout_map_count); 2243 device->offload_to_mirror = offload_to_mirror; 2244 /* 2245 * Avoid direct use of device->offload_to_mirror within this 2246 * function since multiple threads might simultaneously 2247 * increment it beyond the range of device->layout_map_count -1. 
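 * Working on a local copy and writing the result back once bounds the
 * race to an occasional repeated mirror-group choice; the stored value is
 * always wrapped back to 0 before it can exceed layout_map_count - 1.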
2248 */ 2249 } else if ((device->raid_level == SA_RAID_5 || 2250 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2251 /* RAID 50/60 */ 2252 /* Verify first and last block are in same RAID group */ 2253 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2254 stripesize = r5or6_blocks_per_row * layout_map_count; 2255 #if BITS_PER_LONG == 32 2256 tmpdiv = first_block; 2257 first_group = do_div(tmpdiv, stripesize); 2258 tmpdiv = first_group; 2259 do_div(tmpdiv, r5or6_blocks_per_row); 2260 first_group = tmpdiv; 2261 tmpdiv = last_block; 2262 last_group = do_div(tmpdiv, stripesize); 2263 tmpdiv = last_group; 2264 do_div(tmpdiv, r5or6_blocks_per_row); 2265 last_group = tmpdiv; 2266 #else 2267 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2268 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2269 #endif 2270 if (first_group != last_group) 2271 return PQI_RAID_BYPASS_INELIGIBLE; 2272 2273 /* Verify request is in a single row of RAID 5/6 */ 2274 #if BITS_PER_LONG == 32 2275 tmpdiv = first_block; 2276 do_div(tmpdiv, stripesize); 2277 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2278 tmpdiv = last_block; 2279 do_div(tmpdiv, stripesize); 2280 r5or6_last_row = r0_last_row = tmpdiv; 2281 #else 2282 first_row = r5or6_first_row = r0_first_row = 2283 first_block / stripesize; 2284 r5or6_last_row = r0_last_row = last_block / stripesize; 2285 #endif 2286 if (r5or6_first_row != r5or6_last_row) 2287 return PQI_RAID_BYPASS_INELIGIBLE; 2288 2289 /* Verify request is in a single column */ 2290 #if BITS_PER_LONG == 32 2291 tmpdiv = first_block; 2292 first_row_offset = do_div(tmpdiv, stripesize); 2293 tmpdiv = first_row_offset; 2294 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2295 r5or6_first_row_offset = first_row_offset; 2296 tmpdiv = last_block; 2297 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2298 tmpdiv = r5or6_last_row_offset; 2299 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2300 tmpdiv = r5or6_first_row_offset; 2301 do_div(tmpdiv, strip_size); 2302 first_column = r5or6_first_column = tmpdiv; 2303 tmpdiv = r5or6_last_row_offset; 2304 do_div(tmpdiv, strip_size); 2305 r5or6_last_column = tmpdiv; 2306 #else 2307 first_row_offset = r5or6_first_row_offset = 2308 (u32)((first_block % stripesize) % 2309 r5or6_blocks_per_row); 2310 2311 r5or6_last_row_offset = 2312 (u32)((last_block % stripesize) % 2313 r5or6_blocks_per_row); 2314 2315 first_column = r5or6_first_row_offset / strip_size; 2316 r5or6_first_column = first_column; 2317 r5or6_last_column = r5or6_last_row_offset / strip_size; 2318 #endif 2319 if (r5or6_first_column != r5or6_last_column) 2320 return PQI_RAID_BYPASS_INELIGIBLE; 2321 2322 /* Request is eligible */ 2323 map_row = 2324 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2325 get_unaligned_le16(&raid_map->row_cnt); 2326 2327 map_index = (first_group * 2328 (get_unaligned_le16(&raid_map->row_cnt) * 2329 total_disks_per_row)) + 2330 (map_row * total_disks_per_row) + first_column; 2331 } 2332 2333 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) 2334 return PQI_RAID_BYPASS_INELIGIBLE; 2335 2336 aio_handle = raid_map->disk_data[map_index].aio_handle; 2337 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2338 first_row * strip_size + 2339 (first_row_offset - first_column * strip_size); 2340 disk_block_cnt = block_cnt; 2341 2342 /* Handle differing logical/physical block sizes. 
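 * phys_blk_shift scales volume blocks to member-drive blocks: a shift of
 * 3 (as when 4 KiB volume blocks sit on 512-byte-sector drives) multiplies
 * both the starting block and the block count by 8 below.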
*/ 2343 if (raid_map->phys_blk_shift) { 2344 disk_block <<= raid_map->phys_blk_shift; 2345 disk_block_cnt <<= raid_map->phys_blk_shift; 2346 } 2347 2348 if (unlikely(disk_block_cnt > 0xffff)) 2349 return PQI_RAID_BYPASS_INELIGIBLE; 2350 2351 /* Build the new CDB for the physical disk I/O. */ 2352 if (disk_block > 0xffffffff) { 2353 cdb[0] = is_write ? WRITE_16 : READ_16; 2354 cdb[1] = 0; 2355 put_unaligned_be64(disk_block, &cdb[2]); 2356 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2357 cdb[14] = 0; 2358 cdb[15] = 0; 2359 cdb_length = 16; 2360 } else { 2361 cdb[0] = is_write ? WRITE_10 : READ_10; 2362 cdb[1] = 0; 2363 put_unaligned_be32((u32)disk_block, &cdb[2]); 2364 cdb[6] = 0; 2365 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2366 cdb[9] = 0; 2367 cdb_length = 10; 2368 } 2369 2370 if (get_unaligned_le16(&raid_map->flags) & 2371 RAID_MAP_ENCRYPTION_ENABLED) { 2372 pqi_set_encryption_info(&encryption_info, raid_map, 2373 first_block); 2374 encryption_info_ptr = &encryption_info; 2375 } else { 2376 encryption_info_ptr = NULL; 2377 } 2378 2379 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2380 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2381 } 2382 2383 #define PQI_STATUS_IDLE 0x0 2384 2385 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2386 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2387 2388 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2389 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2390 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2391 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2392 #define PQI_DEVICE_STATE_ERROR 0x4 2393 2394 #define PQI_MODE_READY_TIMEOUT_SECS 30 2395 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2396 2397 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2398 { 2399 struct pqi_device_registers __iomem *pqi_registers; 2400 unsigned long timeout; 2401 u64 signature; 2402 u8 status; 2403 2404 pqi_registers = ctrl_info->pqi_registers; 2405 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 2406 2407 while (1) { 2408 signature = readq(&pqi_registers->signature); 2409 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2410 sizeof(signature)) == 0) 2411 break; 2412 if (time_after(jiffies, timeout)) { 2413 dev_err(&ctrl_info->pci_dev->dev, 2414 "timed out waiting for PQI signature\n"); 2415 return -ETIMEDOUT; 2416 } 2417 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2418 } 2419 2420 while (1) { 2421 status = readb(&pqi_registers->function_and_status_code); 2422 if (status == PQI_STATUS_IDLE) 2423 break; 2424 if (time_after(jiffies, timeout)) { 2425 dev_err(&ctrl_info->pci_dev->dev, 2426 "timed out waiting for PQI IDLE\n"); 2427 return -ETIMEDOUT; 2428 } 2429 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2430 } 2431 2432 while (1) { 2433 if (readl(&pqi_registers->device_status) == 2434 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2435 break; 2436 if (time_after(jiffies, timeout)) { 2437 dev_err(&ctrl_info->pci_dev->dev, 2438 "timed out waiting for PQI all registers ready\n"); 2439 return -ETIMEDOUT; 2440 } 2441 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2442 } 2443 2444 return 0; 2445 } 2446 2447 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2448 { 2449 struct pqi_scsi_dev *device; 2450 2451 device = io_request->scmd->device->hostdata; 2452 device->raid_bypass_enabled = false; 2453 device->aio_enabled = false; 2454 } 2455 2456 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2457 { 2458 struct pqi_ctrl_info *ctrl_info; 2459 struct pqi_scsi_dev *device; 2460 2461 device = 
sdev->hostdata; 2462 if (device->device_offline) 2463 return; 2464 2465 device->device_offline = true; 2466 scsi_device_set_state(sdev, SDEV_OFFLINE); 2467 ctrl_info = shost_to_hba(sdev->host); 2468 pqi_schedule_rescan_worker(ctrl_info); 2469 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", 2470 path, ctrl_info->scsi_host->host_no, device->bus, 2471 device->target, device->lun); 2472 } 2473 2474 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2475 { 2476 u8 scsi_status; 2477 u8 host_byte; 2478 struct scsi_cmnd *scmd; 2479 struct pqi_raid_error_info *error_info; 2480 size_t sense_data_length; 2481 int residual_count; 2482 int xfer_count; 2483 struct scsi_sense_hdr sshdr; 2484 2485 scmd = io_request->scmd; 2486 if (!scmd) 2487 return; 2488 2489 error_info = io_request->error_info; 2490 scsi_status = error_info->status; 2491 host_byte = DID_OK; 2492 2493 switch (error_info->data_out_result) { 2494 case PQI_DATA_IN_OUT_GOOD: 2495 break; 2496 case PQI_DATA_IN_OUT_UNDERFLOW: 2497 xfer_count = 2498 get_unaligned_le32(&error_info->data_out_transferred); 2499 residual_count = scsi_bufflen(scmd) - xfer_count; 2500 scsi_set_resid(scmd, residual_count); 2501 if (xfer_count < scmd->underflow) 2502 host_byte = DID_SOFT_ERROR; 2503 break; 2504 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2505 case PQI_DATA_IN_OUT_ABORTED: 2506 host_byte = DID_ABORT; 2507 break; 2508 case PQI_DATA_IN_OUT_TIMEOUT: 2509 host_byte = DID_TIME_OUT; 2510 break; 2511 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2512 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2513 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2514 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2515 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2516 case PQI_DATA_IN_OUT_ERROR: 2517 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2518 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2519 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2520 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2521 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2522 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2523 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2524 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2525 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2526 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2527 default: 2528 host_byte = DID_ERROR; 2529 break; 2530 } 2531 2532 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2533 if (sense_data_length == 0) 2534 sense_data_length = 2535 get_unaligned_le16(&error_info->response_data_length); 2536 if (sense_data_length) { 2537 if (sense_data_length > sizeof(error_info->data)) 2538 sense_data_length = sizeof(error_info->data); 2539 2540 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2541 scsi_normalize_sense(error_info->data, 2542 sense_data_length, &sshdr) && 2543 sshdr.sense_key == HARDWARE_ERROR && 2544 sshdr.asc == 0x3e && 2545 sshdr.ascq == 0x1) { 2546 pqi_take_device_offline(scmd->device, "RAID"); 2547 host_byte = DID_NO_CONNECT; 2548 } 2549 2550 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2551 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2552 memcpy(scmd->sense_buffer, error_info->data, 2553 sense_data_length); 2554 } 2555 2556 scmd->result = scsi_status; 2557 set_host_byte(scmd, host_byte); 2558 } 2559 2560 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2561 { 2562 u8 scsi_status; 2563 u8 host_byte; 2564 struct scsi_cmnd *scmd; 2565 struct pqi_aio_error_info *error_info; 2566 size_t sense_data_length; 2567 int residual_count; 2568 int xfer_count; 2569 bool device_offline; 
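	/*
	 * Translate the AIO-path error information returned by the firmware
	 * into a SCSI status and host byte, copy back any sense data, flag
	 * requests that should be retried down the normal RAID path
	 * (-EAGAIN), and take the device offline when no path remains.
	 */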
2570 2571 scmd = io_request->scmd; 2572 error_info = io_request->error_info; 2573 host_byte = DID_OK; 2574 sense_data_length = 0; 2575 device_offline = false; 2576 2577 switch (error_info->service_response) { 2578 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2579 scsi_status = error_info->status; 2580 break; 2581 case PQI_AIO_SERV_RESPONSE_FAILURE: 2582 switch (error_info->status) { 2583 case PQI_AIO_STATUS_IO_ABORTED: 2584 scsi_status = SAM_STAT_TASK_ABORTED; 2585 break; 2586 case PQI_AIO_STATUS_UNDERRUN: 2587 scsi_status = SAM_STAT_GOOD; 2588 residual_count = get_unaligned_le32( 2589 &error_info->residual_count); 2590 scsi_set_resid(scmd, residual_count); 2591 xfer_count = scsi_bufflen(scmd) - residual_count; 2592 if (xfer_count < scmd->underflow) 2593 host_byte = DID_SOFT_ERROR; 2594 break; 2595 case PQI_AIO_STATUS_OVERRUN: 2596 scsi_status = SAM_STAT_GOOD; 2597 break; 2598 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2599 pqi_aio_path_disabled(io_request); 2600 scsi_status = SAM_STAT_GOOD; 2601 io_request->status = -EAGAIN; 2602 break; 2603 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2604 case PQI_AIO_STATUS_INVALID_DEVICE: 2605 if (!io_request->raid_bypass) { 2606 device_offline = true; 2607 pqi_take_device_offline(scmd->device, "AIO"); 2608 host_byte = DID_NO_CONNECT; 2609 } 2610 scsi_status = SAM_STAT_CHECK_CONDITION; 2611 break; 2612 case PQI_AIO_STATUS_IO_ERROR: 2613 default: 2614 scsi_status = SAM_STAT_CHECK_CONDITION; 2615 break; 2616 } 2617 break; 2618 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2619 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2620 scsi_status = SAM_STAT_GOOD; 2621 break; 2622 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2623 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2624 default: 2625 scsi_status = SAM_STAT_CHECK_CONDITION; 2626 break; 2627 } 2628 2629 if (error_info->data_present) { 2630 sense_data_length = 2631 get_unaligned_le16(&error_info->data_length); 2632 if (sense_data_length) { 2633 if (sense_data_length > sizeof(error_info->data)) 2634 sense_data_length = sizeof(error_info->data); 2635 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2636 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2637 memcpy(scmd->sense_buffer, error_info->data, 2638 sense_data_length); 2639 } 2640 } 2641 2642 if (device_offline && sense_data_length == 0) 2643 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2644 0x3e, 0x1); 2645 2646 scmd->result = scsi_status; 2647 set_host_byte(scmd, host_byte); 2648 } 2649 2650 static void pqi_process_io_error(unsigned int iu_type, 2651 struct pqi_io_request *io_request) 2652 { 2653 switch (iu_type) { 2654 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2655 pqi_process_raid_io_error(io_request); 2656 break; 2657 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2658 pqi_process_aio_io_error(io_request); 2659 break; 2660 } 2661 } 2662 2663 static int pqi_interpret_task_management_response( 2664 struct pqi_task_management_response *response) 2665 { 2666 int rc; 2667 2668 switch (response->response_code) { 2669 case SOP_TMF_COMPLETE: 2670 case SOP_TMF_FUNCTION_SUCCEEDED: 2671 rc = 0; 2672 break; 2673 default: 2674 rc = -EIO; 2675 break; 2676 } 2677 2678 return rc; 2679 } 2680 2681 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2682 struct pqi_queue_group *queue_group) 2683 { 2684 unsigned int num_responses; 2685 pqi_index_t oq_pi; 2686 pqi_index_t oq_ci; 2687 struct pqi_io_request *io_request; 2688 struct pqi_io_response *response; 2689 u16 request_id; 2690 2691 num_responses = 0; 2692 oq_ci = queue_group->oq_ci_copy; 2693 2694 while (1) { 2695 
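		/*
		 * Drain completed responses: stop once the local consumer
		 * index catches up with the producer index written by the
		 * controller.
		 */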
oq_pi = *queue_group->oq_pi; 2696 if (oq_pi == oq_ci) 2697 break; 2698 2699 num_responses++; 2700 response = queue_group->oq_element_array + 2701 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2702 2703 request_id = get_unaligned_le16(&response->request_id); 2704 WARN_ON(request_id >= ctrl_info->max_io_slots); 2705 2706 io_request = &ctrl_info->io_request_pool[request_id]; 2707 WARN_ON(atomic_read(&io_request->refcount) == 0); 2708 2709 switch (response->header.iu_type) { 2710 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2711 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2712 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2713 break; 2714 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2715 io_request->status = 2716 pqi_interpret_task_management_response( 2717 (void *)response); 2718 break; 2719 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2720 pqi_aio_path_disabled(io_request); 2721 io_request->status = -EAGAIN; 2722 break; 2723 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2724 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2725 io_request->error_info = ctrl_info->error_buffer + 2726 (get_unaligned_le16(&response->error_index) * 2727 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2728 pqi_process_io_error(response->header.iu_type, 2729 io_request); 2730 break; 2731 default: 2732 dev_err(&ctrl_info->pci_dev->dev, 2733 "unexpected IU type: 0x%x\n", 2734 response->header.iu_type); 2735 break; 2736 } 2737 2738 io_request->io_complete_callback(io_request, 2739 io_request->context); 2740 2741 /* 2742 * Note that the I/O request structure CANNOT BE TOUCHED after 2743 * returning from the I/O completion callback! 2744 */ 2745 2746 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2747 } 2748 2749 if (num_responses) { 2750 queue_group->oq_ci_copy = oq_ci; 2751 writel(oq_ci, queue_group->oq_ci); 2752 } 2753 2754 return num_responses; 2755 } 2756 2757 static inline unsigned int pqi_num_elements_free(unsigned int pi, 2758 unsigned int ci, unsigned int elements_in_queue) 2759 { 2760 unsigned int num_elements_used; 2761 2762 if (pi >= ci) 2763 num_elements_used = pi - ci; 2764 else 2765 num_elements_used = elements_in_queue - ci + pi; 2766 2767 return elements_in_queue - num_elements_used - 1; 2768 } 2769 2770 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 2771 struct pqi_event_acknowledge_request *iu, size_t iu_length) 2772 { 2773 pqi_index_t iq_pi; 2774 pqi_index_t iq_ci; 2775 unsigned long flags; 2776 void *next_element; 2777 struct pqi_queue_group *queue_group; 2778 2779 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 2780 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 2781 2782 while (1) { 2783 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 2784 2785 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 2786 iq_ci = *queue_group->iq_ci[RAID_PATH]; 2787 2788 if (pqi_num_elements_free(iq_pi, iq_ci, 2789 ctrl_info->num_elements_per_iq)) 2790 break; 2791 2792 spin_unlock_irqrestore( 2793 &queue_group->submit_lock[RAID_PATH], flags); 2794 2795 if (pqi_ctrl_offline(ctrl_info)) 2796 return; 2797 } 2798 2799 next_element = queue_group->iq_element_array[RAID_PATH] + 2800 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 2801 2802 memcpy(next_element, iu, iu_length); 2803 2804 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 2805 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 2806 2807 /* 2808 * This write notifies the controller that an IU is available to be 2809 * processed. 
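 * The write below goes to the RAID-path inbound-queue producer-index
 * register while submit_lock is still held, so iq_pi_copy and the
 * hardware register stay consistent.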
2810 */ 2811 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 2812 2813 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 2814 } 2815 2816 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 2817 struct pqi_event *event) 2818 { 2819 struct pqi_event_acknowledge_request request; 2820 2821 memset(&request, 0, sizeof(request)); 2822 2823 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 2824 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 2825 &request.header.iu_length); 2826 request.event_type = event->event_type; 2827 request.event_id = event->event_id; 2828 request.additional_event_id = event->additional_event_id; 2829 2830 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 2831 } 2832 2833 static void pqi_event_worker(struct work_struct *work) 2834 { 2835 unsigned int i; 2836 struct pqi_ctrl_info *ctrl_info; 2837 struct pqi_event *event; 2838 2839 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 2840 2841 pqi_ctrl_busy(ctrl_info); 2842 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 2843 if (pqi_ctrl_offline(ctrl_info)) 2844 goto out; 2845 2846 pqi_schedule_rescan_worker_delayed(ctrl_info); 2847 2848 event = ctrl_info->events; 2849 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 2850 if (event->pending) { 2851 event->pending = false; 2852 pqi_acknowledge_event(ctrl_info, event); 2853 } 2854 event++; 2855 } 2856 2857 out: 2858 pqi_ctrl_unbusy(ctrl_info); 2859 } 2860 2861 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 2862 2863 static void pqi_heartbeat_timer_handler(struct timer_list *t) 2864 { 2865 int num_interrupts; 2866 u32 heartbeat_count; 2867 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 2868 heartbeat_timer); 2869 2870 pqi_check_ctrl_health(ctrl_info); 2871 if (pqi_ctrl_offline(ctrl_info)) 2872 return; 2873 2874 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 2875 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 2876 2877 if (num_interrupts == ctrl_info->previous_num_interrupts) { 2878 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 2879 dev_err(&ctrl_info->pci_dev->dev, 2880 "no heartbeat detected - last heartbeat count: %u\n", 2881 heartbeat_count); 2882 pqi_take_ctrl_offline(ctrl_info); 2883 return; 2884 } 2885 } else { 2886 ctrl_info->previous_num_interrupts = num_interrupts; 2887 } 2888 2889 ctrl_info->previous_heartbeat_count = heartbeat_count; 2890 mod_timer(&ctrl_info->heartbeat_timer, 2891 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 2892 } 2893 2894 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2895 { 2896 if (!ctrl_info->heartbeat_counter) 2897 return; 2898 2899 ctrl_info->previous_num_interrupts = 2900 atomic_read(&ctrl_info->num_interrupts); 2901 ctrl_info->previous_heartbeat_count = 2902 pqi_read_heartbeat_counter(ctrl_info); 2903 2904 ctrl_info->heartbeat_timer.expires = 2905 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 2906 add_timer(&ctrl_info->heartbeat_timer); 2907 } 2908 2909 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2910 { 2911 del_timer_sync(&ctrl_info->heartbeat_timer); 2912 } 2913 2914 static inline int pqi_event_type_to_event_index(unsigned int event_type) 2915 { 2916 int index; 2917 2918 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 2919 if (event_type == pqi_supported_event_types[index]) 2920 return index; 2921 2922 return -1; 2923 } 2924 2925 static inline bool pqi_is_supported_event(unsigned int event_type) 2926 { 2927 return 
pqi_event_type_to_event_index(event_type) != -1; 2928 } 2929 2930 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 2931 { 2932 unsigned int num_events; 2933 pqi_index_t oq_pi; 2934 pqi_index_t oq_ci; 2935 struct pqi_event_queue *event_queue; 2936 struct pqi_event_response *response; 2937 struct pqi_event *event; 2938 int event_index; 2939 2940 event_queue = &ctrl_info->event_queue; 2941 num_events = 0; 2942 oq_ci = event_queue->oq_ci_copy; 2943 2944 while (1) { 2945 oq_pi = *event_queue->oq_pi; 2946 if (oq_pi == oq_ci) 2947 break; 2948 2949 num_events++; 2950 response = event_queue->oq_element_array + 2951 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 2952 2953 event_index = 2954 pqi_event_type_to_event_index(response->event_type); 2955 2956 if (event_index >= 0) { 2957 if (response->request_acknowlege) { 2958 event = &ctrl_info->events[event_index]; 2959 event->pending = true; 2960 event->event_type = response->event_type; 2961 event->event_id = response->event_id; 2962 event->additional_event_id = 2963 response->additional_event_id; 2964 } 2965 } 2966 2967 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 2968 } 2969 2970 if (num_events) { 2971 event_queue->oq_ci_copy = oq_ci; 2972 writel(oq_ci, event_queue->oq_ci); 2973 schedule_work(&ctrl_info->event_work); 2974 } 2975 2976 return num_events; 2977 } 2978 2979 #define PQI_LEGACY_INTX_MASK 0x1 2980 2981 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 2982 bool enable_intx) 2983 { 2984 u32 intx_mask; 2985 struct pqi_device_registers __iomem *pqi_registers; 2986 volatile void __iomem *register_addr; 2987 2988 pqi_registers = ctrl_info->pqi_registers; 2989 2990 if (enable_intx) 2991 register_addr = &pqi_registers->legacy_intx_mask_clear; 2992 else 2993 register_addr = &pqi_registers->legacy_intx_mask_set; 2994 2995 intx_mask = readl(register_addr); 2996 intx_mask |= PQI_LEGACY_INTX_MASK; 2997 writel(intx_mask, register_addr); 2998 } 2999 3000 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3001 enum pqi_irq_mode new_mode) 3002 { 3003 switch (ctrl_info->irq_mode) { 3004 case IRQ_MODE_MSIX: 3005 switch (new_mode) { 3006 case IRQ_MODE_MSIX: 3007 break; 3008 case IRQ_MODE_INTX: 3009 pqi_configure_legacy_intx(ctrl_info, true); 3010 sis_enable_intx(ctrl_info); 3011 break; 3012 case IRQ_MODE_NONE: 3013 break; 3014 } 3015 break; 3016 case IRQ_MODE_INTX: 3017 switch (new_mode) { 3018 case IRQ_MODE_MSIX: 3019 pqi_configure_legacy_intx(ctrl_info, false); 3020 sis_enable_msix(ctrl_info); 3021 break; 3022 case IRQ_MODE_INTX: 3023 break; 3024 case IRQ_MODE_NONE: 3025 pqi_configure_legacy_intx(ctrl_info, false); 3026 break; 3027 } 3028 break; 3029 case IRQ_MODE_NONE: 3030 switch (new_mode) { 3031 case IRQ_MODE_MSIX: 3032 sis_enable_msix(ctrl_info); 3033 break; 3034 case IRQ_MODE_INTX: 3035 pqi_configure_legacy_intx(ctrl_info, true); 3036 sis_enable_intx(ctrl_info); 3037 break; 3038 case IRQ_MODE_NONE: 3039 break; 3040 } 3041 break; 3042 } 3043 3044 ctrl_info->irq_mode = new_mode; 3045 } 3046 3047 #define PQI_LEGACY_INTX_PENDING 0x1 3048 3049 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3050 { 3051 bool valid_irq; 3052 u32 intx_status; 3053 3054 switch (ctrl_info->irq_mode) { 3055 case IRQ_MODE_MSIX: 3056 valid_irq = true; 3057 break; 3058 case IRQ_MODE_INTX: 3059 intx_status = 3060 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3061 if (intx_status & PQI_LEGACY_INTX_PENDING) 3062 valid_irq = true; 3063 else 3064 valid_irq = false; 3065 break; 3066 case 
IRQ_MODE_NONE: 3067 default: 3068 valid_irq = false; 3069 break; 3070 } 3071 3072 return valid_irq; 3073 } 3074 3075 static irqreturn_t pqi_irq_handler(int irq, void *data) 3076 { 3077 struct pqi_ctrl_info *ctrl_info; 3078 struct pqi_queue_group *queue_group; 3079 unsigned int num_responses_handled; 3080 3081 queue_group = data; 3082 ctrl_info = queue_group->ctrl_info; 3083 3084 if (!pqi_is_valid_irq(ctrl_info)) 3085 return IRQ_NONE; 3086 3087 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3088 3089 if (irq == ctrl_info->event_irq) 3090 num_responses_handled += pqi_process_event_intr(ctrl_info); 3091 3092 if (num_responses_handled) 3093 atomic_inc(&ctrl_info->num_interrupts); 3094 3095 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3096 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3097 3098 return IRQ_HANDLED; 3099 } 3100 3101 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3102 { 3103 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3104 int i; 3105 int rc; 3106 3107 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3108 3109 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3110 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3111 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3112 if (rc) { 3113 dev_err(&pci_dev->dev, 3114 "irq %u init failed with error %d\n", 3115 pci_irq_vector(pci_dev, i), rc); 3116 return rc; 3117 } 3118 ctrl_info->num_msix_vectors_initialized++; 3119 } 3120 3121 return 0; 3122 } 3123 3124 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3125 { 3126 int i; 3127 3128 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3129 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3130 &ctrl_info->queue_groups[i]); 3131 3132 ctrl_info->num_msix_vectors_initialized = 0; 3133 } 3134 3135 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3136 { 3137 int num_vectors_enabled; 3138 3139 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3140 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3141 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3142 if (num_vectors_enabled < 0) { 3143 dev_err(&ctrl_info->pci_dev->dev, 3144 "MSI-X init failed with error %d\n", 3145 num_vectors_enabled); 3146 return num_vectors_enabled; 3147 } 3148 3149 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3150 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3151 return 0; 3152 } 3153 3154 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3155 { 3156 if (ctrl_info->num_msix_vectors_enabled) { 3157 pci_free_irq_vectors(ctrl_info->pci_dev); 3158 ctrl_info->num_msix_vectors_enabled = 0; 3159 } 3160 } 3161 3162 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3163 { 3164 unsigned int i; 3165 size_t alloc_length; 3166 size_t element_array_length_per_iq; 3167 size_t element_array_length_per_oq; 3168 void *element_array; 3169 void *next_queue_index; 3170 void *aligned_pointer; 3171 unsigned int num_inbound_queues; 3172 unsigned int num_outbound_queues; 3173 unsigned int num_queue_indexes; 3174 struct pqi_queue_group *queue_group; 3175 3176 element_array_length_per_iq = 3177 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3178 ctrl_info->num_elements_per_iq; 3179 element_array_length_per_oq = 3180 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3181 ctrl_info->num_elements_per_oq; 3182 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3183 num_outbound_queues = ctrl_info->num_queue_groups; 3184 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3185 3186 
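	/*
	 * First walk the layout with a NULL base pointer purely to size the
	 * allocation: element arrays for every inbound and outbound queue,
	 * the event queue elements, and the queue-index words, each rounded
	 * up to its required alignment.  The real addresses are then carved
	 * out of one coherent DMA buffer below, in the same order.
	 */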
aligned_pointer = NULL; 3187 3188 for (i = 0; i < num_inbound_queues; i++) { 3189 aligned_pointer = PTR_ALIGN(aligned_pointer, 3190 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3191 aligned_pointer += element_array_length_per_iq; 3192 } 3193 3194 for (i = 0; i < num_outbound_queues; i++) { 3195 aligned_pointer = PTR_ALIGN(aligned_pointer, 3196 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3197 aligned_pointer += element_array_length_per_oq; 3198 } 3199 3200 aligned_pointer = PTR_ALIGN(aligned_pointer, 3201 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3202 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3203 PQI_EVENT_OQ_ELEMENT_LENGTH; 3204 3205 for (i = 0; i < num_queue_indexes; i++) { 3206 aligned_pointer = PTR_ALIGN(aligned_pointer, 3207 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3208 aligned_pointer += sizeof(pqi_index_t); 3209 } 3210 3211 alloc_length = (size_t)aligned_pointer + 3212 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3213 3214 alloc_length += PQI_EXTRA_SGL_MEMORY; 3215 3216 ctrl_info->queue_memory_base = 3217 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3218 alloc_length, 3219 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3220 3221 if (!ctrl_info->queue_memory_base) 3222 return -ENOMEM; 3223 3224 ctrl_info->queue_memory_length = alloc_length; 3225 3226 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3227 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3228 3229 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3230 queue_group = &ctrl_info->queue_groups[i]; 3231 queue_group->iq_element_array[RAID_PATH] = element_array; 3232 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3233 ctrl_info->queue_memory_base_dma_handle + 3234 (element_array - ctrl_info->queue_memory_base); 3235 element_array += element_array_length_per_iq; 3236 element_array = PTR_ALIGN(element_array, 3237 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3238 queue_group->iq_element_array[AIO_PATH] = element_array; 3239 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3240 ctrl_info->queue_memory_base_dma_handle + 3241 (element_array - ctrl_info->queue_memory_base); 3242 element_array += element_array_length_per_iq; 3243 element_array = PTR_ALIGN(element_array, 3244 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3245 } 3246 3247 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3248 queue_group = &ctrl_info->queue_groups[i]; 3249 queue_group->oq_element_array = element_array; 3250 queue_group->oq_element_array_bus_addr = 3251 ctrl_info->queue_memory_base_dma_handle + 3252 (element_array - ctrl_info->queue_memory_base); 3253 element_array += element_array_length_per_oq; 3254 element_array = PTR_ALIGN(element_array, 3255 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3256 } 3257 3258 ctrl_info->event_queue.oq_element_array = element_array; 3259 ctrl_info->event_queue.oq_element_array_bus_addr = 3260 ctrl_info->queue_memory_base_dma_handle + 3261 (element_array - ctrl_info->queue_memory_base); 3262 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3263 PQI_EVENT_OQ_ELEMENT_LENGTH; 3264 3265 next_queue_index = PTR_ALIGN(element_array, 3266 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3267 3268 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3269 queue_group = &ctrl_info->queue_groups[i]; 3270 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3271 queue_group->iq_ci_bus_addr[RAID_PATH] = 3272 ctrl_info->queue_memory_base_dma_handle + 3273 (next_queue_index - ctrl_info->queue_memory_base); 3274 next_queue_index += sizeof(pqi_index_t); 3275 next_queue_index = PTR_ALIGN(next_queue_index, 3276 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3277 queue_group->iq_ci[AIO_PATH] = next_queue_index; 3278 
queue_group->iq_ci_bus_addr[AIO_PATH] = 3279 ctrl_info->queue_memory_base_dma_handle + 3280 (next_queue_index - ctrl_info->queue_memory_base); 3281 next_queue_index += sizeof(pqi_index_t); 3282 next_queue_index = PTR_ALIGN(next_queue_index, 3283 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3284 queue_group->oq_pi = next_queue_index; 3285 queue_group->oq_pi_bus_addr = 3286 ctrl_info->queue_memory_base_dma_handle + 3287 (next_queue_index - ctrl_info->queue_memory_base); 3288 next_queue_index += sizeof(pqi_index_t); 3289 next_queue_index = PTR_ALIGN(next_queue_index, 3290 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3291 } 3292 3293 ctrl_info->event_queue.oq_pi = next_queue_index; 3294 ctrl_info->event_queue.oq_pi_bus_addr = 3295 ctrl_info->queue_memory_base_dma_handle + 3296 (next_queue_index - ctrl_info->queue_memory_base); 3297 3298 return 0; 3299 } 3300 3301 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3302 { 3303 unsigned int i; 3304 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3305 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3306 3307 /* 3308 * Initialize the backpointers to the controller structure in 3309 * each operational queue group structure. 3310 */ 3311 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3312 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3313 3314 /* 3315 * Assign IDs to all operational queues. Note that the IDs 3316 * assigned to operational IQs are independent of the IDs 3317 * assigned to operational OQs. 3318 */ 3319 ctrl_info->event_queue.oq_id = next_oq_id++; 3320 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3321 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3322 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3323 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3324 } 3325 3326 /* 3327 * Assign MSI-X table entry indexes to all queues. Note that the 3328 * interrupt for the event queue is shared with the first queue group. 
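 * int_msg_num 0 for the event queue matches queue group 0, which is why
 * pqi_irq_handler() also drains the event queue when it runs on
 * ctrl_info->event_irq, i.e. MSI-X vector 0.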
3329 */ 3330 ctrl_info->event_queue.int_msg_num = 0; 3331 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3332 ctrl_info->queue_groups[i].int_msg_num = i; 3333 3334 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3335 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3336 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3337 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3338 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3339 } 3340 } 3341 3342 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3343 { 3344 size_t alloc_length; 3345 struct pqi_admin_queues_aligned *admin_queues_aligned; 3346 struct pqi_admin_queues *admin_queues; 3347 3348 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3349 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3350 3351 ctrl_info->admin_queue_memory_base = 3352 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3353 alloc_length, 3354 &ctrl_info->admin_queue_memory_base_dma_handle, 3355 GFP_KERNEL); 3356 3357 if (!ctrl_info->admin_queue_memory_base) 3358 return -ENOMEM; 3359 3360 ctrl_info->admin_queue_memory_length = alloc_length; 3361 3362 admin_queues = &ctrl_info->admin_queues; 3363 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3364 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3365 admin_queues->iq_element_array = 3366 &admin_queues_aligned->iq_element_array; 3367 admin_queues->oq_element_array = 3368 &admin_queues_aligned->oq_element_array; 3369 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3370 admin_queues->oq_pi = &admin_queues_aligned->oq_pi; 3371 3372 admin_queues->iq_element_array_bus_addr = 3373 ctrl_info->admin_queue_memory_base_dma_handle + 3374 (admin_queues->iq_element_array - 3375 ctrl_info->admin_queue_memory_base); 3376 admin_queues->oq_element_array_bus_addr = 3377 ctrl_info->admin_queue_memory_base_dma_handle + 3378 (admin_queues->oq_element_array - 3379 ctrl_info->admin_queue_memory_base); 3380 admin_queues->iq_ci_bus_addr = 3381 ctrl_info->admin_queue_memory_base_dma_handle + 3382 ((void *)admin_queues->iq_ci - 3383 ctrl_info->admin_queue_memory_base); 3384 admin_queues->oq_pi_bus_addr = 3385 ctrl_info->admin_queue_memory_base_dma_handle + 3386 ((void *)admin_queues->oq_pi - 3387 ctrl_info->admin_queue_memory_base); 3388 3389 return 0; 3390 } 3391 3392 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 3393 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3394 3395 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3396 { 3397 struct pqi_device_registers __iomem *pqi_registers; 3398 struct pqi_admin_queues *admin_queues; 3399 unsigned long timeout; 3400 u8 status; 3401 u32 reg; 3402 3403 pqi_registers = ctrl_info->pqi_registers; 3404 admin_queues = &ctrl_info->admin_queues; 3405 3406 writeq((u64)admin_queues->iq_element_array_bus_addr, 3407 &pqi_registers->admin_iq_element_array_addr); 3408 writeq((u64)admin_queues->oq_element_array_bus_addr, 3409 &pqi_registers->admin_oq_element_array_addr); 3410 writeq((u64)admin_queues->iq_ci_bus_addr, 3411 &pqi_registers->admin_iq_ci_addr); 3412 writeq((u64)admin_queues->oq_pi_bus_addr, 3413 &pqi_registers->admin_oq_pi_addr); 3414 3415 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3416 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | 3417 (admin_queues->int_msg_num << 16); 3418 writel(reg, &pqi_registers->admin_iq_num_elements); 3419 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3420 &pqi_registers->function_and_status_code); 3421 3422 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 3423 while (1) { 3424 status = 
readb(&pqi_registers->function_and_status_code); 3425 if (status == PQI_STATUS_IDLE) 3426 break; 3427 if (time_after(jiffies, timeout)) 3428 return -ETIMEDOUT; 3429 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3430 } 3431 3432 /* 3433 * The offset registers are not initialized to the correct 3434 * offsets until *after* the create admin queue pair command 3435 * completes successfully. 3436 */ 3437 admin_queues->iq_pi = ctrl_info->iomem_base + 3438 PQI_DEVICE_REGISTERS_OFFSET + 3439 readq(&pqi_registers->admin_iq_pi_offset); 3440 admin_queues->oq_ci = ctrl_info->iomem_base + 3441 PQI_DEVICE_REGISTERS_OFFSET + 3442 readq(&pqi_registers->admin_oq_ci_offset); 3443 3444 return 0; 3445 } 3446 3447 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3448 struct pqi_general_admin_request *request) 3449 { 3450 struct pqi_admin_queues *admin_queues; 3451 void *next_element; 3452 pqi_index_t iq_pi; 3453 3454 admin_queues = &ctrl_info->admin_queues; 3455 iq_pi = admin_queues->iq_pi_copy; 3456 3457 next_element = admin_queues->iq_element_array + 3458 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3459 3460 memcpy(next_element, request, sizeof(*request)); 3461 3462 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3463 admin_queues->iq_pi_copy = iq_pi; 3464 3465 /* 3466 * This write notifies the controller that an IU is available to be 3467 * processed. 3468 */ 3469 writel(iq_pi, admin_queues->iq_pi); 3470 } 3471 3472 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3473 3474 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3475 struct pqi_general_admin_response *response) 3476 { 3477 struct pqi_admin_queues *admin_queues; 3478 pqi_index_t oq_pi; 3479 pqi_index_t oq_ci; 3480 unsigned long timeout; 3481 3482 admin_queues = &ctrl_info->admin_queues; 3483 oq_ci = admin_queues->oq_ci_copy; 3484 3485 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 3486 3487 while (1) { 3488 oq_pi = *admin_queues->oq_pi; 3489 if (oq_pi != oq_ci) 3490 break; 3491 if (time_after(jiffies, timeout)) { 3492 dev_err(&ctrl_info->pci_dev->dev, 3493 "timed out waiting for admin response\n"); 3494 return -ETIMEDOUT; 3495 } 3496 if (!sis_is_firmware_running(ctrl_info)) 3497 return -ENXIO; 3498 usleep_range(1000, 2000); 3499 } 3500 3501 memcpy(response, admin_queues->oq_element_array + 3502 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3503 3504 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3505 admin_queues->oq_ci_copy = oq_ci; 3506 writel(oq_ci, admin_queues->oq_ci); 3507 3508 return 0; 3509 } 3510 3511 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3512 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3513 struct pqi_io_request *io_request) 3514 { 3515 struct pqi_io_request *next; 3516 void *next_element; 3517 pqi_index_t iq_pi; 3518 pqi_index_t iq_ci; 3519 size_t iu_length; 3520 unsigned long flags; 3521 unsigned int num_elements_needed; 3522 unsigned int num_elements_to_end_of_queue; 3523 size_t copy_count; 3524 struct pqi_iu_header *request; 3525 3526 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3527 3528 if (io_request) { 3529 io_request->queue_group = queue_group; 3530 list_add_tail(&io_request->request_list_entry, 3531 &queue_group->request_list[path]); 3532 } 3533 3534 iq_pi = queue_group->iq_pi_copy[path]; 3535 3536 list_for_each_entry_safe(io_request, next, 3537 &queue_group->request_list[path], request_list_entry) { 3538 3539 request = io_request->iu; 3540 3541 iu_length = get_unaligned_le16(&request->iu_length) + 3542 
PQI_REQUEST_HEADER_LENGTH; 3543 num_elements_needed = 3544 DIV_ROUND_UP(iu_length, 3545 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3546 3547 iq_ci = *queue_group->iq_ci[path]; 3548 3549 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3550 ctrl_info->num_elements_per_iq)) 3551 break; 3552 3553 put_unaligned_le16(queue_group->oq_id, 3554 &request->response_queue_id); 3555 3556 next_element = queue_group->iq_element_array[path] + 3557 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3558 3559 num_elements_to_end_of_queue = 3560 ctrl_info->num_elements_per_iq - iq_pi; 3561 3562 if (num_elements_needed <= num_elements_to_end_of_queue) { 3563 memcpy(next_element, request, iu_length); 3564 } else { 3565 copy_count = num_elements_to_end_of_queue * 3566 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3567 memcpy(next_element, request, copy_count); 3568 memcpy(queue_group->iq_element_array[path], 3569 (u8 *)request + copy_count, 3570 iu_length - copy_count); 3571 } 3572 3573 iq_pi = (iq_pi + num_elements_needed) % 3574 ctrl_info->num_elements_per_iq; 3575 3576 list_del(&io_request->request_list_entry); 3577 } 3578 3579 if (iq_pi != queue_group->iq_pi_copy[path]) { 3580 queue_group->iq_pi_copy[path] = iq_pi; 3581 /* 3582 * This write notifies the controller that one or more IUs are 3583 * available to be processed. 3584 */ 3585 writel(iq_pi, queue_group->iq_pi[path]); 3586 } 3587 3588 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3589 } 3590 3591 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3592 3593 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3594 struct completion *wait) 3595 { 3596 int rc; 3597 3598 while (1) { 3599 if (wait_for_completion_io_timeout(wait, 3600 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 3601 rc = 0; 3602 break; 3603 } 3604 3605 pqi_check_ctrl_health(ctrl_info); 3606 if (pqi_ctrl_offline(ctrl_info)) { 3607 rc = -ENXIO; 3608 break; 3609 } 3610 } 3611 3612 return rc; 3613 } 3614 3615 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3616 void *context) 3617 { 3618 struct completion *waiting = context; 3619 3620 complete(waiting); 3621 } 3622 3623 static int pqi_submit_raid_request_synchronous_with_io_request( 3624 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 3625 unsigned long timeout_msecs) 3626 { 3627 int rc = 0; 3628 DECLARE_COMPLETION_ONSTACK(wait); 3629 3630 io_request->io_complete_callback = pqi_raid_synchronous_complete; 3631 io_request->context = &wait; 3632 3633 pqi_start_io(ctrl_info, 3634 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 3635 io_request); 3636 3637 if (timeout_msecs == NO_TIMEOUT) { 3638 pqi_wait_for_completion_io(ctrl_info, &wait); 3639 } else { 3640 if (!wait_for_completion_io_timeout(&wait, 3641 msecs_to_jiffies(timeout_msecs))) { 3642 dev_warn(&ctrl_info->pci_dev->dev, 3643 "command timed out\n"); 3644 rc = -ETIMEDOUT; 3645 } 3646 } 3647 3648 return rc; 3649 } 3650 3651 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 3652 struct pqi_iu_header *request, unsigned int flags, 3653 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 3654 { 3655 int rc; 3656 struct pqi_io_request *io_request; 3657 unsigned long start_jiffies; 3658 unsigned long msecs_blocked; 3659 size_t iu_length; 3660 3661 /* 3662 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 3663 * are mutually exclusive. 
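 * Interruptible callers block on the semaphore with down_interruptible()
 * and no deadline, while timed callers use down_timeout() and have the
 * time spent waiting subtracted from the budget used for the remainder
 * of the request.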
3664 */ 3665 3666 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 3667 if (down_interruptible(&ctrl_info->sync_request_sem)) 3668 return -ERESTARTSYS; 3669 } else { 3670 if (timeout_msecs == NO_TIMEOUT) { 3671 down(&ctrl_info->sync_request_sem); 3672 } else { 3673 start_jiffies = jiffies; 3674 if (down_timeout(&ctrl_info->sync_request_sem, 3675 msecs_to_jiffies(timeout_msecs))) 3676 return -ETIMEDOUT; 3677 msecs_blocked = 3678 jiffies_to_msecs(jiffies - start_jiffies); 3679 if (msecs_blocked >= timeout_msecs) 3680 return -ETIMEDOUT; 3681 timeout_msecs -= msecs_blocked; 3682 } 3683 } 3684 3685 pqi_ctrl_busy(ctrl_info); 3686 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 3687 if (timeout_msecs == 0) { 3688 rc = -ETIMEDOUT; 3689 goto out; 3690 } 3691 3692 if (pqi_ctrl_offline(ctrl_info)) { 3693 rc = -ENXIO; 3694 goto out; 3695 } 3696 3697 io_request = pqi_alloc_io_request(ctrl_info); 3698 3699 put_unaligned_le16(io_request->index, 3700 &(((struct pqi_raid_path_request *)request)->request_id)); 3701 3702 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 3703 ((struct pqi_raid_path_request *)request)->error_index = 3704 ((struct pqi_raid_path_request *)request)->request_id; 3705 3706 iu_length = get_unaligned_le16(&request->iu_length) + 3707 PQI_REQUEST_HEADER_LENGTH; 3708 memcpy(io_request->iu, request, iu_length); 3709 3710 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info, 3711 io_request, timeout_msecs); 3712 3713 if (error_info) { 3714 if (io_request->error_info) 3715 memcpy(error_info, io_request->error_info, 3716 sizeof(*error_info)); 3717 else 3718 memset(error_info, 0, sizeof(*error_info)); 3719 } else if (rc == 0 && io_request->error_info) { 3720 u8 scsi_status; 3721 struct pqi_raid_error_info *raid_error_info; 3722 3723 raid_error_info = io_request->error_info; 3724 scsi_status = raid_error_info->status; 3725 3726 if (scsi_status == SAM_STAT_CHECK_CONDITION && 3727 raid_error_info->data_out_result == 3728 PQI_DATA_IN_OUT_UNDERFLOW) 3729 scsi_status = SAM_STAT_GOOD; 3730 3731 if (scsi_status != SAM_STAT_GOOD) 3732 rc = -EIO; 3733 } 3734 3735 pqi_free_io_request(io_request); 3736 3737 out: 3738 pqi_ctrl_unbusy(ctrl_info); 3739 up(&ctrl_info->sync_request_sem); 3740 3741 return rc; 3742 } 3743 3744 static int pqi_validate_admin_response( 3745 struct pqi_general_admin_response *response, u8 expected_function_code) 3746 { 3747 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 3748 return -EINVAL; 3749 3750 if (get_unaligned_le16(&response->header.iu_length) != 3751 PQI_GENERAL_ADMIN_IU_LENGTH) 3752 return -EINVAL; 3753 3754 if (response->function_code != expected_function_code) 3755 return -EINVAL; 3756 3757 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 3758 return -EINVAL; 3759 3760 return 0; 3761 } 3762 3763 static int pqi_submit_admin_request_synchronous( 3764 struct pqi_ctrl_info *ctrl_info, 3765 struct pqi_general_admin_request *request, 3766 struct pqi_general_admin_response *response) 3767 { 3768 int rc; 3769 3770 pqi_submit_admin_request(ctrl_info, request); 3771 3772 rc = pqi_poll_for_admin_response(ctrl_info, response); 3773 3774 if (rc == 0) 3775 rc = pqi_validate_admin_response(response, 3776 request->function_code); 3777 3778 return rc; 3779 } 3780 3781 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 3782 { 3783 int rc; 3784 struct pqi_general_admin_request request; 3785 struct pqi_general_admin_response response; 3786 struct pqi_device_capability *capability; 3787 struct 
pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 3788 3789 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 3790 if (!capability) 3791 return -ENOMEM; 3792 3793 memset(&request, 0, sizeof(request)); 3794 3795 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3796 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3797 &request.header.iu_length); 3798 request.function_code = 3799 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 3800 put_unaligned_le32(sizeof(*capability), 3801 &request.data.report_device_capability.buffer_length); 3802 3803 rc = pqi_map_single(ctrl_info->pci_dev, 3804 &request.data.report_device_capability.sg_descriptor, 3805 capability, sizeof(*capability), 3806 PCI_DMA_FROMDEVICE); 3807 if (rc) 3808 goto out; 3809 3810 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3811 &response); 3812 3813 pqi_pci_unmap(ctrl_info->pci_dev, 3814 &request.data.report_device_capability.sg_descriptor, 1, 3815 PCI_DMA_FROMDEVICE); 3816 3817 if (rc) 3818 goto out; 3819 3820 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 3821 rc = -EIO; 3822 goto out; 3823 } 3824 3825 ctrl_info->max_inbound_queues = 3826 get_unaligned_le16(&capability->max_inbound_queues); 3827 ctrl_info->max_elements_per_iq = 3828 get_unaligned_le16(&capability->max_elements_per_iq); 3829 ctrl_info->max_iq_element_length = 3830 get_unaligned_le16(&capability->max_iq_element_length) 3831 * 16; 3832 ctrl_info->max_outbound_queues = 3833 get_unaligned_le16(&capability->max_outbound_queues); 3834 ctrl_info->max_elements_per_oq = 3835 get_unaligned_le16(&capability->max_elements_per_oq); 3836 ctrl_info->max_oq_element_length = 3837 get_unaligned_le16(&capability->max_oq_element_length) 3838 * 16; 3839 3840 sop_iu_layer_descriptor = 3841 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 3842 3843 ctrl_info->max_inbound_iu_length_per_firmware = 3844 get_unaligned_le16( 3845 &sop_iu_layer_descriptor->max_inbound_iu_length); 3846 ctrl_info->inbound_spanning_supported = 3847 sop_iu_layer_descriptor->inbound_spanning_supported; 3848 ctrl_info->outbound_spanning_supported = 3849 sop_iu_layer_descriptor->outbound_spanning_supported; 3850 3851 out: 3852 kfree(capability); 3853 3854 return rc; 3855 } 3856 3857 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 3858 { 3859 if (ctrl_info->max_iq_element_length < 3860 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3861 dev_err(&ctrl_info->pci_dev->dev, 3862 "max. inbound queue element length of %d is less than the required length of %d\n", 3863 ctrl_info->max_iq_element_length, 3864 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3865 return -EINVAL; 3866 } 3867 3868 if (ctrl_info->max_oq_element_length < 3869 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 3870 dev_err(&ctrl_info->pci_dev->dev, 3871 "max. outbound queue element length of %d is less than the required length of %d\n", 3872 ctrl_info->max_oq_element_length, 3873 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3874 return -EINVAL; 3875 } 3876 3877 if (ctrl_info->max_inbound_iu_length_per_firmware < 3878 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3879 dev_err(&ctrl_info->pci_dev->dev, 3880 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 3881 ctrl_info->max_inbound_iu_length_per_firmware, 3882 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3883 return -EINVAL; 3884 } 3885 3886 if (!ctrl_info->inbound_spanning_supported) { 3887 dev_err(&ctrl_info->pci_dev->dev, 3888 "the controller does not support inbound spanning\n"); 3889 return -EINVAL; 3890 } 3891 3892 if (ctrl_info->outbound_spanning_supported) { 3893 dev_err(&ctrl_info->pci_dev->dev, 3894 "the controller supports outbound spanning but this driver does not\n"); 3895 return -EINVAL; 3896 } 3897 3898 return 0; 3899 } 3900 3901 static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info, 3902 bool inbound_queue, u16 queue_id) 3903 { 3904 struct pqi_general_admin_request request; 3905 struct pqi_general_admin_response response; 3906 3907 memset(&request, 0, sizeof(request)); 3908 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3909 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3910 &request.header.iu_length); 3911 if (inbound_queue) 3912 request.function_code = 3913 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ; 3914 else 3915 request.function_code = 3916 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ; 3917 put_unaligned_le16(queue_id, 3918 &request.data.delete_operational_queue.queue_id); 3919 3920 return pqi_submit_admin_request_synchronous(ctrl_info, &request, 3921 &response); 3922 } 3923 3924 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 3925 { 3926 int rc; 3927 struct pqi_event_queue *event_queue; 3928 struct pqi_general_admin_request request; 3929 struct pqi_general_admin_response response; 3930 3931 event_queue = &ctrl_info->event_queue; 3932 3933 /* 3934 * Create OQ (Outbound Queue - device to host queue) to dedicate 3935 * to events. 3936 */ 3937 memset(&request, 0, sizeof(request)); 3938 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3939 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3940 &request.header.iu_length); 3941 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 3942 put_unaligned_le16(event_queue->oq_id, 3943 &request.data.create_operational_oq.queue_id); 3944 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 3945 &request.data.create_operational_oq.element_array_addr); 3946 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 3947 &request.data.create_operational_oq.pi_addr); 3948 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 3949 &request.data.create_operational_oq.num_elements); 3950 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 3951 &request.data.create_operational_oq.element_length); 3952 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 3953 put_unaligned_le16(event_queue->int_msg_num, 3954 &request.data.create_operational_oq.int_msg_num); 3955 3956 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3957 &response); 3958 if (rc) 3959 return rc; 3960 3961 event_queue->oq_ci = ctrl_info->iomem_base + 3962 PQI_DEVICE_REGISTERS_OFFSET + 3963 get_unaligned_le64( 3964 &response.data.create_operational_oq.oq_ci_offset); 3965 3966 return 0; 3967 } 3968 3969 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 3970 unsigned int group_number) 3971 { 3972 int rc; 3973 struct pqi_queue_group *queue_group; 3974 struct pqi_general_admin_request request; 3975 struct pqi_general_admin_response response; 3976 3977 queue_group = &ctrl_info->queue_groups[group_number]; 3978 3979 /* 3980 * Create IQ (Inbound Queue - host to device queue) for 3981 * RAID path. 
3982 */ 3983 memset(&request, 0, sizeof(request)); 3984 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3985 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3986 &request.header.iu_length); 3987 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 3988 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 3989 &request.data.create_operational_iq.queue_id); 3990 put_unaligned_le64( 3991 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 3992 &request.data.create_operational_iq.element_array_addr); 3993 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 3994 &request.data.create_operational_iq.ci_addr); 3995 put_unaligned_le16(ctrl_info->num_elements_per_iq, 3996 &request.data.create_operational_iq.num_elements); 3997 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 3998 &request.data.create_operational_iq.element_length); 3999 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4000 4001 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4002 &response); 4003 if (rc) { 4004 dev_err(&ctrl_info->pci_dev->dev, 4005 "error creating inbound RAID queue\n"); 4006 return rc; 4007 } 4008 4009 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4010 PQI_DEVICE_REGISTERS_OFFSET + 4011 get_unaligned_le64( 4012 &response.data.create_operational_iq.iq_pi_offset); 4013 4014 /* 4015 * Create IQ (Inbound Queue - host to device queue) for 4016 * Advanced I/O (AIO) path. 4017 */ 4018 memset(&request, 0, sizeof(request)); 4019 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4020 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4021 &request.header.iu_length); 4022 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4023 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4024 &request.data.create_operational_iq.queue_id); 4025 put_unaligned_le64((u64)queue_group-> 4026 iq_element_array_bus_addr[AIO_PATH], 4027 &request.data.create_operational_iq.element_array_addr); 4028 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4029 &request.data.create_operational_iq.ci_addr); 4030 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4031 &request.data.create_operational_iq.num_elements); 4032 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4033 &request.data.create_operational_iq.element_length); 4034 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4035 4036 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4037 &response); 4038 if (rc) { 4039 dev_err(&ctrl_info->pci_dev->dev, 4040 "error creating inbound AIO queue\n"); 4041 goto delete_inbound_queue_raid; 4042 } 4043 4044 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4045 PQI_DEVICE_REGISTERS_OFFSET + 4046 get_unaligned_le64( 4047 &response.data.create_operational_iq.iq_pi_offset); 4048 4049 /* 4050 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4051 * assumed to be for RAID path I/O unless we change the queue's 4052 * property. 
4053 */ 4054 memset(&request, 0, sizeof(request)); 4055 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4056 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4057 &request.header.iu_length); 4058 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4059 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4060 &request.data.change_operational_iq_properties.queue_id); 4061 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4062 &request.data.change_operational_iq_properties.vendor_specific); 4063 4064 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4065 &response); 4066 if (rc) { 4067 dev_err(&ctrl_info->pci_dev->dev, 4068 "error changing queue property\n"); 4069 goto delete_inbound_queue_aio; 4070 } 4071 4072 /* 4073 * Create OQ (Outbound Queue - device to host queue). 4074 */ 4075 memset(&request, 0, sizeof(request)); 4076 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4077 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4078 &request.header.iu_length); 4079 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4080 put_unaligned_le16(queue_group->oq_id, 4081 &request.data.create_operational_oq.queue_id); 4082 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4083 &request.data.create_operational_oq.element_array_addr); 4084 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4085 &request.data.create_operational_oq.pi_addr); 4086 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4087 &request.data.create_operational_oq.num_elements); 4088 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4089 &request.data.create_operational_oq.element_length); 4090 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4091 put_unaligned_le16(queue_group->int_msg_num, 4092 &request.data.create_operational_oq.int_msg_num); 4093 4094 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4095 &response); 4096 if (rc) { 4097 dev_err(&ctrl_info->pci_dev->dev, 4098 "error creating outbound queue\n"); 4099 goto delete_inbound_queue_aio; 4100 } 4101 4102 queue_group->oq_ci = ctrl_info->iomem_base + 4103 PQI_DEVICE_REGISTERS_OFFSET + 4104 get_unaligned_le64( 4105 &response.data.create_operational_oq.oq_ci_offset); 4106 4107 return 0; 4108 4109 delete_inbound_queue_aio: 4110 pqi_delete_operational_queue(ctrl_info, true, 4111 queue_group->iq_id[AIO_PATH]); 4112 4113 delete_inbound_queue_raid: 4114 pqi_delete_operational_queue(ctrl_info, true, 4115 queue_group->iq_id[RAID_PATH]); 4116 4117 return rc; 4118 } 4119 4120 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4121 { 4122 int rc; 4123 unsigned int i; 4124 4125 rc = pqi_create_event_queue(ctrl_info); 4126 if (rc) { 4127 dev_err(&ctrl_info->pci_dev->dev, 4128 "error creating event queue\n"); 4129 return rc; 4130 } 4131 4132 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4133 rc = pqi_create_queue_group(ctrl_info, i); 4134 if (rc) { 4135 dev_err(&ctrl_info->pci_dev->dev, 4136 "error creating queue group number %u/%u\n", 4137 i, ctrl_info->num_queue_groups); 4138 return rc; 4139 } 4140 } 4141 4142 return 0; 4143 } 4144 4145 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4146 (offsetof(struct pqi_event_config, descriptors) + \ 4147 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4148 4149 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4150 bool enable_events) 4151 { 4152 int rc; 4153 unsigned int i; 4154 struct pqi_event_config *event_config; 4155 struct pqi_event_descriptor *event_descriptor; 4156 struct 
pqi_general_management_request request; 4157 4158 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4159 GFP_KERNEL); 4160 if (!event_config) 4161 return -ENOMEM; 4162 4163 memset(&request, 0, sizeof(request)); 4164 4165 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4166 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4167 data.report_event_configuration.sg_descriptors[1]) - 4168 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4169 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4170 &request.data.report_event_configuration.buffer_length); 4171 4172 rc = pqi_map_single(ctrl_info->pci_dev, 4173 request.data.report_event_configuration.sg_descriptors, 4174 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4175 PCI_DMA_FROMDEVICE); 4176 if (rc) 4177 goto out; 4178 4179 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4180 0, NULL, NO_TIMEOUT); 4181 4182 pqi_pci_unmap(ctrl_info->pci_dev, 4183 request.data.report_event_configuration.sg_descriptors, 1, 4184 PCI_DMA_FROMDEVICE); 4185 4186 if (rc) 4187 goto out; 4188 4189 for (i = 0; i < event_config->num_event_descriptors; i++) { 4190 event_descriptor = &event_config->descriptors[i]; 4191 if (enable_events && 4192 pqi_is_supported_event(event_descriptor->event_type)) 4193 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4194 &event_descriptor->oq_id); 4195 else 4196 put_unaligned_le16(0, &event_descriptor->oq_id); 4197 } 4198 4199 memset(&request, 0, sizeof(request)); 4200 4201 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4202 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4203 data.report_event_configuration.sg_descriptors[1]) - 4204 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4205 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4206 &request.data.report_event_configuration.buffer_length); 4207 4208 rc = pqi_map_single(ctrl_info->pci_dev, 4209 request.data.report_event_configuration.sg_descriptors, 4210 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4211 PCI_DMA_TODEVICE); 4212 if (rc) 4213 goto out; 4214 4215 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4216 NULL, NO_TIMEOUT); 4217 4218 pqi_pci_unmap(ctrl_info->pci_dev, 4219 request.data.report_event_configuration.sg_descriptors, 1, 4220 PCI_DMA_TODEVICE); 4221 4222 out: 4223 kfree(event_config); 4224 4225 return rc; 4226 } 4227 4228 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4229 { 4230 return pqi_configure_events(ctrl_info, true); 4231 } 4232 4233 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4234 { 4235 return pqi_configure_events(ctrl_info, false); 4236 } 4237 4238 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4239 { 4240 unsigned int i; 4241 struct device *dev; 4242 size_t sg_chain_buffer_length; 4243 struct pqi_io_request *io_request; 4244 4245 if (!ctrl_info->io_request_pool) 4246 return; 4247 4248 dev = &ctrl_info->pci_dev->dev; 4249 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4250 io_request = ctrl_info->io_request_pool; 4251 4252 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4253 kfree(io_request->iu); 4254 if (!io_request->sg_chain_buffer) 4255 break; 4256 dma_free_coherent(dev, sg_chain_buffer_length, 4257 io_request->sg_chain_buffer, 4258 io_request->sg_chain_buffer_dma_handle); 4259 io_request++; 4260 } 4261 4262 kfree(ctrl_info->io_request_pool); 4263 ctrl_info->io_request_pool = NULL; 4264 } 4265 4266 
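/*
 * Layout of the per-controller I/O request pool set up below:
 * pqi_alloc_io_resources() kzalloc's an array of max_io_slots
 * pqi_io_request slots and gives each slot a kmalloc'ed IU buffer of
 * max_inbound_iu_length bytes plus a DMA-coherent scatter-gather chain
 * buffer of sg_chain_buffer_length bytes.  pqi_free_all_io_requests()
 * above also copes with a partially built pool: it stops at the first
 * slot whose chain buffer was never allocated, which is safe because
 * the pool array starts out zeroed.
 */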
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4267 { 4268 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4269 ctrl_info->error_buffer_length, 4270 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4271 4272 if (!ctrl_info->error_buffer) 4273 return -ENOMEM; 4274 4275 return 0; 4276 } 4277 4278 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4279 { 4280 unsigned int i; 4281 void *sg_chain_buffer; 4282 size_t sg_chain_buffer_length; 4283 dma_addr_t sg_chain_buffer_dma_handle; 4284 struct device *dev; 4285 struct pqi_io_request *io_request; 4286 4287 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots * 4288 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4289 4290 if (!ctrl_info->io_request_pool) { 4291 dev_err(&ctrl_info->pci_dev->dev, 4292 "failed to allocate I/O request pool\n"); 4293 goto error; 4294 } 4295 4296 dev = &ctrl_info->pci_dev->dev; 4297 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4298 io_request = ctrl_info->io_request_pool; 4299 4300 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4301 io_request->iu = 4302 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4303 4304 if (!io_request->iu) { 4305 dev_err(&ctrl_info->pci_dev->dev, 4306 "failed to allocate IU buffers\n"); 4307 goto error; 4308 } 4309 4310 sg_chain_buffer = dma_alloc_coherent(dev, 4311 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4312 GFP_KERNEL); 4313 4314 if (!sg_chain_buffer) { 4315 dev_err(&ctrl_info->pci_dev->dev, 4316 "failed to allocate PQI scatter-gather chain buffers\n"); 4317 goto error; 4318 } 4319 4320 io_request->index = i; 4321 io_request->sg_chain_buffer = sg_chain_buffer; 4322 io_request->sg_chain_buffer_dma_handle = 4323 sg_chain_buffer_dma_handle; 4324 io_request++; 4325 } 4326 4327 return 0; 4328 4329 error: 4330 pqi_free_all_io_requests(ctrl_info); 4331 4332 return -ENOMEM; 4333 } 4334 4335 /* 4336 * Calculate required resources that are sized based on max. outstanding 4337 * requests and max. transfer size. 4338 */ 4339 4340 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4341 { 4342 u32 max_transfer_size; 4343 u32 max_sg_entries; 4344 4345 ctrl_info->scsi_ml_can_queue = 4346 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4347 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4348 4349 ctrl_info->error_buffer_length = 4350 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4351 4352 if (reset_devices) 4353 max_transfer_size = min(ctrl_info->max_transfer_size, 4354 PQI_MAX_TRANSFER_SIZE_KDUMP); 4355 else 4356 max_transfer_size = min(ctrl_info->max_transfer_size, 4357 PQI_MAX_TRANSFER_SIZE); 4358 4359 max_sg_entries = max_transfer_size / PAGE_SIZE; 4360 4361 /* +1 to cover when the buffer is not page-aligned. 
*/ 4362 max_sg_entries++; 4363 4364 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4365 4366 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4367 4368 ctrl_info->sg_chain_buffer_length = 4369 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4370 PQI_EXTRA_SGL_MEMORY; 4371 ctrl_info->sg_tablesize = max_sg_entries; 4372 ctrl_info->max_sectors = max_transfer_size / 512; 4373 } 4374 4375 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4376 { 4377 int num_queue_groups; 4378 u16 num_elements_per_iq; 4379 u16 num_elements_per_oq; 4380 4381 if (reset_devices) { 4382 num_queue_groups = 1; 4383 } else { 4384 int num_cpus; 4385 int max_queue_groups; 4386 4387 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4388 ctrl_info->max_outbound_queues - 1); 4389 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4390 4391 num_cpus = num_online_cpus(); 4392 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4393 num_queue_groups = min(num_queue_groups, max_queue_groups); 4394 } 4395 4396 ctrl_info->num_queue_groups = num_queue_groups; 4397 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4398 4399 /* 4400 * Make sure that the max. inbound IU length is an even multiple 4401 * of our inbound element length. 4402 */ 4403 ctrl_info->max_inbound_iu_length = 4404 (ctrl_info->max_inbound_iu_length_per_firmware / 4405 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4406 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4407 4408 num_elements_per_iq = 4409 (ctrl_info->max_inbound_iu_length / 4410 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4411 4412 /* Add one because one element in each queue is unusable. */ 4413 num_elements_per_iq++; 4414 4415 num_elements_per_iq = min(num_elements_per_iq, 4416 ctrl_info->max_elements_per_iq); 4417 4418 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4419 num_elements_per_oq = min(num_elements_per_oq, 4420 ctrl_info->max_elements_per_oq); 4421 4422 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4423 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4424 4425 ctrl_info->max_sg_per_iu = 4426 ((ctrl_info->max_inbound_iu_length - 4427 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4428 sizeof(struct pqi_sg_descriptor)) + 4429 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4430 } 4431 4432 static inline void pqi_set_sg_descriptor( 4433 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4434 { 4435 u64 address = (u64)sg_dma_address(sg); 4436 unsigned int length = sg_dma_len(sg); 4437 4438 put_unaligned_le64(address, &sg_descriptor->address); 4439 put_unaligned_le32(length, &sg_descriptor->length); 4440 put_unaligned_le32(0, &sg_descriptor->flags); 4441 } 4442 4443 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4444 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4445 struct pqi_io_request *io_request) 4446 { 4447 int i; 4448 u16 iu_length; 4449 int sg_count; 4450 bool chained; 4451 unsigned int num_sg_in_iu; 4452 unsigned int max_sg_per_iu; 4453 struct scatterlist *sg; 4454 struct pqi_sg_descriptor *sg_descriptor; 4455 4456 sg_count = scsi_dma_map(scmd); 4457 if (sg_count < 0) 4458 return sg_count; 4459 4460 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4461 PQI_REQUEST_HEADER_LENGTH; 4462 4463 if (sg_count == 0) 4464 goto out; 4465 4466 sg = scsi_sglist(scmd); 4467 sg_descriptor = request->sg_descriptors; 4468 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4469 chained = false; 4470 num_sg_in_iu = 0; 4471 i = 0; 4472 4473 while (1) { 4474 
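/*
 * Walk the DMA-mapped scatterlist.  Data descriptors are written into
 * the IU's embedded sg_descriptors[] array first; max_sg_per_iu was
 * reduced by one above so that, if entries remain, the last embedded
 * slot can be turned into a CISS_SG_CHAIN descriptor pointing at
 * io_request->sg_chain_buffer (its length field holds the byte count
 * of the descriptors that continue there).  num_sg_in_iu counts only
 * descriptors carried in the IU itself (data entries plus the chain
 * entry), and that is what the IU length is grown by after the loop.
 */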
pqi_set_sg_descriptor(sg_descriptor, sg); 4475 if (!chained) 4476 num_sg_in_iu++; 4477 i++; 4478 if (i == sg_count) 4479 break; 4480 sg_descriptor++; 4481 if (i == max_sg_per_iu) { 4482 put_unaligned_le64( 4483 (u64)io_request->sg_chain_buffer_dma_handle, 4484 &sg_descriptor->address); 4485 put_unaligned_le32((sg_count - num_sg_in_iu) 4486 * sizeof(*sg_descriptor), 4487 &sg_descriptor->length); 4488 put_unaligned_le32(CISS_SG_CHAIN, 4489 &sg_descriptor->flags); 4490 chained = true; 4491 num_sg_in_iu++; 4492 sg_descriptor = io_request->sg_chain_buffer; 4493 } 4494 sg = sg_next(sg); 4495 } 4496 4497 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4498 request->partial = chained; 4499 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4500 4501 out: 4502 put_unaligned_le16(iu_length, &request->header.iu_length); 4503 4504 return 0; 4505 } 4506 4507 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4508 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4509 struct pqi_io_request *io_request) 4510 { 4511 int i; 4512 u16 iu_length; 4513 int sg_count; 4514 bool chained; 4515 unsigned int num_sg_in_iu; 4516 unsigned int max_sg_per_iu; 4517 struct scatterlist *sg; 4518 struct pqi_sg_descriptor *sg_descriptor; 4519 4520 sg_count = scsi_dma_map(scmd); 4521 if (sg_count < 0) 4522 return sg_count; 4523 4524 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4525 PQI_REQUEST_HEADER_LENGTH; 4526 num_sg_in_iu = 0; 4527 4528 if (sg_count == 0) 4529 goto out; 4530 4531 sg = scsi_sglist(scmd); 4532 sg_descriptor = request->sg_descriptors; 4533 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4534 chained = false; 4535 i = 0; 4536 4537 while (1) { 4538 pqi_set_sg_descriptor(sg_descriptor, sg); 4539 if (!chained) 4540 num_sg_in_iu++; 4541 i++; 4542 if (i == sg_count) 4543 break; 4544 sg_descriptor++; 4545 if (i == max_sg_per_iu) { 4546 put_unaligned_le64( 4547 (u64)io_request->sg_chain_buffer_dma_handle, 4548 &sg_descriptor->address); 4549 put_unaligned_le32((sg_count - num_sg_in_iu) 4550 * sizeof(*sg_descriptor), 4551 &sg_descriptor->length); 4552 put_unaligned_le32(CISS_SG_CHAIN, 4553 &sg_descriptor->flags); 4554 chained = true; 4555 num_sg_in_iu++; 4556 sg_descriptor = io_request->sg_chain_buffer; 4557 } 4558 sg = sg_next(sg); 4559 } 4560 4561 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4562 request->partial = chained; 4563 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4564 4565 out: 4566 put_unaligned_le16(iu_length, &request->header.iu_length); 4567 request->num_sg_descriptors = num_sg_in_iu; 4568 4569 return 0; 4570 } 4571 4572 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4573 void *context) 4574 { 4575 struct scsi_cmnd *scmd; 4576 4577 scmd = io_request->scmd; 4578 pqi_free_io_request(io_request); 4579 scsi_dma_unmap(scmd); 4580 pqi_scsi_done(scmd); 4581 } 4582 4583 static int pqi_raid_submit_scsi_cmd_with_io_request( 4584 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4585 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4586 struct pqi_queue_group *queue_group) 4587 { 4588 int rc; 4589 size_t cdb_length; 4590 struct pqi_raid_path_request *request; 4591 4592 io_request->io_complete_callback = pqi_raid_io_complete; 4593 io_request->scmd = scmd; 4594 4595 request = io_request->iu; 4596 memset(request, 0, 4597 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4598 4599 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4600 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4601 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4602 put_unaligned_le16(io_request->index, &request->request_id); 4603 request->error_index = request->request_id; 4604 memcpy(request->lun_number, device->scsi3addr, 4605 sizeof(request->lun_number)); 4606 4607 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4608 memcpy(request->cdb, scmd->cmnd, cdb_length); 4609 4610 switch (cdb_length) { 4611 case 6: 4612 case 10: 4613 case 12: 4614 case 16: 4615 /* No bytes in the Additional CDB bytes field */ 4616 request->additional_cdb_bytes_usage = 4617 SOP_ADDITIONAL_CDB_BYTES_0; 4618 break; 4619 case 20: 4620 /* 4 bytes in the Additional cdb field */ 4621 request->additional_cdb_bytes_usage = 4622 SOP_ADDITIONAL_CDB_BYTES_4; 4623 break; 4624 case 24: 4625 /* 8 bytes in the Additional cdb field */ 4626 request->additional_cdb_bytes_usage = 4627 SOP_ADDITIONAL_CDB_BYTES_8; 4628 break; 4629 case 28: 4630 /* 12 bytes in the Additional cdb field */ 4631 request->additional_cdb_bytes_usage = 4632 SOP_ADDITIONAL_CDB_BYTES_12; 4633 break; 4634 case 32: 4635 default: 4636 /* 16 bytes in the Additional cdb field */ 4637 request->additional_cdb_bytes_usage = 4638 SOP_ADDITIONAL_CDB_BYTES_16; 4639 break; 4640 } 4641 4642 switch (scmd->sc_data_direction) { 4643 case DMA_TO_DEVICE: 4644 request->data_direction = SOP_READ_FLAG; 4645 break; 4646 case DMA_FROM_DEVICE: 4647 request->data_direction = SOP_WRITE_FLAG; 4648 break; 4649 case DMA_NONE: 4650 request->data_direction = SOP_NO_DIRECTION_FLAG; 4651 break; 4652 case DMA_BIDIRECTIONAL: 4653 request->data_direction = SOP_BIDIRECTIONAL; 4654 break; 4655 default: 4656 dev_err(&ctrl_info->pci_dev->dev, 4657 "unknown data direction: %d\n", 4658 scmd->sc_data_direction); 4659 break; 4660 } 4661 4662 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 4663 if (rc) { 4664 pqi_free_io_request(io_request); 4665 return SCSI_MLQUEUE_HOST_BUSY; 4666 } 4667 4668 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 4669 4670 return 0; 4671 } 4672 4673 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4674 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4675 struct pqi_queue_group *queue_group) 4676 { 4677 struct pqi_io_request *io_request; 4678 4679 io_request = pqi_alloc_io_request(ctrl_info); 4680 4681 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4682 device, scmd, queue_group); 4683 } 4684 4685 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 4686 { 4687 if (!pqi_ctrl_blocked(ctrl_info)) 4688 schedule_work(&ctrl_info->raid_bypass_retry_work); 4689 } 4690 4691 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 4692 { 4693 struct scsi_cmnd *scmd; 4694 struct pqi_scsi_dev *device; 4695 struct pqi_ctrl_info *ctrl_info; 4696 4697 if (!io_request->raid_bypass) 4698 return false; 4699 4700 scmd = io_request->scmd; 4701 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 4702 return false; 4703 if (host_byte(scmd->result) == DID_NO_CONNECT) 4704 return false; 4705 4706 device = scmd->device->hostdata; 4707 if (pqi_device_offline(device)) 4708 return false; 4709 4710 ctrl_info = shost_to_hba(scmd->device->host); 4711 if (pqi_ctrl_offline(ctrl_info)) 4712 return false; 4713 4714 return true; 4715 } 4716 4717 static inline void pqi_add_to_raid_bypass_retry_list( 4718 struct pqi_ctrl_info *ctrl_info, 4719 struct pqi_io_request *io_request, bool at_head) 4720 { 4721 unsigned long flags; 4722 4723 
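/*
 * Add the request to the controller-wide bypass retry list under its
 * spinlock.  New retries queued from the AIO completion path go to the
 * tail; at_head is used by the retry worker to put a request back at
 * the front when resubmission fails, preserving the original order.
 */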
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4724 if (at_head) 4725 list_add(&io_request->request_list_entry, 4726 &ctrl_info->raid_bypass_retry_list); 4727 else 4728 list_add_tail(&io_request->request_list_entry, 4729 &ctrl_info->raid_bypass_retry_list); 4730 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4731 } 4732 4733 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 4734 void *context) 4735 { 4736 struct scsi_cmnd *scmd; 4737 4738 scmd = io_request->scmd; 4739 pqi_free_io_request(io_request); 4740 pqi_scsi_done(scmd); 4741 } 4742 4743 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 4744 { 4745 struct scsi_cmnd *scmd; 4746 struct pqi_ctrl_info *ctrl_info; 4747 4748 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 4749 scmd = io_request->scmd; 4750 scmd->result = 0; 4751 ctrl_info = shost_to_hba(scmd->device->host); 4752 4753 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 4754 pqi_schedule_bypass_retry(ctrl_info); 4755 } 4756 4757 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 4758 { 4759 struct scsi_cmnd *scmd; 4760 struct pqi_scsi_dev *device; 4761 struct pqi_ctrl_info *ctrl_info; 4762 struct pqi_queue_group *queue_group; 4763 4764 scmd = io_request->scmd; 4765 device = scmd->device->hostdata; 4766 if (pqi_device_in_reset(device)) { 4767 pqi_free_io_request(io_request); 4768 set_host_byte(scmd, DID_RESET); 4769 pqi_scsi_done(scmd); 4770 return 0; 4771 } 4772 4773 ctrl_info = shost_to_hba(scmd->device->host); 4774 queue_group = io_request->queue_group; 4775 4776 pqi_reinit_io_request(io_request); 4777 4778 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4779 device, scmd, queue_group); 4780 } 4781 4782 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 4783 struct pqi_ctrl_info *ctrl_info) 4784 { 4785 unsigned long flags; 4786 struct pqi_io_request *io_request; 4787 4788 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4789 io_request = list_first_entry_or_null( 4790 &ctrl_info->raid_bypass_retry_list, 4791 struct pqi_io_request, request_list_entry); 4792 if (io_request) 4793 list_del(&io_request->request_list_entry); 4794 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4795 4796 return io_request; 4797 } 4798 4799 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 4800 { 4801 int rc; 4802 struct pqi_io_request *io_request; 4803 4804 pqi_ctrl_busy(ctrl_info); 4805 4806 while (1) { 4807 if (pqi_ctrl_blocked(ctrl_info)) 4808 break; 4809 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 4810 if (!io_request) 4811 break; 4812 rc = pqi_retry_raid_bypass(io_request); 4813 if (rc) { 4814 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 4815 true); 4816 pqi_schedule_bypass_retry(ctrl_info); 4817 break; 4818 } 4819 } 4820 4821 pqi_ctrl_unbusy(ctrl_info); 4822 } 4823 4824 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 4825 { 4826 struct pqi_ctrl_info *ctrl_info; 4827 4828 ctrl_info = container_of(work, struct pqi_ctrl_info, 4829 raid_bypass_retry_work); 4830 pqi_retry_raid_bypass_requests(ctrl_info); 4831 } 4832 4833 static void pqi_clear_all_queued_raid_bypass_retries( 4834 struct pqi_ctrl_info *ctrl_info) 4835 { 4836 unsigned long flags; 4837 4838 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4839 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 4840 
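/*
 * Re-initializing the head under the lock empties the retry list in one
 * step.  The io_requests that were linked here are not completed by this
 * function; the caller is expected to fail or free them separately
 * (e.g. as part of taking the controller offline).
 */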
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4841 } 4842 4843 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 4844 void *context) 4845 { 4846 struct scsi_cmnd *scmd; 4847 4848 scmd = io_request->scmd; 4849 scsi_dma_unmap(scmd); 4850 if (io_request->status == -EAGAIN) 4851 set_host_byte(scmd, DID_IMM_RETRY); 4852 else if (pqi_raid_bypass_retry_needed(io_request)) { 4853 pqi_queue_raid_bypass_retry(io_request); 4854 return; 4855 } 4856 pqi_free_io_request(io_request); 4857 pqi_scsi_done(scmd); 4858 } 4859 4860 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4861 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4862 struct pqi_queue_group *queue_group) 4863 { 4864 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 4865 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 4866 } 4867 4868 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 4869 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 4870 unsigned int cdb_length, struct pqi_queue_group *queue_group, 4871 struct pqi_encryption_info *encryption_info, bool raid_bypass) 4872 { 4873 int rc; 4874 struct pqi_io_request *io_request; 4875 struct pqi_aio_path_request *request; 4876 4877 io_request = pqi_alloc_io_request(ctrl_info); 4878 io_request->io_complete_callback = pqi_aio_io_complete; 4879 io_request->scmd = scmd; 4880 io_request->raid_bypass = raid_bypass; 4881 4882 request = io_request->iu; 4883 memset(request, 0, 4884 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4885 4886 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 4887 put_unaligned_le32(aio_handle, &request->nexus_id); 4888 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 4889 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4890 put_unaligned_le16(io_request->index, &request->request_id); 4891 request->error_index = request->request_id; 4892 if (cdb_length > sizeof(request->cdb)) 4893 cdb_length = sizeof(request->cdb); 4894 request->cdb_length = cdb_length; 4895 memcpy(request->cdb, cdb, cdb_length); 4896 4897 switch (scmd->sc_data_direction) { 4898 case DMA_TO_DEVICE: 4899 request->data_direction = SOP_READ_FLAG; 4900 break; 4901 case DMA_FROM_DEVICE: 4902 request->data_direction = SOP_WRITE_FLAG; 4903 break; 4904 case DMA_NONE: 4905 request->data_direction = SOP_NO_DIRECTION_FLAG; 4906 break; 4907 case DMA_BIDIRECTIONAL: 4908 request->data_direction = SOP_BIDIRECTIONAL; 4909 break; 4910 default: 4911 dev_err(&ctrl_info->pci_dev->dev, 4912 "unknown data direction: %d\n", 4913 scmd->sc_data_direction); 4914 break; 4915 } 4916 4917 if (encryption_info) { 4918 request->encryption_enable = true; 4919 put_unaligned_le16(encryption_info->data_encryption_key_index, 4920 &request->data_encryption_key_index); 4921 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 4922 &request->encrypt_tweak_lower); 4923 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 4924 &request->encrypt_tweak_upper); 4925 } 4926 4927 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 4928 if (rc) { 4929 pqi_free_io_request(io_request); 4930 return SCSI_MLQUEUE_HOST_BUSY; 4931 } 4932 4933 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 4934 4935 return 0; 4936 } 4937 4938 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 4939 struct scsi_cmnd *scmd) 4940 { 4941 u16 hw_queue; 4942 4943 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 4944 if (hw_queue > ctrl_info->max_hw_queue_index) 4945 hw_queue = 
0; 4946 4947 return hw_queue; 4948 } 4949 4950 /* 4951 * This function gets called just before we hand the completed SCSI request 4952 * back to the SML. 4953 */ 4954 4955 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 4956 { 4957 struct pqi_scsi_dev *device; 4958 4959 device = scmd->device->hostdata; 4960 atomic_dec(&device->scsi_cmds_outstanding); 4961 } 4962 4963 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 4964 struct scsi_cmnd *scmd) 4965 { 4966 int rc; 4967 struct pqi_ctrl_info *ctrl_info; 4968 struct pqi_scsi_dev *device; 4969 u16 hw_queue; 4970 struct pqi_queue_group *queue_group; 4971 bool raid_bypassed; 4972 4973 device = scmd->device->hostdata; 4974 ctrl_info = shost_to_hba(shost); 4975 4976 atomic_inc(&device->scsi_cmds_outstanding); 4977 4978 if (pqi_ctrl_offline(ctrl_info)) { 4979 set_host_byte(scmd, DID_NO_CONNECT); 4980 pqi_scsi_done(scmd); 4981 return 0; 4982 } 4983 4984 pqi_ctrl_busy(ctrl_info); 4985 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { 4986 rc = SCSI_MLQUEUE_HOST_BUSY; 4987 goto out; 4988 } 4989 4990 /* 4991 * This is necessary because the SML doesn't zero out this field during 4992 * error recovery. 4993 */ 4994 scmd->result = 0; 4995 4996 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 4997 queue_group = &ctrl_info->queue_groups[hw_queue]; 4998 4999 if (pqi_is_logical_device(device)) { 5000 raid_bypassed = false; 5001 if (device->raid_bypass_enabled && 5002 !blk_rq_is_passthrough(scmd->request)) { 5003 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 5004 scmd, queue_group); 5005 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 5006 raid_bypassed = true; 5007 } 5008 if (!raid_bypassed) 5009 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5010 queue_group); 5011 } else { 5012 if (device->aio_enabled) 5013 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 5014 queue_group); 5015 else 5016 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5017 queue_group); 5018 } 5019 5020 out: 5021 pqi_ctrl_unbusy(ctrl_info); 5022 if (rc) 5023 atomic_dec(&device->scsi_cmds_outstanding); 5024 5025 return rc; 5026 } 5027 5028 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5029 struct pqi_queue_group *queue_group) 5030 { 5031 unsigned int path; 5032 unsigned long flags; 5033 bool list_is_empty; 5034 5035 for (path = 0; path < 2; path++) { 5036 while (1) { 5037 spin_lock_irqsave( 5038 &queue_group->submit_lock[path], flags); 5039 list_is_empty = 5040 list_empty(&queue_group->request_list[path]); 5041 spin_unlock_irqrestore( 5042 &queue_group->submit_lock[path], flags); 5043 if (list_is_empty) 5044 break; 5045 pqi_check_ctrl_health(ctrl_info); 5046 if (pqi_ctrl_offline(ctrl_info)) 5047 return -ENXIO; 5048 usleep_range(1000, 2000); 5049 } 5050 } 5051 5052 return 0; 5053 } 5054 5055 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5056 { 5057 int rc; 5058 unsigned int i; 5059 unsigned int path; 5060 struct pqi_queue_group *queue_group; 5061 pqi_index_t iq_pi; 5062 pqi_index_t iq_ci; 5063 5064 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5065 queue_group = &ctrl_info->queue_groups[i]; 5066 5067 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5068 if (rc) 5069 return rc; 5070 5071 for (path = 0; path < 2; path++) { 5072 iq_pi = queue_group->iq_pi_copy[path]; 5073 5074 while (1) { 5075 iq_ci = *queue_group->iq_ci[path]; 5076 if (iq_ci == iq_pi) 5077 break; 5078 pqi_check_ctrl_health(ctrl_info); 5079 if (pqi_ctrl_offline(ctrl_info)) 5080 return -ENXIO; 
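/*
 * The controller has not yet consumed every element the driver posted
 * (the consumer index it writes back still trails our cached producer
 * index), so back off briefly and poll again.
 */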
5081 usleep_range(1000, 2000); 5082 } 5083 } 5084 } 5085 5086 return 0; 5087 } 5088 5089 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5090 struct pqi_scsi_dev *device) 5091 { 5092 unsigned int i; 5093 unsigned int path; 5094 struct pqi_queue_group *queue_group; 5095 unsigned long flags; 5096 struct pqi_io_request *io_request; 5097 struct pqi_io_request *next; 5098 struct scsi_cmnd *scmd; 5099 struct pqi_scsi_dev *scsi_device; 5100 5101 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5102 queue_group = &ctrl_info->queue_groups[i]; 5103 5104 for (path = 0; path < 2; path++) { 5105 spin_lock_irqsave( 5106 &queue_group->submit_lock[path], flags); 5107 5108 list_for_each_entry_safe(io_request, next, 5109 &queue_group->request_list[path], 5110 request_list_entry) { 5111 scmd = io_request->scmd; 5112 if (!scmd) 5113 continue; 5114 5115 scsi_device = scmd->device->hostdata; 5116 if (scsi_device != device) 5117 continue; 5118 5119 list_del(&io_request->request_list_entry); 5120 set_host_byte(scmd, DID_RESET); 5121 pqi_scsi_done(scmd); 5122 } 5123 5124 spin_unlock_irqrestore( 5125 &queue_group->submit_lock[path], flags); 5126 } 5127 } 5128 } 5129 5130 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5131 struct pqi_scsi_dev *device) 5132 { 5133 while (atomic_read(&device->scsi_cmds_outstanding)) { 5134 pqi_check_ctrl_health(ctrl_info); 5135 if (pqi_ctrl_offline(ctrl_info)) 5136 return -ENXIO; 5137 usleep_range(1000, 2000); 5138 } 5139 5140 return 0; 5141 } 5142 5143 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) 5144 { 5145 bool io_pending; 5146 unsigned long flags; 5147 struct pqi_scsi_dev *device; 5148 5149 while (1) { 5150 io_pending = false; 5151 5152 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5153 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5154 scsi_device_list_entry) { 5155 if (atomic_read(&device->scsi_cmds_outstanding)) { 5156 io_pending = true; 5157 break; 5158 } 5159 } 5160 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5161 flags); 5162 5163 if (!io_pending) 5164 break; 5165 5166 pqi_check_ctrl_health(ctrl_info); 5167 if (pqi_ctrl_offline(ctrl_info)) 5168 return -ENXIO; 5169 5170 usleep_range(1000, 2000); 5171 } 5172 5173 return 0; 5174 } 5175 5176 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5177 void *context) 5178 { 5179 struct completion *waiting = context; 5180 5181 complete(waiting); 5182 } 5183 5184 #define PQI_LUN_RESET_TIMEOUT_SECS 10 5185 5186 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5187 struct pqi_scsi_dev *device, struct completion *wait) 5188 { 5189 int rc; 5190 5191 while (1) { 5192 if (wait_for_completion_io_timeout(wait, 5193 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 5194 rc = 0; 5195 break; 5196 } 5197 5198 pqi_check_ctrl_health(ctrl_info); 5199 if (pqi_ctrl_offline(ctrl_info)) { 5200 rc = -ENXIO; 5201 break; 5202 } 5203 } 5204 5205 return rc; 5206 } 5207 5208 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5209 struct pqi_scsi_dev *device) 5210 { 5211 int rc; 5212 struct pqi_io_request *io_request; 5213 DECLARE_COMPLETION_ONSTACK(wait); 5214 struct pqi_task_management_request *request; 5215 5216 io_request = pqi_alloc_io_request(ctrl_info); 5217 io_request->io_complete_callback = pqi_lun_reset_complete; 5218 io_request->context = &wait; 5219 5220 request = io_request->iu; 5221 memset(request, 0, sizeof(*request)); 5222 5223 request->header.iu_type = 
PQI_REQUEST_IU_TASK_MANAGEMENT; 5224 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5225 &request->header.iu_length); 5226 put_unaligned_le16(io_request->index, &request->request_id); 5227 memcpy(request->lun_number, device->scsi3addr, 5228 sizeof(request->lun_number)); 5229 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5230 5231 pqi_start_io(ctrl_info, 5232 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5233 io_request); 5234 5235 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5236 if (rc == 0) 5237 rc = io_request->status; 5238 5239 pqi_free_io_request(io_request); 5240 5241 return rc; 5242 } 5243 5244 /* Performs a reset at the LUN level. */ 5245 5246 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5247 struct pqi_scsi_dev *device) 5248 { 5249 int rc; 5250 5251 rc = pqi_lun_reset(ctrl_info, device); 5252 if (rc == 0) 5253 rc = pqi_device_wait_for_pending_io(ctrl_info, device); 5254 5255 return rc == 0 ? SUCCESS : FAILED; 5256 } 5257 5258 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5259 { 5260 int rc; 5261 struct Scsi_Host *shost; 5262 struct pqi_ctrl_info *ctrl_info; 5263 struct pqi_scsi_dev *device; 5264 5265 shost = scmd->device->host; 5266 ctrl_info = shost_to_hba(shost); 5267 device = scmd->device->hostdata; 5268 5269 dev_err(&ctrl_info->pci_dev->dev, 5270 "resetting scsi %d:%d:%d:%d\n", 5271 shost->host_no, device->bus, device->target, device->lun); 5272 5273 pqi_check_ctrl_health(ctrl_info); 5274 if (pqi_ctrl_offline(ctrl_info)) { 5275 rc = FAILED; 5276 goto out; 5277 } 5278 5279 mutex_lock(&ctrl_info->lun_reset_mutex); 5280 5281 pqi_ctrl_block_requests(ctrl_info); 5282 pqi_ctrl_wait_until_quiesced(ctrl_info); 5283 pqi_fail_io_queued_for_device(ctrl_info, device); 5284 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5285 pqi_device_reset_start(device); 5286 pqi_ctrl_unblock_requests(ctrl_info); 5287 5288 if (rc) 5289 rc = FAILED; 5290 else 5291 rc = pqi_device_reset(ctrl_info, device); 5292 5293 pqi_device_reset_done(device); 5294 5295 mutex_unlock(&ctrl_info->lun_reset_mutex); 5296 5297 out: 5298 dev_err(&ctrl_info->pci_dev->dev, 5299 "reset of scsi %d:%d:%d:%d: %s\n", 5300 shost->host_no, device->bus, device->target, device->lun, 5301 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5302 5303 return rc; 5304 } 5305 5306 static int pqi_slave_alloc(struct scsi_device *sdev) 5307 { 5308 struct pqi_scsi_dev *device; 5309 unsigned long flags; 5310 struct pqi_ctrl_info *ctrl_info; 5311 struct scsi_target *starget; 5312 struct sas_rphy *rphy; 5313 5314 ctrl_info = shost_to_hba(sdev->host); 5315 5316 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5317 5318 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5319 starget = scsi_target(sdev); 5320 rphy = target_to_rphy(starget); 5321 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5322 if (device) { 5323 device->target = sdev_id(sdev); 5324 device->lun = sdev->lun; 5325 device->target_lun_valid = true; 5326 } 5327 } else { 5328 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5329 sdev_id(sdev), sdev->lun); 5330 } 5331 5332 if (device) { 5333 sdev->hostdata = device; 5334 device->sdev = sdev; 5335 if (device->queue_depth) { 5336 device->advertised_queue_depth = device->queue_depth; 5337 scsi_change_queue_depth(sdev, 5338 device->advertised_queue_depth); 5339 } 5340 } 5341 5342 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5343 5344 return 0; 5345 } 5346 5347 static int pqi_map_queues(struct Scsi_Host *shost) 5348 { 5349 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5350 5351 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev); 5352 } 5353 5354 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5355 void __user *arg) 5356 { 5357 struct pci_dev *pci_dev; 5358 u32 subsystem_vendor; 5359 u32 subsystem_device; 5360 cciss_pci_info_struct pciinfo; 5361 5362 if (!arg) 5363 return -EINVAL; 5364 5365 pci_dev = ctrl_info->pci_dev; 5366 5367 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5368 pciinfo.bus = pci_dev->bus->number; 5369 pciinfo.dev_fn = pci_dev->devfn; 5370 subsystem_vendor = pci_dev->subsystem_vendor; 5371 subsystem_device = pci_dev->subsystem_device; 5372 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5373 subsystem_vendor; 5374 5375 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5376 return -EFAULT; 5377 5378 return 0; 5379 } 5380 5381 static int pqi_getdrivver_ioctl(void __user *arg) 5382 { 5383 u32 version; 5384 5385 if (!arg) 5386 return -EINVAL; 5387 5388 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5389 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5390 5391 if (copy_to_user(arg, &version, sizeof(version))) 5392 return -EFAULT; 5393 5394 return 0; 5395 } 5396 5397 struct ciss_error_info { 5398 u8 scsi_status; 5399 int command_status; 5400 size_t sense_data_length; 5401 }; 5402 5403 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5404 struct ciss_error_info *ciss_error_info) 5405 { 5406 int ciss_cmd_status; 5407 size_t sense_data_length; 5408 5409 switch (pqi_error_info->data_out_result) { 5410 case PQI_DATA_IN_OUT_GOOD: 5411 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5412 break; 5413 case PQI_DATA_IN_OUT_UNDERFLOW: 5414 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5415 break; 5416 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5417 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5418 break; 5419 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5420 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5421 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5422 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5423 case PQI_DATA_IN_OUT_ERROR: 5424 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5425 break; 5426 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5427 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 
5428 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5429 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5430 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5431 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5432 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5433 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5434 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5435 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5436 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5437 break; 5438 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5439 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5440 break; 5441 case PQI_DATA_IN_OUT_ABORTED: 5442 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5443 break; 5444 case PQI_DATA_IN_OUT_TIMEOUT: 5445 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5446 break; 5447 default: 5448 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5449 break; 5450 } 5451 5452 sense_data_length = 5453 get_unaligned_le16(&pqi_error_info->sense_data_length); 5454 if (sense_data_length == 0) 5455 sense_data_length = 5456 get_unaligned_le16(&pqi_error_info->response_data_length); 5457 if (sense_data_length) 5458 if (sense_data_length > sizeof(pqi_error_info->data)) 5459 sense_data_length = sizeof(pqi_error_info->data); 5460 5461 ciss_error_info->scsi_status = pqi_error_info->status; 5462 ciss_error_info->command_status = ciss_cmd_status; 5463 ciss_error_info->sense_data_length = sense_data_length; 5464 } 5465 5466 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5467 { 5468 int rc; 5469 char *kernel_buffer = NULL; 5470 u16 iu_length; 5471 size_t sense_data_length; 5472 IOCTL_Command_struct iocommand; 5473 struct pqi_raid_path_request request; 5474 struct pqi_raid_error_info pqi_error_info; 5475 struct ciss_error_info ciss_error_info; 5476 5477 if (pqi_ctrl_offline(ctrl_info)) 5478 return -ENXIO; 5479 if (!arg) 5480 return -EINVAL; 5481 if (!capable(CAP_SYS_RAWIO)) 5482 return -EPERM; 5483 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5484 return -EFAULT; 5485 if (iocommand.buf_size < 1 && 5486 iocommand.Request.Type.Direction != XFER_NONE) 5487 return -EINVAL; 5488 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5489 return -EINVAL; 5490 if (iocommand.Request.Type.Type != TYPE_CMD) 5491 return -EINVAL; 5492 5493 switch (iocommand.Request.Type.Direction) { 5494 case XFER_NONE: 5495 case XFER_WRITE: 5496 case XFER_READ: 5497 case XFER_READ | XFER_WRITE: 5498 break; 5499 default: 5500 return -EINVAL; 5501 } 5502 5503 if (iocommand.buf_size > 0) { 5504 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5505 if (!kernel_buffer) 5506 return -ENOMEM; 5507 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5508 if (copy_from_user(kernel_buffer, iocommand.buf, 5509 iocommand.buf_size)) { 5510 rc = -EFAULT; 5511 goto out; 5512 } 5513 } else { 5514 memset(kernel_buffer, 0, iocommand.buf_size); 5515 } 5516 } 5517 5518 memset(&request, 0, sizeof(request)); 5519 5520 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5521 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5522 PQI_REQUEST_HEADER_LENGTH; 5523 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 5524 sizeof(request.lun_number)); 5525 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 5526 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5527 5528 switch (iocommand.Request.Type.Direction) { 5529 case XFER_NONE: 5530 request.data_direction = SOP_NO_DIRECTION_FLAG; 5531 break; 5532 case XFER_WRITE: 5533 request.data_direction = 
SOP_WRITE_FLAG; 5534 break; 5535 case XFER_READ: 5536 request.data_direction = SOP_READ_FLAG; 5537 break; 5538 case XFER_READ | XFER_WRITE: 5539 request.data_direction = SOP_BIDIRECTIONAL; 5540 break; 5541 } 5542 5543 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5544 5545 if (iocommand.buf_size > 0) { 5546 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 5547 5548 rc = pqi_map_single(ctrl_info->pci_dev, 5549 &request.sg_descriptors[0], kernel_buffer, 5550 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); 5551 if (rc) 5552 goto out; 5553 5554 iu_length += sizeof(request.sg_descriptors[0]); 5555 } 5556 5557 put_unaligned_le16(iu_length, &request.header.iu_length); 5558 5559 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 5560 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 5561 5562 if (iocommand.buf_size > 0) 5563 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 5564 PCI_DMA_BIDIRECTIONAL); 5565 5566 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 5567 5568 if (rc == 0) { 5569 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 5570 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 5571 iocommand.error_info.CommandStatus = 5572 ciss_error_info.command_status; 5573 sense_data_length = ciss_error_info.sense_data_length; 5574 if (sense_data_length) { 5575 if (sense_data_length > 5576 sizeof(iocommand.error_info.SenseInfo)) 5577 sense_data_length = 5578 sizeof(iocommand.error_info.SenseInfo); 5579 memcpy(iocommand.error_info.SenseInfo, 5580 pqi_error_info.data, sense_data_length); 5581 iocommand.error_info.SenseLen = sense_data_length; 5582 } 5583 } 5584 5585 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 5586 rc = -EFAULT; 5587 goto out; 5588 } 5589 5590 if (rc == 0 && iocommand.buf_size > 0 && 5591 (iocommand.Request.Type.Direction & XFER_READ)) { 5592 if (copy_to_user(iocommand.buf, kernel_buffer, 5593 iocommand.buf_size)) { 5594 rc = -EFAULT; 5595 } 5596 } 5597 5598 out: 5599 kfree(kernel_buffer); 5600 5601 return rc; 5602 } 5603 5604 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 5605 { 5606 int rc; 5607 struct pqi_ctrl_info *ctrl_info; 5608 5609 ctrl_info = shost_to_hba(sdev->host); 5610 5611 switch (cmd) { 5612 case CCISS_DEREGDISK: 5613 case CCISS_REGNEWDISK: 5614 case CCISS_REGNEWD: 5615 rc = pqi_scan_scsi_devices(ctrl_info); 5616 break; 5617 case CCISS_GETPCIINFO: 5618 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 5619 break; 5620 case CCISS_GETDRIVVER: 5621 rc = pqi_getdrivver_ioctl(arg); 5622 break; 5623 case CCISS_PASSTHRU: 5624 rc = pqi_passthru_ioctl(ctrl_info, arg); 5625 break; 5626 default: 5627 rc = -EINVAL; 5628 break; 5629 } 5630 5631 return rc; 5632 } 5633 5634 static ssize_t pqi_version_show(struct device *dev, 5635 struct device_attribute *attr, char *buffer) 5636 { 5637 ssize_t count = 0; 5638 struct Scsi_Host *shost; 5639 struct pqi_ctrl_info *ctrl_info; 5640 5641 shost = class_to_shost(dev); 5642 ctrl_info = shost_to_hba(shost); 5643 5644 count += snprintf(buffer + count, PAGE_SIZE - count, 5645 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); 5646 5647 count += snprintf(buffer + count, PAGE_SIZE - count, 5648 "firmware: %s\n", ctrl_info->firmware_version); 5649 5650 return count; 5651 } 5652 5653 static ssize_t pqi_host_rescan_store(struct device *dev, 5654 struct device_attribute *attr, const char *buffer, size_t count) 5655 { 5656 struct Scsi_Host *shost = class_to_shost(dev); 5657 5658 pqi_scan_start(shost); 5659 5660 return count; 5661 
} 5662 5663 static ssize_t pqi_lockup_action_show(struct device *dev, 5664 struct device_attribute *attr, char *buffer) 5665 { 5666 int count = 0; 5667 unsigned int i; 5668 5669 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5670 if (pqi_lockup_actions[i].action == pqi_lockup_action) 5671 count += snprintf(buffer + count, PAGE_SIZE - count, 5672 "[%s] ", pqi_lockup_actions[i].name); 5673 else 5674 count += snprintf(buffer + count, PAGE_SIZE - count, 5675 "%s ", pqi_lockup_actions[i].name); 5676 } 5677 5678 count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); 5679 5680 return count; 5681 } 5682 5683 static ssize_t pqi_lockup_action_store(struct device *dev, 5684 struct device_attribute *attr, const char *buffer, size_t count) 5685 { 5686 unsigned int i; 5687 char *action_name; 5688 char action_name_buffer[32]; 5689 5690 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 5691 action_name = strstrip(action_name_buffer); 5692 5693 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5694 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 5695 pqi_lockup_action = pqi_lockup_actions[i].action; 5696 return count; 5697 } 5698 } 5699 5700 return -EINVAL; 5701 } 5702 5703 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); 5704 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 5705 static DEVICE_ATTR(lockup_action, 0644, 5706 pqi_lockup_action_show, pqi_lockup_action_store); 5707 5708 static struct device_attribute *pqi_shost_attrs[] = { 5709 &dev_attr_version, 5710 &dev_attr_rescan, 5711 &dev_attr_lockup_action, 5712 NULL 5713 }; 5714 5715 static ssize_t pqi_sas_address_show(struct device *dev, 5716 struct device_attribute *attr, char *buffer) 5717 { 5718 struct pqi_ctrl_info *ctrl_info; 5719 struct scsi_device *sdev; 5720 struct pqi_scsi_dev *device; 5721 unsigned long flags; 5722 u64 sas_address; 5723 5724 sdev = to_scsi_device(dev); 5725 ctrl_info = shost_to_hba(sdev->host); 5726 5727 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5728 5729 device = sdev->hostdata; 5730 if (pqi_is_logical_device(device)) { 5731 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5732 flags); 5733 return -ENODEV; 5734 } 5735 sas_address = device->sas_address; 5736 5737 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5738 5739 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 5740 } 5741 5742 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 5743 struct device_attribute *attr, char *buffer) 5744 { 5745 struct pqi_ctrl_info *ctrl_info; 5746 struct scsi_device *sdev; 5747 struct pqi_scsi_dev *device; 5748 unsigned long flags; 5749 5750 sdev = to_scsi_device(dev); 5751 ctrl_info = shost_to_hba(sdev->host); 5752 5753 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5754 5755 device = sdev->hostdata; 5756 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 5757 buffer[1] = '\n'; 5758 buffer[2] = '\0'; 5759 5760 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5761 5762 return 2; 5763 } 5764 5765 static ssize_t pqi_raid_level_show(struct device *dev, 5766 struct device_attribute *attr, char *buffer) 5767 { 5768 struct pqi_ctrl_info *ctrl_info; 5769 struct scsi_device *sdev; 5770 struct pqi_scsi_dev *device; 5771 unsigned long flags; 5772 char *raid_level; 5773 5774 sdev = to_scsi_device(dev); 5775 ctrl_info = shost_to_hba(sdev->host); 5776 5777 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5778 5779 device = sdev->hostdata; 5780 5781 if (pqi_is_logical_device(device)) 5782 raid_level = pqi_raid_level_to_string(device->raid_level); 5783 else 5784 raid_level = "N/A"; 5785 5786 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5787 5788 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 5789 } 5790 5791 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 5792 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 5793 pqi_ssd_smart_path_enabled_show, NULL); 5794 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 5795 5796 static struct device_attribute *pqi_sdev_attrs[] = { 5797 &dev_attr_sas_address, 5798 &dev_attr_ssd_smart_path_enabled, 5799 &dev_attr_raid_level, 5800 NULL 5801 }; 5802 5803 static struct scsi_host_template pqi_driver_template = { 5804 .module = THIS_MODULE, 5805 .name = DRIVER_NAME_SHORT, 5806 .proc_name = DRIVER_NAME_SHORT, 5807 .queuecommand = pqi_scsi_queue_command, 5808 .scan_start = pqi_scan_start, 5809 .scan_finished = pqi_scan_finished, 5810 .this_id = -1, 5811 .use_clustering = ENABLE_CLUSTERING, 5812 .eh_device_reset_handler = pqi_eh_device_reset_handler, 5813 .ioctl = pqi_ioctl, 5814 .slave_alloc = pqi_slave_alloc, 5815 .map_queues = pqi_map_queues, 5816 .sdev_attrs = pqi_sdev_attrs, 5817 .shost_attrs = pqi_shost_attrs, 5818 }; 5819 5820 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 5821 { 5822 int rc; 5823 struct Scsi_Host *shost; 5824 5825 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 5826 if (!shost) { 5827 dev_err(&ctrl_info->pci_dev->dev, 5828 "scsi_host_alloc failed for controller %u\n", 5829 ctrl_info->ctrl_id); 5830 return -ENOMEM; 5831 } 5832 5833 shost->io_port = 0; 5834 shost->n_io_port = 0; 5835 shost->this_id = -1; 5836 shost->max_channel = PQI_MAX_BUS; 5837 shost->max_cmd_len = MAX_COMMAND_SIZE; 5838 shost->max_lun = ~0; 5839 shost->max_id = ~0; 5840 shost->max_sectors = ctrl_info->max_sectors; 5841 shost->can_queue = ctrl_info->scsi_ml_can_queue; 5842 shost->cmd_per_lun = shost->can_queue; 5843 shost->sg_tablesize = ctrl_info->sg_tablesize; 5844 shost->transportt = pqi_sas_transport_template; 5845 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 5846 shost->unique_id = shost->irq; 5847 shost->nr_hw_queues = ctrl_info->num_queue_groups; 5848 shost->hostdata[0] = (unsigned long)ctrl_info; 5849 5850 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 5851 if (rc) { 5852 dev_err(&ctrl_info->pci_dev->dev, 5853 "scsi_add_host failed for controller %u\n", 5854 ctrl_info->ctrl_id); 5855 goto free_host; 5856 } 5857 5858 rc = pqi_add_sas_host(shost, ctrl_info); 5859 if (rc) { 5860 dev_err(&ctrl_info->pci_dev->dev, 5861 "add SAS host failed for controller %u\n", 5862 ctrl_info->ctrl_id); 5863 goto remove_host; 5864 } 5865 5866 ctrl_info->scsi_host = shost; 5867 5868 return 0; 5869 5870 remove_host: 5871 scsi_remove_host(shost); 5872 free_host: 5873 scsi_host_put(shost); 5874 5875 return 
rc; 5876 } 5877 5878 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 5879 { 5880 struct Scsi_Host *shost; 5881 5882 pqi_delete_sas_host(ctrl_info); 5883 5884 shost = ctrl_info->scsi_host; 5885 if (!shost) 5886 return; 5887 5888 scsi_remove_host(shost); 5889 scsi_host_put(shost); 5890 } 5891 5892 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 5893 { 5894 int rc = 0; 5895 struct pqi_device_registers __iomem *pqi_registers; 5896 unsigned long timeout; 5897 unsigned int timeout_msecs; 5898 union pqi_reset_register reset_reg; 5899 5900 pqi_registers = ctrl_info->pqi_registers; 5901 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 5902 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 5903 5904 while (1) { 5905 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 5906 reset_reg.all_bits = readl(&pqi_registers->device_reset); 5907 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 5908 break; 5909 pqi_check_ctrl_health(ctrl_info); 5910 if (pqi_ctrl_offline(ctrl_info)) { 5911 rc = -ENXIO; 5912 break; 5913 } 5914 if (time_after(jiffies, timeout)) { 5915 rc = -ETIMEDOUT; 5916 break; 5917 } 5918 } 5919 5920 return rc; 5921 } 5922 5923 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 5924 { 5925 int rc; 5926 union pqi_reset_register reset_reg; 5927 5928 if (ctrl_info->pqi_reset_quiesce_supported) { 5929 rc = sis_pqi_reset_quiesce(ctrl_info); 5930 if (rc) { 5931 dev_err(&ctrl_info->pci_dev->dev, 5932 "PQI reset failed during quiesce with error %d\n", 5933 rc); 5934 return rc; 5935 } 5936 } 5937 5938 reset_reg.all_bits = 0; 5939 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 5940 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 5941 5942 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 5943 5944 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 5945 if (rc) 5946 dev_err(&ctrl_info->pci_dev->dev, 5947 "PQI reset failed with error %d\n", rc); 5948 5949 return rc; 5950 } 5951 5952 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) 5953 { 5954 int rc; 5955 struct bmic_identify_controller *identify; 5956 5957 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 5958 if (!identify) 5959 return -ENOMEM; 5960 5961 rc = pqi_identify_controller(ctrl_info, identify); 5962 if (rc) 5963 goto out; 5964 5965 memcpy(ctrl_info->firmware_version, identify->firmware_version, 5966 sizeof(identify->firmware_version)); 5967 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 5968 snprintf(ctrl_info->firmware_version + 5969 strlen(ctrl_info->firmware_version), 5970 sizeof(ctrl_info->firmware_version), 5971 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 5972 5973 out: 5974 kfree(identify); 5975 5976 return rc; 5977 } 5978 5979 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 5980 { 5981 u32 table_length; 5982 u32 section_offset; 5983 void __iomem *table_iomem_addr; 5984 struct pqi_config_table *config_table; 5985 struct pqi_config_table_section_header *section; 5986 5987 table_length = ctrl_info->config_table_length; 5988 5989 config_table = kmalloc(table_length, GFP_KERNEL); 5990 if (!config_table) { 5991 dev_err(&ctrl_info->pci_dev->dev, 5992 "failed to allocate memory for PQI configuration table\n"); 5993 return -ENOMEM; 5994 } 5995 5996 /* 5997 * Copy the config table contents from I/O memory space into the 5998 * temporary buffer. 
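* The heartbeat counter is the one exception: it must keep pointing at the live config table in MMIO space, so it is derived from table_iomem_addr rather than from this copy.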
5999 */ 6000 table_iomem_addr = ctrl_info->iomem_base + 6001 ctrl_info->config_table_offset; 6002 memcpy_fromio(config_table, table_iomem_addr, table_length); 6003 6004 section_offset = 6005 get_unaligned_le32(&config_table->first_section_offset); 6006 6007 while (section_offset) { 6008 section = (void *)config_table + section_offset; 6009 6010 switch (get_unaligned_le16(&section->section_id)) { 6011 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 6012 if (pqi_disable_heartbeat) 6013 dev_warn(&ctrl_info->pci_dev->dev, 6014 "heartbeat disabled by module parameter\n"); 6015 else 6016 ctrl_info->heartbeat_counter = 6017 table_iomem_addr + 6018 section_offset + 6019 offsetof( 6020 struct pqi_config_table_heartbeat, 6021 heartbeat_counter); 6022 break; 6023 } 6024 6025 section_offset = 6026 get_unaligned_le16(&section->next_section_offset); 6027 } 6028 6029 kfree(config_table); 6030 6031 return 0; 6032 } 6033 6034 /* Switches the controller from PQI mode back into SIS mode. */ 6035 6036 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 6037 { 6038 int rc; 6039 6040 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 6041 rc = pqi_reset(ctrl_info); 6042 if (rc) 6043 return rc; 6044 rc = sis_reenable_sis_mode(ctrl_info); 6045 if (rc) { 6046 dev_err(&ctrl_info->pci_dev->dev, 6047 "re-enabling SIS mode failed with error %d\n", rc); 6048 return rc; 6049 } 6050 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6051 6052 return 0; 6053 } 6054 6055 /* 6056 * If the controller isn't already in SIS mode, this function forces it into 6057 * SIS mode. 6058 */ 6059 6060 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 6061 { 6062 if (!sis_is_firmware_running(ctrl_info)) 6063 return -ENXIO; 6064 6065 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 6066 return 0; 6067 6068 if (sis_is_kernel_up(ctrl_info)) { 6069 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6070 return 0; 6071 } 6072 6073 return pqi_revert_to_sis_mode(ctrl_info); 6074 } 6075 6076 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 6077 { 6078 int rc; 6079 6080 rc = pqi_force_sis_mode(ctrl_info); 6081 if (rc) 6082 return rc; 6083 6084 /* 6085 * Wait until the controller is ready to start accepting SIS 6086 * commands. 6087 */ 6088 rc = sis_wait_for_ctrl_ready(ctrl_info); 6089 if (rc) 6090 return rc; 6091 6092 /* 6093 * Get the controller properties. This allows us to determine 6094 * whether or not it supports PQI mode. 6095 */ 6096 rc = sis_get_ctrl_properties(ctrl_info); 6097 if (rc) { 6098 dev_err(&ctrl_info->pci_dev->dev, 6099 "error obtaining controller properties\n"); 6100 return rc; 6101 } 6102 6103 rc = sis_get_pqi_capabilities(ctrl_info); 6104 if (rc) { 6105 dev_err(&ctrl_info->pci_dev->dev, 6106 "error obtaining controller capabilities\n"); 6107 return rc; 6108 } 6109 6110 if (reset_devices) { 6111 if (ctrl_info->max_outstanding_requests > 6112 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 6113 ctrl_info->max_outstanding_requests = 6114 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 6115 } else { 6116 if (ctrl_info->max_outstanding_requests > 6117 PQI_MAX_OUTSTANDING_REQUESTS) 6118 ctrl_info->max_outstanding_requests = 6119 PQI_MAX_OUTSTANDING_REQUESTS; 6120 } 6121 6122 pqi_calculate_io_resources(ctrl_info); 6123 6124 rc = pqi_alloc_error_buffer(ctrl_info); 6125 if (rc) { 6126 dev_err(&ctrl_info->pci_dev->dev, 6127 "failed to allocate PQI error buffer\n"); 6128 return rc; 6129 } 6130 6131 /* 6132 * If the function we are about to call succeeds, the 6133 * controller will transition from legacy SIS mode 6134 * into PQI mode.
6135 */ 6136 rc = sis_init_base_struct_addr(ctrl_info); 6137 if (rc) { 6138 dev_err(&ctrl_info->pci_dev->dev, 6139 "error initializing PQI mode\n"); 6140 return rc; 6141 } 6142 6143 /* Wait for the controller to complete the SIS -> PQI transition. */ 6144 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6145 if (rc) { 6146 dev_err(&ctrl_info->pci_dev->dev, 6147 "transition to PQI mode failed\n"); 6148 return rc; 6149 } 6150 6151 /* From here on, we are running in PQI mode. */ 6152 ctrl_info->pqi_mode_enabled = true; 6153 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6154 6155 rc = pqi_process_config_table(ctrl_info); 6156 if (rc) 6157 return rc; 6158 6159 rc = pqi_alloc_admin_queues(ctrl_info); 6160 if (rc) { 6161 dev_err(&ctrl_info->pci_dev->dev, 6162 "failed to allocate admin queues\n"); 6163 return rc; 6164 } 6165 6166 rc = pqi_create_admin_queues(ctrl_info); 6167 if (rc) { 6168 dev_err(&ctrl_info->pci_dev->dev, 6169 "error creating admin queues\n"); 6170 return rc; 6171 } 6172 6173 rc = pqi_report_device_capability(ctrl_info); 6174 if (rc) { 6175 dev_err(&ctrl_info->pci_dev->dev, 6176 "obtaining device capability failed\n"); 6177 return rc; 6178 } 6179 6180 rc = pqi_validate_device_capability(ctrl_info); 6181 if (rc) 6182 return rc; 6183 6184 pqi_calculate_queue_resources(ctrl_info); 6185 6186 rc = pqi_enable_msix_interrupts(ctrl_info); 6187 if (rc) 6188 return rc; 6189 6190 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 6191 ctrl_info->max_msix_vectors = 6192 ctrl_info->num_msix_vectors_enabled; 6193 pqi_calculate_queue_resources(ctrl_info); 6194 } 6195 6196 rc = pqi_alloc_io_resources(ctrl_info); 6197 if (rc) 6198 return rc; 6199 6200 rc = pqi_alloc_operational_queues(ctrl_info); 6201 if (rc) { 6202 dev_err(&ctrl_info->pci_dev->dev, 6203 "failed to allocate operational queues\n"); 6204 return rc; 6205 } 6206 6207 pqi_init_operational_queues(ctrl_info); 6208 6209 rc = pqi_request_irqs(ctrl_info); 6210 if (rc) 6211 return rc; 6212 6213 rc = pqi_create_queues(ctrl_info); 6214 if (rc) 6215 return rc; 6216 6217 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6218 6219 ctrl_info->controller_online = true; 6220 pqi_start_heartbeat_timer(ctrl_info); 6221 6222 rc = pqi_enable_events(ctrl_info); 6223 if (rc) { 6224 dev_err(&ctrl_info->pci_dev->dev, 6225 "error enabling events\n"); 6226 return rc; 6227 } 6228 6229 /* Register with the SCSI subsystem. 
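pqi_register_scsi() allocates the Scsi_Host, adds it to the SCSI midlayer, and attaches the SAS transport.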
*/ 6230 rc = pqi_register_scsi(ctrl_info); 6231 if (rc) 6232 return rc; 6233 6234 rc = pqi_get_ctrl_firmware_version(ctrl_info); 6235 if (rc) { 6236 dev_err(&ctrl_info->pci_dev->dev, 6237 "error obtaining firmware version\n"); 6238 return rc; 6239 } 6240 6241 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6242 if (rc) { 6243 dev_err(&ctrl_info->pci_dev->dev, 6244 "error updating host wellness\n"); 6245 return rc; 6246 } 6247 6248 pqi_schedule_update_time_worker(ctrl_info); 6249 6250 pqi_scan_scsi_devices(ctrl_info); 6251 6252 return 0; 6253 } 6254 6255 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 6256 { 6257 unsigned int i; 6258 struct pqi_admin_queues *admin_queues; 6259 struct pqi_event_queue *event_queue; 6260 6261 admin_queues = &ctrl_info->admin_queues; 6262 admin_queues->iq_pi_copy = 0; 6263 admin_queues->oq_ci_copy = 0; 6264 *admin_queues->oq_pi = 0; 6265 6266 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6267 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 6268 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 6269 ctrl_info->queue_groups[i].oq_ci_copy = 0; 6270 6271 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0; 6272 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0; 6273 *ctrl_info->queue_groups[i].oq_pi = 0; 6274 } 6275 6276 event_queue = &ctrl_info->event_queue; 6277 *event_queue->oq_pi = 0; 6278 event_queue->oq_ci_copy = 0; 6279 } 6280 6281 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 6282 { 6283 int rc; 6284 6285 rc = pqi_force_sis_mode(ctrl_info); 6286 if (rc) 6287 return rc; 6288 6289 /* 6290 * Wait until the controller is ready to start accepting SIS 6291 * commands. 6292 */ 6293 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 6294 if (rc) 6295 return rc; 6296 6297 /* 6298 * If the function we are about to call succeeds, the 6299 * controller will transition from legacy SIS mode 6300 * into PQI mode. 6301 */ 6302 rc = sis_init_base_struct_addr(ctrl_info); 6303 if (rc) { 6304 dev_err(&ctrl_info->pci_dev->dev, 6305 "error initializing PQI mode\n"); 6306 return rc; 6307 } 6308 6309 /* Wait for the controller to complete the SIS -> PQI transition. */ 6310 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6311 if (rc) { 6312 dev_err(&ctrl_info->pci_dev->dev, 6313 "transition to PQI mode failed\n"); 6314 return rc; 6315 } 6316 6317 /* From here on, we are running in PQI mode. 
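Unlike the initial bring-up, the resume path reuses the queue memory allocated at probe time; pqi_reinit_queues() below only resets the producer/consumer indices.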
*/ 6318 ctrl_info->pqi_mode_enabled = true; 6319 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6320 6321 pqi_reinit_queues(ctrl_info); 6322 6323 rc = pqi_create_admin_queues(ctrl_info); 6324 if (rc) { 6325 dev_err(&ctrl_info->pci_dev->dev, 6326 "error creating admin queues\n"); 6327 return rc; 6328 } 6329 6330 rc = pqi_create_queues(ctrl_info); 6331 if (rc) 6332 return rc; 6333 6334 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6335 6336 ctrl_info->controller_online = true; 6337 pqi_start_heartbeat_timer(ctrl_info); 6338 pqi_ctrl_unblock_requests(ctrl_info); 6339 6340 rc = pqi_enable_events(ctrl_info); 6341 if (rc) { 6342 dev_err(&ctrl_info->pci_dev->dev, 6343 "error enabling events\n"); 6344 return rc; 6345 } 6346 6347 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6348 if (rc) { 6349 dev_err(&ctrl_info->pci_dev->dev, 6350 "error updating host wellness\n"); 6351 return rc; 6352 } 6353 6354 pqi_schedule_update_time_worker(ctrl_info); 6355 6356 pqi_scan_scsi_devices(ctrl_info); 6357 6358 return 0; 6359 } 6360 6361 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 6362 u16 timeout) 6363 { 6364 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 6365 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 6366 } 6367 6368 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 6369 { 6370 int rc; 6371 u64 mask; 6372 6373 rc = pci_enable_device(ctrl_info->pci_dev); 6374 if (rc) { 6375 dev_err(&ctrl_info->pci_dev->dev, 6376 "failed to enable PCI device\n"); 6377 return rc; 6378 } 6379 6380 if (sizeof(dma_addr_t) > 4) 6381 mask = DMA_BIT_MASK(64); 6382 else 6383 mask = DMA_BIT_MASK(32); 6384 6385 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); 6386 if (rc) { 6387 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 6388 goto disable_device; 6389 } 6390 6391 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 6392 if (rc) { 6393 dev_err(&ctrl_info->pci_dev->dev, 6394 "failed to obtain PCI resources\n"); 6395 goto disable_device; 6396 } 6397 6398 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( 6399 ctrl_info->pci_dev, 0), 6400 sizeof(struct pqi_ctrl_registers)); 6401 if (!ctrl_info->iomem_base) { 6402 dev_err(&ctrl_info->pci_dev->dev, 6403 "failed to map memory for controller registers\n"); 6404 rc = -ENOMEM; 6405 goto release_regions; 6406 } 6407 6408 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 6409 6410 /* Increase the PCIe completion timeout. */ 6411 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 6412 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 6413 if (rc) { 6414 dev_err(&ctrl_info->pci_dev->dev, 6415 "failed to set PCIe completion timeout\n"); 6416 goto release_regions; 6417 } 6418 6419 /* Enable bus mastering. 
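Without this the controller cannot initiate DMA to the host-resident queues and buffers.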
*/ 6420 pci_set_master(ctrl_info->pci_dev); 6421 6422 ctrl_info->registers = ctrl_info->iomem_base; 6423 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 6424 6425 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 6426 6427 return 0; 6428 6429 release_regions: 6430 pci_release_regions(ctrl_info->pci_dev); 6431 disable_device: 6432 pci_disable_device(ctrl_info->pci_dev); 6433 6434 return rc; 6435 } 6436 6437 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 6438 { 6439 iounmap(ctrl_info->iomem_base); 6440 pci_release_regions(ctrl_info->pci_dev); 6441 if (pci_is_enabled(ctrl_info->pci_dev)) 6442 pci_disable_device(ctrl_info->pci_dev); 6443 pci_set_drvdata(ctrl_info->pci_dev, NULL); 6444 } 6445 6446 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 6447 { 6448 struct pqi_ctrl_info *ctrl_info; 6449 6450 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 6451 GFP_KERNEL, numa_node); 6452 if (!ctrl_info) 6453 return NULL; 6454 6455 mutex_init(&ctrl_info->scan_mutex); 6456 mutex_init(&ctrl_info->lun_reset_mutex); 6457 6458 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 6459 spin_lock_init(&ctrl_info->scsi_device_list_lock); 6460 6461 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 6462 atomic_set(&ctrl_info->num_interrupts, 0); 6463 6464 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 6465 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 6466 6467 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 6468 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 6469 6470 sema_init(&ctrl_info->sync_request_sem, 6471 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 6472 init_waitqueue_head(&ctrl_info->block_requests_wait); 6473 6474 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 6475 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 6476 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 6477 pqi_raid_bypass_retry_worker); 6478 6479 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 6480 ctrl_info->irq_mode = IRQ_MODE_NONE; 6481 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 6482 6483 return ctrl_info; 6484 } 6485 6486 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 6487 { 6488 kfree(ctrl_info); 6489 } 6490 6491 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 6492 { 6493 pqi_free_irqs(ctrl_info); 6494 pqi_disable_msix_interrupts(ctrl_info); 6495 } 6496 6497 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 6498 { 6499 pqi_stop_heartbeat_timer(ctrl_info); 6500 pqi_free_interrupts(ctrl_info); 6501 if (ctrl_info->queue_memory_base) 6502 dma_free_coherent(&ctrl_info->pci_dev->dev, 6503 ctrl_info->queue_memory_length, 6504 ctrl_info->queue_memory_base, 6505 ctrl_info->queue_memory_base_dma_handle); 6506 if (ctrl_info->admin_queue_memory_base) 6507 dma_free_coherent(&ctrl_info->pci_dev->dev, 6508 ctrl_info->admin_queue_memory_length, 6509 ctrl_info->admin_queue_memory_base, 6510 ctrl_info->admin_queue_memory_base_dma_handle); 6511 pqi_free_all_io_requests(ctrl_info); 6512 if (ctrl_info->error_buffer) 6513 dma_free_coherent(&ctrl_info->pci_dev->dev, 6514 ctrl_info->error_buffer_length, 6515 ctrl_info->error_buffer, 6516 ctrl_info->error_buffer_dma_handle); 6517 if (ctrl_info->iomem_base) 6518 pqi_cleanup_pci_init(ctrl_info); 6519 pqi_free_ctrl_info(ctrl_info); 6520 } 6521 6522 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 6523 { 6524 pqi_cancel_rescan_worker(ctrl_info); 6525 
pqi_cancel_update_time_worker(ctrl_info); 6526 pqi_remove_all_scsi_devices(ctrl_info); 6527 pqi_unregister_scsi(ctrl_info); 6528 if (ctrl_info->pqi_mode_enabled) 6529 pqi_revert_to_sis_mode(ctrl_info); 6530 pqi_free_ctrl_resources(ctrl_info); 6531 } 6532 6533 static void pqi_perform_lockup_action(void) 6534 { 6535 switch (pqi_lockup_action) { 6536 case PANIC: 6537 panic("FATAL: Smart Family Controller lockup detected"); 6538 break; 6539 case REBOOT: 6540 emergency_restart(); 6541 break; 6542 case NONE: 6543 default: 6544 break; 6545 } 6546 } 6547 6548 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 6549 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 6550 .status = SAM_STAT_CHECK_CONDITION, 6551 }; 6552 6553 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 6554 { 6555 unsigned int i; 6556 struct pqi_io_request *io_request; 6557 struct scsi_cmnd *scmd; 6558 6559 for (i = 0; i < ctrl_info->max_io_slots; i++) { 6560 io_request = &ctrl_info->io_request_pool[i]; 6561 if (atomic_read(&io_request->refcount) == 0) 6562 continue; 6563 6564 scmd = io_request->scmd; 6565 if (scmd) { 6566 set_host_byte(scmd, DID_NO_CONNECT); 6567 } else { 6568 io_request->status = -ENXIO; 6569 io_request->error_info = 6570 &pqi_ctrl_offline_raid_error_info; 6571 } 6572 6573 io_request->io_complete_callback(io_request, 6574 io_request->context); 6575 } 6576 } 6577 6578 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 6579 { 6580 pqi_perform_lockup_action(); 6581 pqi_stop_heartbeat_timer(ctrl_info); 6582 pqi_free_interrupts(ctrl_info); 6583 pqi_cancel_rescan_worker(ctrl_info); 6584 pqi_cancel_update_time_worker(ctrl_info); 6585 pqi_ctrl_wait_until_quiesced(ctrl_info); 6586 pqi_fail_all_outstanding_requests(ctrl_info); 6587 pqi_clear_all_queued_raid_bypass_retries(ctrl_info); 6588 pqi_ctrl_unblock_requests(ctrl_info); 6589 } 6590 6591 static void pqi_ctrl_offline_worker(struct work_struct *work) 6592 { 6593 struct pqi_ctrl_info *ctrl_info; 6594 6595 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 6596 pqi_take_ctrl_offline_deferred(ctrl_info); 6597 } 6598 6599 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 6600 { 6601 if (!ctrl_info->controller_online) 6602 return; 6603 6604 ctrl_info->controller_online = false; 6605 ctrl_info->pqi_mode_enabled = false; 6606 pqi_ctrl_block_requests(ctrl_info); 6607 if (!pqi_disable_ctrl_shutdown) 6608 sis_shutdown_ctrl(ctrl_info); 6609 pci_disable_device(ctrl_info->pci_dev); 6610 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); 6611 schedule_work(&ctrl_info->ctrl_offline_work); 6612 } 6613 6614 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 6615 const struct pci_device_id *id) 6616 { 6617 char *ctrl_description; 6618 6619 if (id->driver_data) 6620 ctrl_description = (char *)id->driver_data; 6621 else 6622 ctrl_description = "Microsemi Smart Family Controller"; 6623 6624 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 6625 } 6626 6627 static int pqi_pci_probe(struct pci_dev *pci_dev, 6628 const struct pci_device_id *id) 6629 { 6630 int rc; 6631 int node; 6632 struct pqi_ctrl_info *ctrl_info; 6633 6634 pqi_print_ctrl_info(pci_dev, id); 6635 6636 if (pqi_disable_device_id_wildcards && 6637 id->subvendor == PCI_ANY_ID && 6638 id->subdevice == PCI_ANY_ID) { 6639 dev_warn(&pci_dev->dev, 6640 "controller not probed because device ID wildcards are disabled\n"); 6641 return -ENODEV; 6642 } 6643 6644 if (id->subvendor == PCI_ANY_ID || 
id->subdevice == PCI_ANY_ID) 6645 dev_warn(&pci_dev->dev, 6646 "controller device ID matched using wildcards\n"); 6647 6648 node = dev_to_node(&pci_dev->dev); 6649 if (node == NUMA_NO_NODE) 6650 set_dev_node(&pci_dev->dev, 0); 6651 6652 ctrl_info = pqi_alloc_ctrl_info(node); 6653 if (!ctrl_info) { 6654 dev_err(&pci_dev->dev, 6655 "failed to allocate controller info block\n"); 6656 return -ENOMEM; 6657 } 6658 6659 ctrl_info->pci_dev = pci_dev; 6660 6661 rc = pqi_pci_init(ctrl_info); 6662 if (rc) 6663 goto error; 6664 6665 rc = pqi_ctrl_init(ctrl_info); 6666 if (rc) 6667 goto error; 6668 6669 return 0; 6670 6671 error: 6672 pqi_remove_ctrl(ctrl_info); 6673 6674 return rc; 6675 } 6676 6677 static void pqi_pci_remove(struct pci_dev *pci_dev) 6678 { 6679 struct pqi_ctrl_info *ctrl_info; 6680 6681 ctrl_info = pci_get_drvdata(pci_dev); 6682 if (!ctrl_info) 6683 return; 6684 6685 pqi_remove_ctrl(ctrl_info); 6686 } 6687 6688 static void pqi_shutdown(struct pci_dev *pci_dev) 6689 { 6690 int rc; 6691 struct pqi_ctrl_info *ctrl_info; 6692 6693 ctrl_info = pci_get_drvdata(pci_dev); 6694 if (!ctrl_info) 6695 goto error; 6696 6697 /* 6698 * Write all data in the controller's battery-backed cache to 6699 * storage. 6700 */ 6701 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); 6702 pqi_reset(ctrl_info); 6703 if (rc == 0) 6704 return; 6705 6706 error: 6707 dev_warn(&pci_dev->dev, 6708 "unable to flush controller cache\n"); 6709 } 6710 6711 static void pqi_process_lockup_action_param(void) 6712 { 6713 unsigned int i; 6714 6715 if (!pqi_lockup_action_param) 6716 return; 6717 6718 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6719 if (strcmp(pqi_lockup_action_param, 6720 pqi_lockup_actions[i].name) == 0) { 6721 pqi_lockup_action = pqi_lockup_actions[i].action; 6722 return; 6723 } 6724 } 6725 6726 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 6727 DRIVER_NAME_SHORT, pqi_lockup_action_param); 6728 } 6729 6730 static void pqi_process_module_params(void) 6731 { 6732 pqi_process_lockup_action_param(); 6733 } 6734 6735 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) 6736 { 6737 struct pqi_ctrl_info *ctrl_info; 6738 6739 ctrl_info = pci_get_drvdata(pci_dev); 6740 6741 pqi_disable_events(ctrl_info); 6742 pqi_cancel_update_time_worker(ctrl_info); 6743 pqi_cancel_rescan_worker(ctrl_info); 6744 pqi_wait_until_scan_finished(ctrl_info); 6745 pqi_wait_until_lun_reset_finished(ctrl_info); 6746 pqi_flush_cache(ctrl_info, SUSPEND); 6747 pqi_ctrl_block_requests(ctrl_info); 6748 pqi_ctrl_wait_until_quiesced(ctrl_info); 6749 pqi_wait_until_inbound_queues_empty(ctrl_info); 6750 pqi_ctrl_wait_for_pending_io(ctrl_info); 6751 pqi_stop_heartbeat_timer(ctrl_info); 6752 6753 if (state.event == PM_EVENT_FREEZE) 6754 return 0; 6755 6756 pci_save_state(pci_dev); 6757 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 6758 6759 ctrl_info->controller_online = false; 6760 ctrl_info->pqi_mode_enabled = false; 6761 6762 return 0; 6763 } 6764 6765 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) 6766 { 6767 int rc; 6768 struct pqi_ctrl_info *ctrl_info; 6769 6770 ctrl_info = pci_get_drvdata(pci_dev); 6771 6772 if (pci_dev->current_state != PCI_D0) { 6773 ctrl_info->max_hw_queue_index = 0; 6774 pqi_free_interrupts(ctrl_info); 6775 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); 6776 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, 6777 IRQF_SHARED, DRIVER_NAME_SHORT, 6778 &ctrl_info->queue_groups[0]); 6779 if (rc) { 6780 
dev_err(&ctrl_info->pci_dev->dev, 6781 "irq %u init failed with error %d\n", 6782 pci_dev->irq, rc); 6783 return rc; 6784 } 6785 pqi_start_heartbeat_timer(ctrl_info); 6786 pqi_ctrl_unblock_requests(ctrl_info); 6787 return 0; 6788 } 6789 6790 pci_set_power_state(pci_dev, PCI_D0); 6791 pci_restore_state(pci_dev); 6792 6793 return pqi_ctrl_init_resume(ctrl_info); 6794 } 6795 6796 /* Define the PCI IDs for the controllers that we support. */ 6797 static const struct pci_device_id pqi_pci_id_table[] = { 6798 { 6799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6800 0x152d, 0x8a22) 6801 }, 6802 { 6803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6804 0x152d, 0x8a23) 6805 }, 6806 { 6807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6808 0x152d, 0x8a24) 6809 }, 6810 { 6811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6812 0x152d, 0x8a36) 6813 }, 6814 { 6815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6816 0x152d, 0x8a37) 6817 }, 6818 { 6819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6820 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 6821 }, 6822 { 6823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6824 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 6825 }, 6826 { 6827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6828 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 6829 }, 6830 { 6831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6832 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 6833 }, 6834 { 6835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6836 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 6837 }, 6838 { 6839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6840 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 6841 }, 6842 { 6843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6844 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 6845 }, 6846 { 6847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6848 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 6849 }, 6850 { 6851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6852 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 6853 }, 6854 { 6855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6856 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 6857 }, 6858 { 6859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6860 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 6861 }, 6862 { 6863 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6864 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 6865 }, 6866 { 6867 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6868 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 6869 }, 6870 { 6871 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6872 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 6873 }, 6874 { 6875 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6876 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 6877 }, 6878 { 6879 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6880 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 6881 }, 6882 { 6883 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6884 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 6885 }, 6886 { 6887 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6888 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 6889 }, 6890 { 6891 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6892 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 6893 }, 6894 { 6895 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6896 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 6897 }, 6898 { 6899 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6900 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 6901 }, 6902 { 6903 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6904 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 6905 }, 6906 { 6907 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6908 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 6909 }, 6910 { 6911 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6912 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 6913 }, 6914 { 6915 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6916 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 6917 }, 6918 { 
6919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6920 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 6921 }, 6922 { 6923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6924 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 6925 }, 6926 { 6927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6928 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 6929 }, 6930 { 6931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6932 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 6933 }, 6934 { 6935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6936 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 6937 }, 6938 { 6939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6940 PCI_VENDOR_ID_DELL, 0x1fe0) 6941 }, 6942 { 6943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6944 PCI_VENDOR_ID_HP, 0x0600) 6945 }, 6946 { 6947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6948 PCI_VENDOR_ID_HP, 0x0601) 6949 }, 6950 { 6951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6952 PCI_VENDOR_ID_HP, 0x0602) 6953 }, 6954 { 6955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6956 PCI_VENDOR_ID_HP, 0x0603) 6957 }, 6958 { 6959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6960 PCI_VENDOR_ID_HP, 0x0609) 6961 }, 6962 { 6963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6964 PCI_VENDOR_ID_HP, 0x0650) 6965 }, 6966 { 6967 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6968 PCI_VENDOR_ID_HP, 0x0651) 6969 }, 6970 { 6971 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6972 PCI_VENDOR_ID_HP, 0x0652) 6973 }, 6974 { 6975 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6976 PCI_VENDOR_ID_HP, 0x0653) 6977 }, 6978 { 6979 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6980 PCI_VENDOR_ID_HP, 0x0654) 6981 }, 6982 { 6983 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6984 PCI_VENDOR_ID_HP, 0x0655) 6985 }, 6986 { 6987 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6988 PCI_VENDOR_ID_HP, 0x0700) 6989 }, 6990 { 6991 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6992 PCI_VENDOR_ID_HP, 0x0701) 6993 }, 6994 { 6995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6996 PCI_VENDOR_ID_HP, 0x1001) 6997 }, 6998 { 6999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7000 PCI_VENDOR_ID_HP, 0x1100) 7001 }, 7002 { 7003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7004 PCI_VENDOR_ID_HP, 0x1101) 7005 }, 7006 { 7007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7008 PCI_ANY_ID, PCI_ANY_ID) 7009 }, 7010 { 0 } 7011 }; 7012 7013 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 7014 7015 static struct pci_driver pqi_pci_driver = { 7016 .name = DRIVER_NAME_SHORT, 7017 .id_table = pqi_pci_id_table, 7018 .probe = pqi_pci_probe, 7019 .remove = pqi_pci_remove, 7020 .shutdown = pqi_shutdown, 7021 #if defined(CONFIG_PM) 7022 .suspend = pqi_suspend, 7023 .resume = pqi_resume, 7024 #endif 7025 }; 7026 7027 static int __init pqi_init(void) 7028 { 7029 int rc; 7030 7031 pr_info(DRIVER_NAME "\n"); 7032 7033 pqi_sas_transport_template = 7034 sas_attach_transport(&pqi_sas_transport_functions); 7035 if (!pqi_sas_transport_template) 7036 return -ENODEV; 7037 7038 pqi_process_module_params(); 7039 7040 rc = pci_register_driver(&pqi_pci_driver); 7041 if (rc) 7042 sas_release_transport(pqi_sas_transport_template); 7043 7044 return rc; 7045 } 7046 7047 static void __exit pqi_cleanup(void) 7048 { 7049 pci_unregister_driver(&pqi_pci_driver); 7050 sas_release_transport(pqi_sas_transport_template); 7051 } 7052 7053 module_init(pqi_init); 7054 module_exit(pqi_cleanup); 7055 7056 static void __attribute__((unused)) verify_structures(void) 7057 { 7058 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7059 sis_host_to_ctrl_doorbell) != 0x20); 7060 BUILD_BUG_ON(offsetof(struct 
pqi_ctrl_registers, 7061 sis_interrupt_mask) != 0x34); 7062 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7063 sis_ctrl_to_host_doorbell) != 0x9c); 7064 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7065 sis_ctrl_to_host_doorbell_clear) != 0xa0); 7066 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7067 sis_driver_scratch) != 0xb0); 7068 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7069 sis_firmware_status) != 0xbc); 7070 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7071 sis_mailbox) != 0x1000); 7072 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7073 pqi_registers) != 0x4000); 7074 7075 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7076 iu_type) != 0x0); 7077 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7078 iu_length) != 0x2); 7079 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7080 response_queue_id) != 0x4); 7081 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7082 work_area) != 0x6); 7083 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 7084 7085 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7086 status) != 0x0); 7087 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7088 service_response) != 0x1); 7089 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7090 data_present) != 0x2); 7091 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7092 reserved) != 0x3); 7093 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7094 residual_count) != 0x4); 7095 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7096 data_length) != 0x8); 7097 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7098 reserved1) != 0xa); 7099 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7100 data) != 0xc); 7101 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 7102 7103 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7104 data_in_result) != 0x0); 7105 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7106 data_out_result) != 0x1); 7107 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7108 reserved) != 0x2); 7109 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7110 status) != 0x5); 7111 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7112 status_qualifier) != 0x6); 7113 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7114 sense_data_length) != 0x8); 7115 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7116 response_data_length) != 0xa); 7117 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7118 data_in_transferred) != 0xc); 7119 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7120 data_out_transferred) != 0x10); 7121 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7122 data) != 0x14); 7123 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 7124 7125 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7126 signature) != 0x0); 7127 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7128 function_and_status_code) != 0x8); 7129 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7130 max_admin_iq_elements) != 0x10); 7131 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7132 max_admin_oq_elements) != 0x11); 7133 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7134 admin_iq_element_length) != 0x12); 7135 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7136 admin_oq_element_length) != 0x13); 7137 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7138 max_reset_timeout) != 0x14); 7139 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7140 legacy_intx_status) != 0x18); 7141 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7142 legacy_intx_mask_set) != 0x1c); 7143 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7144 legacy_intx_mask_clear) != 0x20); 7145 
BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7146 device_status) != 0x40); 7147 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7148 admin_iq_pi_offset) != 0x48); 7149 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7150 admin_oq_ci_offset) != 0x50); 7151 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7152 admin_iq_element_array_addr) != 0x58); 7153 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7154 admin_oq_element_array_addr) != 0x60); 7155 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7156 admin_iq_ci_addr) != 0x68); 7157 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7158 admin_oq_pi_addr) != 0x70); 7159 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7160 admin_iq_num_elements) != 0x78); 7161 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7162 admin_oq_num_elements) != 0x79); 7163 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7164 admin_queue_int_msg_num) != 0x7a); 7165 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7166 device_error) != 0x80); 7167 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7168 error_details) != 0x88); 7169 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7170 device_reset) != 0x90); 7171 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7172 power_action) != 0x94); 7173 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 7174 7175 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7176 header.iu_type) != 0); 7177 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7178 header.iu_length) != 2); 7179 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7180 header.work_area) != 6); 7181 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7182 request_id) != 8); 7183 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7184 function_code) != 10); 7185 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7186 data.report_device_capability.buffer_length) != 44); 7187 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7188 data.report_device_capability.sg_descriptor) != 48); 7189 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7190 data.create_operational_iq.queue_id) != 12); 7191 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7192 data.create_operational_iq.element_array_addr) != 16); 7193 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7194 data.create_operational_iq.ci_addr) != 24); 7195 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7196 data.create_operational_iq.num_elements) != 32); 7197 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7198 data.create_operational_iq.element_length) != 34); 7199 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7200 data.create_operational_iq.queue_protocol) != 36); 7201 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7202 data.create_operational_oq.queue_id) != 12); 7203 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7204 data.create_operational_oq.element_array_addr) != 16); 7205 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7206 data.create_operational_oq.pi_addr) != 24); 7207 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7208 data.create_operational_oq.num_elements) != 32); 7209 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7210 data.create_operational_oq.element_length) != 34); 7211 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7212 data.create_operational_oq.queue_protocol) != 36); 7213 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7214 data.create_operational_oq.int_msg_num) != 40); 7215 BUILD_BUG_ON(offsetof(struct 
pqi_general_admin_request, 7216 data.create_operational_oq.coalescing_count) != 42); 7217 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7218 data.create_operational_oq.min_coalescing_time) != 44); 7219 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7220 data.create_operational_oq.max_coalescing_time) != 48); 7221 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7222 data.delete_operational_queue.queue_id) != 12); 7223 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 7224 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7225 data.create_operational_iq) != 64 - 11); 7226 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7227 data.create_operational_oq) != 64 - 11); 7228 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7229 data.delete_operational_queue) != 64 - 11); 7230 7231 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7232 header.iu_type) != 0); 7233 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7234 header.iu_length) != 2); 7235 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7236 header.work_area) != 6); 7237 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7238 request_id) != 8); 7239 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7240 function_code) != 10); 7241 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7242 status) != 11); 7243 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7244 data.create_operational_iq.status_descriptor) != 12); 7245 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7246 data.create_operational_iq.iq_pi_offset) != 16); 7247 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7248 data.create_operational_oq.status_descriptor) != 12); 7249 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7250 data.create_operational_oq.oq_ci_offset) != 16); 7251 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 7252 7253 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7254 header.iu_type) != 0); 7255 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7256 header.iu_length) != 2); 7257 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7258 header.response_queue_id) != 4); 7259 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7260 header.work_area) != 6); 7261 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7262 request_id) != 8); 7263 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7264 nexus_id) != 10); 7265 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7266 buffer_length) != 12); 7267 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7268 lun_number) != 16); 7269 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7270 protocol_specific) != 24); 7271 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7272 error_index) != 27); 7273 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7274 cdb) != 32); 7275 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7276 sg_descriptors) != 64); 7277 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 7278 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7279 7280 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7281 header.iu_type) != 0); 7282 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7283 header.iu_length) != 2); 7284 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7285 header.response_queue_id) != 4); 7286 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7287 header.work_area) != 6); 7288 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7289 request_id) != 8); 7290 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7291 nexus_id) != 
12); 7292 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7293 buffer_length) != 16); 7294 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7295 data_encryption_key_index) != 22); 7296 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7297 encrypt_tweak_lower) != 24); 7298 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7299 encrypt_tweak_upper) != 28); 7300 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7301 cdb) != 32); 7302 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7303 error_index) != 48); 7304 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7305 num_sg_descriptors) != 50); 7306 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7307 cdb_length) != 51); 7308 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7309 lun_number) != 52); 7310 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7311 sg_descriptors) != 64); 7312 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 7313 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7314 7315 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7316 header.iu_type) != 0); 7317 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7318 header.iu_length) != 2); 7319 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7320 request_id) != 8); 7321 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7322 error_index) != 10); 7323 7324 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7325 header.iu_type) != 0); 7326 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7327 header.iu_length) != 2); 7328 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7329 header.response_queue_id) != 4); 7330 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7331 request_id) != 8); 7332 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7333 data.report_event_configuration.buffer_length) != 12); 7334 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7335 data.report_event_configuration.sg_descriptors) != 16); 7336 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7337 data.set_event_configuration.global_event_oq_id) != 10); 7338 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7339 data.set_event_configuration.buffer_length) != 12); 7340 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7341 data.set_event_configuration.sg_descriptors) != 16); 7342 7343 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7344 max_inbound_iu_length) != 6); 7345 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7346 max_outbound_iu_length) != 14); 7347 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 7348 7349 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7350 data_length) != 0); 7351 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7352 iq_arbitration_priority_support_bitmask) != 8); 7353 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7354 maximum_aw_a) != 9); 7355 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7356 maximum_aw_b) != 10); 7357 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7358 maximum_aw_c) != 11); 7359 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7360 max_inbound_queues) != 16); 7361 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7362 max_elements_per_iq) != 18); 7363 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7364 max_iq_element_length) != 24); 7365 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7366 min_iq_element_length) != 26); 7367 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7368 max_outbound_queues) != 30); 7369 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7370 max_elements_per_oq) != 32); 7371 
BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7372 intr_coalescing_time_granularity) != 34); 7373 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7374 max_oq_element_length) != 36); 7375 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7376 min_oq_element_length) != 38); 7377 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7378 iu_layer_descriptors) != 64); 7379 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 7380 7381 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7382 event_type) != 0); 7383 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7384 oq_id) != 2); 7385 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 7386 7387 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7388 num_event_descriptors) != 2); 7389 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7390 descriptors) != 4); 7391 7392 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 7393 ARRAY_SIZE(pqi_supported_event_types)); 7394 7395 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7396 header.iu_type) != 0); 7397 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7398 header.iu_length) != 2); 7399 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7400 event_type) != 8); 7401 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7402 event_id) != 10); 7403 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7404 additional_event_id) != 12); 7405 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7406 data) != 16); 7407 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 7408 7409 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7410 header.iu_type) != 0); 7411 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7412 header.iu_length) != 2); 7413 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7414 event_type) != 8); 7415 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7416 event_id) != 10); 7417 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7418 additional_event_id) != 12); 7419 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 7420 7421 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7422 header.iu_type) != 0); 7423 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7424 header.iu_length) != 2); 7425 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7426 request_id) != 8); 7427 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7428 nexus_id) != 10); 7429 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7430 lun_number) != 16); 7431 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7432 protocol_specific) != 24); 7433 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7434 outbound_queue_id_to_manage) != 26); 7435 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7436 request_id_to_manage) != 28); 7437 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7438 task_management_function) != 30); 7439 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 7440 7441 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7442 header.iu_type) != 0); 7443 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7444 header.iu_length) != 2); 7445 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7446 request_id) != 8); 7447 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7448 nexus_id) != 10); 7449 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7450 additional_response_info) != 12); 7451 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7452 response_code) != 15); 7453 BUILD_BUG_ON(sizeof(struct 
pqi_task_management_response) != 16); 7454 7455 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7456 configured_logical_drive_count) != 0); 7457 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7458 configuration_signature) != 1); 7459 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7460 firmware_version) != 5); 7461 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7462 extended_logical_unit_count) != 154); 7463 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7464 firmware_build_number) != 190); 7465 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7466 controller_mode) != 292); 7467 7468 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7469 phys_bay_in_box) != 115); 7470 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7471 device_type) != 120); 7472 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7473 redundant_path_present_map) != 1736); 7474 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7475 active_path_number) != 1738); 7476 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7477 alternate_paths_phys_connector) != 1739); 7478 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7479 alternate_paths_phys_box_on_port) != 1755); 7480 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7481 current_queue_depth_limit) != 1796); 7482 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 7483 7484 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 7485 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 7486 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 7487 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7488 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 7489 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7490 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 7491 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 7492 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7493 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 7494 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 7495 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7496 7497 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 7498 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 7499 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 7500 } 7501