/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016-2017 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.4-070"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		70

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}
static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
					struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

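/*
 * Build a RAID-path request for the given BMIC/CISS command and DMA-map the
 * data buffer into the request's first SG descriptor.  On success the caller
 * must release the mapping with pqi_pci_unmap() once the request completes.
 */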
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		/* fall through */
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

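/*
 * Send a single BMIC/CISS command down the RAID path and wait for it to
 * complete.  The supplied data buffer is mapped for the duration of the
 * request and unmapped before returning.
 */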
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info,
	unsigned long timeout_msecs)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

/* Helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}


static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}

static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, SCSI_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
		bufsize = pages + SCSI_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
		buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == vpd_page)
			goto exit_supported;

exit_unsupported:
	kfree(buf);
	return false;

exit_supported:
	kfree(buf);
	return true;
}

static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u8 *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
		return 1; /* function not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
		VPD_PAGE | SCSI_VPD_DEVICE_ID,
		buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
	}

	kfree(buf);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
	return rc;
}

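/*
 * Ask the controller firmware to flush its write cache.  The shutdown event
 * code tells the firmware why the flush was requested.
 */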
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1UL<<31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
		diag, sizeof(*diag));
out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

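/*
 * Report the current local time to the controller through the BMIC host
 * wellness interface.  The time fields are encoded in BCD.
 */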
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

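/*
 * Fetch a complete CISS LUN list.  The list header is read first to learn the
 * list length; if the list grows between that read and the full read, the
 * buffer is reallocated and the request is retried.
 */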
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

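/*
 * Derive the SCSI bus/target/lun nexus from the device's 8-byte CISS LUN
 * address.  Physical devices other than the controller are deferred to the
 * SAS transport layer.
 */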
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

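/*
 * Read the RAID map for a logical volume.  If the firmware reports a map
 * larger than the default structure, the buffer is reallocated and the map
 * is re-read at its full size before being validated and attached to the
 * device.
 */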
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);

	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS	4
#define RAID_BYPASS_CONFIGURED	0x1
#define RAID_BYPASS_ENABLED	0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_INQUIRY_PAGE0_RETRIES	3

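/*
 * Identify a device with a standard INQUIRY and, for internal logical
 * volumes, gather its RAID level, bypass status, and volume status.
 */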
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	unsigned int retries;

	if (device->is_expander_smp_device)
		return 0;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	for (retries = 0;;) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
			buffer, 64);
		if (rc == 0)
			break;
		if (pqi_is_logical_device(device) ||
			rc != PQI_CMD_STATUS_ABORTED ||
			++retries > PQI_INQUIRY_PAGE0_RETRIES)
			goto out;
	}

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
		device->unique_id, sizeof(device->unique_id)) < 0)
		dev_warn(&ctrl_info->pci_dev->dev,
			"Can't get device id for scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no,
			device->bus, device->target,
			device->lun);

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
		PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP    ";

	return scsi_device_type(device->devtype);
}

#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += snprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += snprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		if (pqi_is_device_added(device))
			pqi_remove_device(ctrl_info, device);
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			pqi_dev_info(ctrl_info, "added", device);
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}

static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported;

	if (device->is_expander_smp_device)
		return true;

	is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, for
		 * example), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	if (!device->is_physical_device)
		return false;

	if (device->is_expander_smp_device)
		return true;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_ENCLOSURE:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}

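/*
 * Rebuild the driver's view of attached devices from fresh physical and
 * logical LUN reports and reconcile the result with the existing device list.
 */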
get_unaligned_be32(&logdev_list->header.list_length) 2015 / sizeof(logdev_list->lun_entries[0]); 2016 else 2017 num_logicals = 0; 2018 2019 if (num_physicals) { 2020 /* 2021 * We need this buffer for calls to pqi_get_physical_disk_info() 2022 * below. We allocate it here instead of inside 2023 * pqi_get_physical_disk_info() because it's a fairly large 2024 * buffer. 2025 */ 2026 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2027 if (!id_phys) { 2028 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2029 out_of_memory_msg); 2030 rc = -ENOMEM; 2031 goto out; 2032 } 2033 } 2034 2035 num_new_devices = num_physicals + num_logicals; 2036 2037 new_device_list = kmalloc_array(num_new_devices, 2038 sizeof(*new_device_list), 2039 GFP_KERNEL); 2040 if (!new_device_list) { 2041 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2042 rc = -ENOMEM; 2043 goto out; 2044 } 2045 2046 for (i = 0; i < num_new_devices; i++) { 2047 device = kzalloc(sizeof(*device), GFP_KERNEL); 2048 if (!device) { 2049 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2050 out_of_memory_msg); 2051 rc = -ENOMEM; 2052 goto out; 2053 } 2054 list_add_tail(&device->new_device_list_entry, 2055 &new_device_list_head); 2056 } 2057 2058 device = NULL; 2059 num_valid_devices = 0; 2060 2061 for (i = 0; i < num_new_devices; i++) { 2062 2063 if (i < num_physicals) { 2064 is_physical_device = true; 2065 phys_lun_ext_entry = &physdev_list->lun_entries[i]; 2066 log_lun_ext_entry = NULL; 2067 scsi3addr = phys_lun_ext_entry->lunid; 2068 } else { 2069 is_physical_device = false; 2070 phys_lun_ext_entry = NULL; 2071 log_lun_ext_entry = 2072 &logdev_list->lun_entries[i - num_physicals]; 2073 scsi3addr = log_lun_ext_entry->lunid; 2074 } 2075 2076 if (is_physical_device && pqi_skip_device(scsi3addr)) 2077 continue; 2078 2079 if (device) 2080 device = list_next_entry(device, new_device_list_entry); 2081 else 2082 device = list_first_entry(&new_device_list_head, 2083 struct pqi_scsi_dev, new_device_list_entry); 2084 2085 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2086 device->is_physical_device = is_physical_device; 2087 if (is_physical_device) { 2088 if (phys_lun_ext_entry->device_type == 2089 SA_EXPANDER_SMP_DEVICE) 2090 device->is_expander_smp_device = true; 2091 } else { 2092 device->is_external_raid_device = 2093 pqi_is_external_raid_addr(scsi3addr); 2094 } 2095 2096 /* Gather information about the device. 
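(At this point the combined index has already been split: with assumed counts of num_physicals = 3 and num_logicals = 2, loop indexes 0-2 select physdev_list->lun_entries[0..2] and indexes 3-4 select logdev_list->lun_entries[0..1]; masked physical LUNs are skipped before a pre-allocated device slot is consumed.)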
*/ 2097 rc = pqi_get_device_info(ctrl_info, device); 2098 if (rc == -ENOMEM) { 2099 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2100 out_of_memory_msg); 2101 goto out; 2102 } 2103 if (rc) { 2104 if (device->is_physical_device) 2105 dev_warn(&ctrl_info->pci_dev->dev, 2106 "obtaining device info failed, skipping physical device %016llx\n", 2107 get_unaligned_be64( 2108 &phys_lun_ext_entry->wwid)); 2109 else 2110 dev_warn(&ctrl_info->pci_dev->dev, 2111 "obtaining device info failed, skipping logical device %08x%08x\n", 2112 *((u32 *)&device->scsi3addr), 2113 *((u32 *)&device->scsi3addr[4])); 2114 rc = 0; 2115 continue; 2116 } 2117 2118 if (!pqi_is_supported_device(device)) 2119 continue; 2120 2121 pqi_assign_bus_target_lun(device); 2122 2123 if (device->is_physical_device) { 2124 device->wwid = phys_lun_ext_entry->wwid; 2125 if ((phys_lun_ext_entry->device_flags & 2126 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && 2127 phys_lun_ext_entry->aio_handle) { 2128 device->aio_enabled = true; 2129 device->aio_handle = 2130 phys_lun_ext_entry->aio_handle; 2131 } 2132 if (device->devtype == TYPE_DISK || 2133 device->devtype == TYPE_ZBC) { 2134 pqi_get_physical_disk_info(ctrl_info, 2135 device, id_phys); 2136 } 2137 } else { 2138 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 2139 sizeof(device->volume_id)); 2140 } 2141 2142 if (pqi_is_device_with_sas_address(device)) 2143 device->sas_address = get_unaligned_be64(&device->wwid); 2144 2145 new_device_list[num_valid_devices++] = device; 2146 } 2147 2148 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2149 2150 out: 2151 list_for_each_entry_safe(device, next, &new_device_list_head, 2152 new_device_list_entry) { 2153 if (device->keep_device) 2154 continue; 2155 list_del(&device->new_device_list_entry); 2156 pqi_free_device(device); 2157 } 2158 2159 kfree(new_device_list); 2160 kfree(physdev_list); 2161 kfree(logdev_list); 2162 kfree(id_phys); 2163 2164 return rc; 2165 } 2166 2167 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2168 { 2169 unsigned long flags; 2170 struct pqi_scsi_dev *device; 2171 2172 while (1) { 2173 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2174 2175 device = list_first_entry_or_null(&ctrl_info->scsi_device_list, 2176 struct pqi_scsi_dev, scsi_device_list_entry); 2177 if (device) 2178 list_del(&device->scsi_device_list_entry); 2179 2180 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 2181 flags); 2182 2183 if (!device) 2184 break; 2185 2186 if (pqi_is_device_added(device)) 2187 pqi_remove_device(ctrl_info, device); 2188 pqi_free_device(device); 2189 } 2190 } 2191 2192 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2193 { 2194 int rc; 2195 2196 if (pqi_ctrl_offline(ctrl_info)) 2197 return -ENXIO; 2198 2199 mutex_lock(&ctrl_info->scan_mutex); 2200 2201 rc = pqi_update_scsi_devices(ctrl_info); 2202 if (rc) 2203 pqi_schedule_rescan_worker_delayed(ctrl_info); 2204 2205 mutex_unlock(&ctrl_info->scan_mutex); 2206 2207 return rc; 2208 } 2209 2210 static void pqi_scan_start(struct Scsi_Host *shost) 2211 { 2212 struct pqi_ctrl_info *ctrl_info; 2213 2214 ctrl_info = shost_to_hba(shost); 2215 if (pqi_ctrl_in_ofa(ctrl_info)) 2216 return; 2217 2218 pqi_scan_scsi_devices(ctrl_info); 2219 } 2220 2221 /* Returns TRUE if scan is finished. 
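scan_mutex doubles as the in-progress flag here: pqi_scan_scsi_devices() holds it for the entire device update, so mutex_is_locked() below is a sufficient test, and pqi_wait_until_scan_finished() relies on the same lock-then-unlock pattern.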
*/ 2222 2223 static int pqi_scan_finished(struct Scsi_Host *shost, 2224 unsigned long elapsed_time) 2225 { 2226 struct pqi_ctrl_info *ctrl_info; 2227 2228 ctrl_info = shost_priv(shost); 2229 2230 return !mutex_is_locked(&ctrl_info->scan_mutex); 2231 } 2232 2233 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2234 { 2235 mutex_lock(&ctrl_info->scan_mutex); 2236 mutex_unlock(&ctrl_info->scan_mutex); 2237 } 2238 2239 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2240 { 2241 mutex_lock(&ctrl_info->lun_reset_mutex); 2242 mutex_unlock(&ctrl_info->lun_reset_mutex); 2243 } 2244 2245 static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 2246 { 2247 mutex_lock(&ctrl_info->ofa_mutex); 2248 mutex_unlock(&ctrl_info->ofa_mutex); 2249 } 2250 2251 static inline void pqi_set_encryption_info( 2252 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2253 u64 first_block) 2254 { 2255 u32 volume_blk_size; 2256 2257 /* 2258 * Set the encryption tweak values based on logical block address. 2259 * If the block size is 512, the tweak value is equal to the LBA. 2260 * For other block sizes, tweak value is (LBA * block size) / 512. 2261 */ 2262 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2263 if (volume_blk_size != 512) 2264 first_block = (first_block * volume_blk_size) / 512; 2265 2266 encryption_info->data_encryption_key_index = 2267 get_unaligned_le16(&raid_map->data_encryption_key_index); 2268 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2269 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2270 } 2271 2272 /* 2273 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2274 */ 2275 2276 #define PQI_RAID_BYPASS_INELIGIBLE 1 2277 2278 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2279 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2280 struct pqi_queue_group *queue_group) 2281 { 2282 struct raid_map *raid_map; 2283 bool is_write = false; 2284 u32 map_index; 2285 u64 first_block; 2286 u64 last_block; 2287 u32 block_cnt; 2288 u32 blocks_per_row; 2289 u64 first_row; 2290 u64 last_row; 2291 u32 first_row_offset; 2292 u32 last_row_offset; 2293 u32 first_column; 2294 u32 last_column; 2295 u64 r0_first_row; 2296 u64 r0_last_row; 2297 u32 r5or6_blocks_per_row; 2298 u64 r5or6_first_row; 2299 u64 r5or6_last_row; 2300 u32 r5or6_first_row_offset; 2301 u32 r5or6_last_row_offset; 2302 u32 r5or6_first_column; 2303 u32 r5or6_last_column; 2304 u16 data_disks_per_row; 2305 u32 total_disks_per_row; 2306 u16 layout_map_count; 2307 u32 stripesize; 2308 u16 strip_size; 2309 u32 first_group; 2310 u32 last_group; 2311 u32 current_group; 2312 u32 map_row; 2313 u32 aio_handle; 2314 u64 disk_block; 2315 u32 disk_block_cnt; 2316 u8 cdb[16]; 2317 u8 cdb_length; 2318 int offload_to_mirror; 2319 struct pqi_encryption_info *encryption_info_ptr; 2320 struct pqi_encryption_info encryption_info; 2321 #if BITS_PER_LONG == 32 2322 u64 tmpdiv; 2323 #endif 2324 2325 /* Check for valid opcode, get LBA and block count. 
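Illustrative decode (made-up CDB, not from a real request): a READ_10 with bytes 2-5 = 00 00 10 00 and bytes 7-8 = 00 08 yields first_block = 4096 and block_cnt = 8; for READ_6/WRITE_6 the LBA is only 21 bits and a block count of 0 means 256 blocks.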
*/ 2326 switch (scmd->cmnd[0]) { 2327 case WRITE_6: 2328 is_write = true; 2329 /* fall through */ 2330 case READ_6: 2331 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2332 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2333 block_cnt = (u32)scmd->cmnd[4]; 2334 if (block_cnt == 0) 2335 block_cnt = 256; 2336 break; 2337 case WRITE_10: 2338 is_write = true; 2339 /* fall through */ 2340 case READ_10: 2341 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2342 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2343 break; 2344 case WRITE_12: 2345 is_write = true; 2346 /* fall through */ 2347 case READ_12: 2348 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2349 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2350 break; 2351 case WRITE_16: 2352 is_write = true; 2353 /* fall through */ 2354 case READ_16: 2355 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2356 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2357 break; 2358 default: 2359 /* Process via normal I/O path. */ 2360 return PQI_RAID_BYPASS_INELIGIBLE; 2361 } 2362 2363 /* Check for write to non-RAID-0. */ 2364 if (is_write && device->raid_level != SA_RAID_0) 2365 return PQI_RAID_BYPASS_INELIGIBLE; 2366 2367 if (unlikely(block_cnt == 0)) 2368 return PQI_RAID_BYPASS_INELIGIBLE; 2369 2370 last_block = first_block + block_cnt - 1; 2371 raid_map = device->raid_map; 2372 2373 /* Check for invalid block or wraparound. */ 2374 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2375 last_block < first_block) 2376 return PQI_RAID_BYPASS_INELIGIBLE; 2377 2378 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2379 strip_size = get_unaligned_le16(&raid_map->strip_size); 2380 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2381 2382 /* Calculate stripe information for the request. */ 2383 blocks_per_row = data_disks_per_row * strip_size; 2384 #if BITS_PER_LONG == 32 2385 tmpdiv = first_block; 2386 do_div(tmpdiv, blocks_per_row); 2387 first_row = tmpdiv; 2388 tmpdiv = last_block; 2389 do_div(tmpdiv, blocks_per_row); 2390 last_row = tmpdiv; 2391 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2392 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2393 tmpdiv = first_row_offset; 2394 do_div(tmpdiv, strip_size); 2395 first_column = tmpdiv; 2396 tmpdiv = last_row_offset; 2397 do_div(tmpdiv, strip_size); 2398 last_column = tmpdiv; 2399 #else 2400 first_row = first_block / blocks_per_row; 2401 last_row = last_block / blocks_per_row; 2402 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2403 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2404 first_column = first_row_offset / strip_size; 2405 last_column = last_row_offset / strip_size; 2406 #endif 2407 2408 /* If this isn't a single row/column then give to the controller. */ 2409 if (first_row != last_row || first_column != last_column) 2410 return PQI_RAID_BYPASS_INELIGIBLE; 2411 2412 /* Proceeding with driver mapping. 
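Worked example with assumed map values: data_disks_per_row = 4 and strip_size = 128 give blocks_per_row = 512, so an 8-block read at first_block = 1000 computes first_row = last_row = 1, first_row_offset = 488, last_row_offset = 495 and first_column = last_column = 3, which is why it survived the single row/column check above.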
*/ 2413 total_disks_per_row = data_disks_per_row + 2414 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2415 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2416 get_unaligned_le16(&raid_map->row_cnt); 2417 map_index = (map_row * total_disks_per_row) + first_column; 2418 2419 /* RAID 1 */ 2420 if (device->raid_level == SA_RAID_1) { 2421 if (device->offload_to_mirror) 2422 map_index += data_disks_per_row; 2423 device->offload_to_mirror = !device->offload_to_mirror; 2424 } else if (device->raid_level == SA_RAID_ADM) { 2425 /* RAID ADM */ 2426 /* 2427 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2428 * divisible by 3. 2429 */ 2430 offload_to_mirror = device->offload_to_mirror; 2431 if (offload_to_mirror == 0) { 2432 /* use physical disk in the first mirrored group. */ 2433 map_index %= data_disks_per_row; 2434 } else { 2435 do { 2436 /* 2437 * Determine mirror group that map_index 2438 * indicates. 2439 */ 2440 current_group = map_index / data_disks_per_row; 2441 2442 if (offload_to_mirror != current_group) { 2443 if (current_group < 2444 layout_map_count - 1) { 2445 /* 2446 * Select raid index from 2447 * next group. 2448 */ 2449 map_index += data_disks_per_row; 2450 current_group++; 2451 } else { 2452 /* 2453 * Select raid index from first 2454 * group. 2455 */ 2456 map_index %= data_disks_per_row; 2457 current_group = 0; 2458 } 2459 } 2460 } while (offload_to_mirror != current_group); 2461 } 2462 2463 /* Set mirror group to use next time. */ 2464 offload_to_mirror = 2465 (offload_to_mirror >= layout_map_count - 1) ? 2466 0 : offload_to_mirror + 1; 2467 WARN_ON(offload_to_mirror >= layout_map_count); 2468 device->offload_to_mirror = offload_to_mirror; 2469 /* 2470 * Avoid direct use of device->offload_to_mirror within this 2471 * function since multiple threads might simultaneously 2472 * increment it beyond the range of device->layout_map_count -1. 
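* This R1-ADM branch therefore works on a local copy and writes it
* back once; e.g. with an assumed layout_map_count of 3 the mirror
* group sequence is 0, 1, 2, 0, ... and map_index is moved into the
* selected group in whole steps of data_disks_per_row.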
2473 */ 2474 } else if ((device->raid_level == SA_RAID_5 || 2475 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2476 /* RAID 50/60 */ 2477 /* Verify first and last block are in same RAID group */ 2478 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2479 stripesize = r5or6_blocks_per_row * layout_map_count; 2480 #if BITS_PER_LONG == 32 2481 tmpdiv = first_block; 2482 first_group = do_div(tmpdiv, stripesize); 2483 tmpdiv = first_group; 2484 do_div(tmpdiv, r5or6_blocks_per_row); 2485 first_group = tmpdiv; 2486 tmpdiv = last_block; 2487 last_group = do_div(tmpdiv, stripesize); 2488 tmpdiv = last_group; 2489 do_div(tmpdiv, r5or6_blocks_per_row); 2490 last_group = tmpdiv; 2491 #else 2492 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2493 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2494 #endif 2495 if (first_group != last_group) 2496 return PQI_RAID_BYPASS_INELIGIBLE; 2497 2498 /* Verify request is in a single row of RAID 5/6 */ 2499 #if BITS_PER_LONG == 32 2500 tmpdiv = first_block; 2501 do_div(tmpdiv, stripesize); 2502 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2503 tmpdiv = last_block; 2504 do_div(tmpdiv, stripesize); 2505 r5or6_last_row = r0_last_row = tmpdiv; 2506 #else 2507 first_row = r5or6_first_row = r0_first_row = 2508 first_block / stripesize; 2509 r5or6_last_row = r0_last_row = last_block / stripesize; 2510 #endif 2511 if (r5or6_first_row != r5or6_last_row) 2512 return PQI_RAID_BYPASS_INELIGIBLE; 2513 2514 /* Verify request is in a single column */ 2515 #if BITS_PER_LONG == 32 2516 tmpdiv = first_block; 2517 first_row_offset = do_div(tmpdiv, stripesize); 2518 tmpdiv = first_row_offset; 2519 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2520 r5or6_first_row_offset = first_row_offset; 2521 tmpdiv = last_block; 2522 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2523 tmpdiv = r5or6_last_row_offset; 2524 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2525 tmpdiv = r5or6_first_row_offset; 2526 do_div(tmpdiv, strip_size); 2527 first_column = r5or6_first_column = tmpdiv; 2528 tmpdiv = r5or6_last_row_offset; 2529 do_div(tmpdiv, strip_size); 2530 r5or6_last_column = tmpdiv; 2531 #else 2532 first_row_offset = r5or6_first_row_offset = 2533 (u32)((first_block % stripesize) % 2534 r5or6_blocks_per_row); 2535 2536 r5or6_last_row_offset = 2537 (u32)((last_block % stripesize) % 2538 r5or6_blocks_per_row); 2539 2540 first_column = r5or6_first_row_offset / strip_size; 2541 r5or6_first_column = first_column; 2542 r5or6_last_column = r5or6_last_row_offset / strip_size; 2543 #endif 2544 if (r5or6_first_column != r5or6_last_column) 2545 return PQI_RAID_BYPASS_INELIGIBLE; 2546 2547 /* Request is eligible */ 2548 map_row = 2549 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2550 get_unaligned_le16(&raid_map->row_cnt); 2551 2552 map_index = (first_group * 2553 (get_unaligned_le16(&raid_map->row_cnt) * 2554 total_disks_per_row)) + 2555 (map_row * total_disks_per_row) + first_column; 2556 } 2557 2558 aio_handle = raid_map->disk_data[map_index].aio_handle; 2559 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2560 first_row * strip_size + 2561 (first_row_offset - first_column * strip_size); 2562 disk_block_cnt = block_cnt; 2563 2564 /* Handle differing logical/physical block sizes. 
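A non-zero phys_blk_shift scales the start block and count by 2^phys_blk_shift; an assumed shift of 3 (e.g. 4096-byte volume blocks over 512-byte drive sectors) multiplies disk_block and disk_block_cnt by 8. Further down, if RAID_MAP_ENCRYPTION_ENABLED is set, the tweak is derived from the original first_block: with an assumed 4096-byte volume_blk_size and first_block = 100 the tweak becomes (100 * 4096) / 512 = 800.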
*/ 2565 if (raid_map->phys_blk_shift) { 2566 disk_block <<= raid_map->phys_blk_shift; 2567 disk_block_cnt <<= raid_map->phys_blk_shift; 2568 } 2569 2570 if (unlikely(disk_block_cnt > 0xffff)) 2571 return PQI_RAID_BYPASS_INELIGIBLE; 2572 2573 /* Build the new CDB for the physical disk I/O. */ 2574 if (disk_block > 0xffffffff) { 2575 cdb[0] = is_write ? WRITE_16 : READ_16; 2576 cdb[1] = 0; 2577 put_unaligned_be64(disk_block, &cdb[2]); 2578 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2579 cdb[14] = 0; 2580 cdb[15] = 0; 2581 cdb_length = 16; 2582 } else { 2583 cdb[0] = is_write ? WRITE_10 : READ_10; 2584 cdb[1] = 0; 2585 put_unaligned_be32((u32)disk_block, &cdb[2]); 2586 cdb[6] = 0; 2587 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2588 cdb[9] = 0; 2589 cdb_length = 10; 2590 } 2591 2592 if (get_unaligned_le16(&raid_map->flags) & 2593 RAID_MAP_ENCRYPTION_ENABLED) { 2594 pqi_set_encryption_info(&encryption_info, raid_map, 2595 first_block); 2596 encryption_info_ptr = &encryption_info; 2597 } else { 2598 encryption_info_ptr = NULL; 2599 } 2600 2601 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2602 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2603 } 2604 2605 #define PQI_STATUS_IDLE 0x0 2606 2607 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2608 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2609 2610 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2611 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2612 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2613 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2614 #define PQI_DEVICE_STATE_ERROR 0x4 2615 2616 #define PQI_MODE_READY_TIMEOUT_SECS 30 2617 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2618 2619 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2620 { 2621 struct pqi_device_registers __iomem *pqi_registers; 2622 unsigned long timeout; 2623 u64 signature; 2624 u8 status; 2625 2626 pqi_registers = ctrl_info->pqi_registers; 2627 timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies; 2628 2629 while (1) { 2630 signature = readq(&pqi_registers->signature); 2631 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2632 sizeof(signature)) == 0) 2633 break; 2634 if (time_after(jiffies, timeout)) { 2635 dev_err(&ctrl_info->pci_dev->dev, 2636 "timed out waiting for PQI signature\n"); 2637 return -ETIMEDOUT; 2638 } 2639 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2640 } 2641 2642 while (1) { 2643 status = readb(&pqi_registers->function_and_status_code); 2644 if (status == PQI_STATUS_IDLE) 2645 break; 2646 if (time_after(jiffies, timeout)) { 2647 dev_err(&ctrl_info->pci_dev->dev, 2648 "timed out waiting for PQI IDLE\n"); 2649 return -ETIMEDOUT; 2650 } 2651 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2652 } 2653 2654 while (1) { 2655 if (readl(&pqi_registers->device_status) == 2656 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2657 break; 2658 if (time_after(jiffies, timeout)) { 2659 dev_err(&ctrl_info->pci_dev->dev, 2660 "timed out waiting for PQI all registers ready\n"); 2661 return -ETIMEDOUT; 2662 } 2663 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2664 } 2665 2666 return 0; 2667 } 2668 2669 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2670 { 2671 struct pqi_scsi_dev *device; 2672 2673 device = io_request->scmd->device->hostdata; 2674 device->raid_bypass_enabled = false; 2675 device->aio_enabled = false; 2676 } 2677 2678 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2679 { 2680 struct pqi_ctrl_info *ctrl_info; 2681 struct pqi_scsi_dev *device; 2682 2683 device 
= sdev->hostdata; 2684 if (device->device_offline) 2685 return; 2686 2687 device->device_offline = true; 2688 ctrl_info = shost_to_hba(sdev->host); 2689 pqi_schedule_rescan_worker(ctrl_info); 2690 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 2691 path, ctrl_info->scsi_host->host_no, device->bus, 2692 device->target, device->lun); 2693 } 2694 2695 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2696 { 2697 u8 scsi_status; 2698 u8 host_byte; 2699 struct scsi_cmnd *scmd; 2700 struct pqi_raid_error_info *error_info; 2701 size_t sense_data_length; 2702 int residual_count; 2703 int xfer_count; 2704 struct scsi_sense_hdr sshdr; 2705 2706 scmd = io_request->scmd; 2707 if (!scmd) 2708 return; 2709 2710 error_info = io_request->error_info; 2711 scsi_status = error_info->status; 2712 host_byte = DID_OK; 2713 2714 switch (error_info->data_out_result) { 2715 case PQI_DATA_IN_OUT_GOOD: 2716 break; 2717 case PQI_DATA_IN_OUT_UNDERFLOW: 2718 xfer_count = 2719 get_unaligned_le32(&error_info->data_out_transferred); 2720 residual_count = scsi_bufflen(scmd) - xfer_count; 2721 scsi_set_resid(scmd, residual_count); 2722 if (xfer_count < scmd->underflow) 2723 host_byte = DID_SOFT_ERROR; 2724 break; 2725 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2726 case PQI_DATA_IN_OUT_ABORTED: 2727 host_byte = DID_ABORT; 2728 break; 2729 case PQI_DATA_IN_OUT_TIMEOUT: 2730 host_byte = DID_TIME_OUT; 2731 break; 2732 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2733 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2734 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2735 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2736 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2737 case PQI_DATA_IN_OUT_ERROR: 2738 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2739 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2740 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2741 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2742 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2743 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2744 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2745 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2746 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2747 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2748 default: 2749 host_byte = DID_ERROR; 2750 break; 2751 } 2752 2753 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2754 if (sense_data_length == 0) 2755 sense_data_length = 2756 get_unaligned_le16(&error_info->response_data_length); 2757 if (sense_data_length) { 2758 if (sense_data_length > sizeof(error_info->data)) 2759 sense_data_length = sizeof(error_info->data); 2760 2761 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2762 scsi_normalize_sense(error_info->data, 2763 sense_data_length, &sshdr) && 2764 sshdr.sense_key == HARDWARE_ERROR && 2765 sshdr.asc == 0x3e && 2766 sshdr.ascq == 0x1) { 2767 pqi_take_device_offline(scmd->device, "RAID"); 2768 host_byte = DID_NO_CONNECT; 2769 } 2770 2771 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2772 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2773 memcpy(scmd->sense_buffer, error_info->data, 2774 sense_data_length); 2775 } 2776 2777 scmd->result = scsi_status; 2778 set_host_byte(scmd, host_byte); 2779 } 2780 2781 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2782 { 2783 u8 scsi_status; 2784 u8 host_byte; 2785 struct scsi_cmnd *scmd; 2786 struct pqi_aio_error_info *error_info; 2787 size_t sense_data_length; 2788 int residual_count; 2789 int xfer_count; 2790 bool device_offline; 2791 2792 scmd = io_request->scmd; 2793 
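/*
 * Translate the AIO service response/status below into a SCSI status
 * plus host byte. AIO-path problems are handled specially:
 * PQI_AIO_STATUS_AIO_PATH_DISABLED marks the request -EAGAIN so it can
 * be retried on the RAID path, and NO_PATH_TO_DEVICE/INVALID_DEVICE
 * take the device offline unless the request was already a RAID bypass.
 */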
error_info = io_request->error_info; 2794 host_byte = DID_OK; 2795 sense_data_length = 0; 2796 device_offline = false; 2797 2798 switch (error_info->service_response) { 2799 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2800 scsi_status = error_info->status; 2801 break; 2802 case PQI_AIO_SERV_RESPONSE_FAILURE: 2803 switch (error_info->status) { 2804 case PQI_AIO_STATUS_IO_ABORTED: 2805 scsi_status = SAM_STAT_TASK_ABORTED; 2806 break; 2807 case PQI_AIO_STATUS_UNDERRUN: 2808 scsi_status = SAM_STAT_GOOD; 2809 residual_count = get_unaligned_le32( 2810 &error_info->residual_count); 2811 scsi_set_resid(scmd, residual_count); 2812 xfer_count = scsi_bufflen(scmd) - residual_count; 2813 if (xfer_count < scmd->underflow) 2814 host_byte = DID_SOFT_ERROR; 2815 break; 2816 case PQI_AIO_STATUS_OVERRUN: 2817 scsi_status = SAM_STAT_GOOD; 2818 break; 2819 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2820 pqi_aio_path_disabled(io_request); 2821 scsi_status = SAM_STAT_GOOD; 2822 io_request->status = -EAGAIN; 2823 break; 2824 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2825 case PQI_AIO_STATUS_INVALID_DEVICE: 2826 if (!io_request->raid_bypass) { 2827 device_offline = true; 2828 pqi_take_device_offline(scmd->device, "AIO"); 2829 host_byte = DID_NO_CONNECT; 2830 } 2831 scsi_status = SAM_STAT_CHECK_CONDITION; 2832 break; 2833 case PQI_AIO_STATUS_IO_ERROR: 2834 default: 2835 scsi_status = SAM_STAT_CHECK_CONDITION; 2836 break; 2837 } 2838 break; 2839 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2840 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2841 scsi_status = SAM_STAT_GOOD; 2842 break; 2843 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2844 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2845 default: 2846 scsi_status = SAM_STAT_CHECK_CONDITION; 2847 break; 2848 } 2849 2850 if (error_info->data_present) { 2851 sense_data_length = 2852 get_unaligned_le16(&error_info->data_length); 2853 if (sense_data_length) { 2854 if (sense_data_length > sizeof(error_info->data)) 2855 sense_data_length = sizeof(error_info->data); 2856 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2857 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2858 memcpy(scmd->sense_buffer, error_info->data, 2859 sense_data_length); 2860 } 2861 } 2862 2863 if (device_offline && sense_data_length == 0) 2864 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2865 0x3e, 0x1); 2866 2867 scmd->result = scsi_status; 2868 set_host_byte(scmd, host_byte); 2869 } 2870 2871 static void pqi_process_io_error(unsigned int iu_type, 2872 struct pqi_io_request *io_request) 2873 { 2874 switch (iu_type) { 2875 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2876 pqi_process_raid_io_error(io_request); 2877 break; 2878 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2879 pqi_process_aio_io_error(io_request); 2880 break; 2881 } 2882 } 2883 2884 static int pqi_interpret_task_management_response( 2885 struct pqi_task_management_response *response) 2886 { 2887 int rc; 2888 2889 switch (response->response_code) { 2890 case SOP_TMF_COMPLETE: 2891 case SOP_TMF_FUNCTION_SUCCEEDED: 2892 rc = 0; 2893 break; 2894 case SOP_TMF_REJECTED: 2895 rc = -EAGAIN; 2896 break; 2897 default: 2898 rc = -EIO; 2899 break; 2900 } 2901 2902 return rc; 2903 } 2904 2905 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2906 struct pqi_queue_group *queue_group) 2907 { 2908 unsigned int num_responses; 2909 pqi_index_t oq_pi; 2910 pqi_index_t oq_ci; 2911 struct pqi_io_request *io_request; 2912 struct pqi_io_response *response; 2913 u16 request_id; 2914 2915 num_responses = 0; 2916 oq_ci = queue_group->oq_ci_copy; 2917 2918 
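/*
 * Drain completions from oq_ci up to the controller-owned producer
 * index. Illustrative ring state (sizes assumed): with 16 elements,
 * oq_ci = 14 and oq_pi = 2, four responses at slots 14, 15, 0 and 1
 * are consumed before oq_ci is written back as the consumer doorbell.
 */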
while (1) { 2919 oq_pi = readl(queue_group->oq_pi); 2920 if (oq_pi == oq_ci) 2921 break; 2922 2923 num_responses++; 2924 response = queue_group->oq_element_array + 2925 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2926 2927 request_id = get_unaligned_le16(&response->request_id); 2928 WARN_ON(request_id >= ctrl_info->max_io_slots); 2929 2930 io_request = &ctrl_info->io_request_pool[request_id]; 2931 WARN_ON(atomic_read(&io_request->refcount) == 0); 2932 2933 switch (response->header.iu_type) { 2934 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2935 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2936 if (io_request->scmd) 2937 io_request->scmd->result = 0; 2938 /* fall through */ 2939 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2940 break; 2941 case PQI_RESPONSE_IU_VENDOR_GENERAL: 2942 io_request->status = 2943 get_unaligned_le16( 2944 &((struct pqi_vendor_general_response *) 2945 response)->status); 2946 break; 2947 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2948 io_request->status = 2949 pqi_interpret_task_management_response( 2950 (void *)response); 2951 break; 2952 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2953 pqi_aio_path_disabled(io_request); 2954 io_request->status = -EAGAIN; 2955 break; 2956 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2957 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2958 io_request->error_info = ctrl_info->error_buffer + 2959 (get_unaligned_le16(&response->error_index) * 2960 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2961 pqi_process_io_error(response->header.iu_type, 2962 io_request); 2963 break; 2964 default: 2965 dev_err(&ctrl_info->pci_dev->dev, 2966 "unexpected IU type: 0x%x\n", 2967 response->header.iu_type); 2968 break; 2969 } 2970 2971 io_request->io_complete_callback(io_request, 2972 io_request->context); 2973 2974 /* 2975 * Note that the I/O request structure CANNOT BE TOUCHED after 2976 * returning from the I/O completion callback! 
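* The callback may complete the SCSI command or wake a synchronous
* waiter that immediately frees or reuses this io_request_pool slot,
* so only the local oq_ci bookkeeping below is safe afterwards.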
2977 */ 2978 2979 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2980 } 2981 2982 if (num_responses) { 2983 queue_group->oq_ci_copy = oq_ci; 2984 writel(oq_ci, queue_group->oq_ci); 2985 } 2986 2987 return num_responses; 2988 } 2989 2990 static inline unsigned int pqi_num_elements_free(unsigned int pi, 2991 unsigned int ci, unsigned int elements_in_queue) 2992 { 2993 unsigned int num_elements_used; 2994 2995 if (pi >= ci) 2996 num_elements_used = pi - ci; 2997 else 2998 num_elements_used = elements_in_queue - ci + pi; 2999 3000 return elements_in_queue - num_elements_used - 1; 3001 } 3002 3003 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3004 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3005 { 3006 pqi_index_t iq_pi; 3007 pqi_index_t iq_ci; 3008 unsigned long flags; 3009 void *next_element; 3010 struct pqi_queue_group *queue_group; 3011 3012 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3013 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3014 3015 while (1) { 3016 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3017 3018 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3019 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3020 3021 if (pqi_num_elements_free(iq_pi, iq_ci, 3022 ctrl_info->num_elements_per_iq)) 3023 break; 3024 3025 spin_unlock_irqrestore( 3026 &queue_group->submit_lock[RAID_PATH], flags); 3027 3028 if (pqi_ctrl_offline(ctrl_info)) 3029 return; 3030 } 3031 3032 next_element = queue_group->iq_element_array[RAID_PATH] + 3033 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3034 3035 memcpy(next_element, iu, iu_length); 3036 3037 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3038 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3039 3040 /* 3041 * This write notifies the controller that an IU is available to be 3042 * processed. 
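* (the inbound-queue PI doorbell). The loop above only reaches this
* point once pqi_num_elements_free() reports room; that helper returns
* elements_in_queue - used - 1, so an assumed 32-element IQ with
* pi = 5 and ci = 30 has 32 - 30 + 5 = 7 elements in flight and 24 free.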
3043 */ 3044 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3045 3046 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3047 } 3048 3049 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3050 struct pqi_event *event) 3051 { 3052 struct pqi_event_acknowledge_request request; 3053 3054 memset(&request, 0, sizeof(request)); 3055 3056 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3057 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3058 &request.header.iu_length); 3059 request.event_type = event->event_type; 3060 request.event_id = event->event_id; 3061 request.additional_event_id = event->additional_event_id; 3062 3063 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3064 } 3065 3066 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3067 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3068 3069 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3070 struct pqi_ctrl_info *ctrl_info) 3071 { 3072 unsigned long timeout; 3073 u8 status; 3074 3075 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies; 3076 3077 while (1) { 3078 status = pqi_read_soft_reset_status(ctrl_info); 3079 if (status & PQI_SOFT_RESET_INITIATE) 3080 return RESET_INITIATE_DRIVER; 3081 3082 if (status & PQI_SOFT_RESET_ABORT) 3083 return RESET_ABORT; 3084 3085 if (time_after(jiffies, timeout)) { 3086 dev_err(&ctrl_info->pci_dev->dev, 3087 "timed out waiting for soft reset status\n"); 3088 return RESET_TIMEDOUT; 3089 } 3090 3091 if (!sis_is_firmware_running(ctrl_info)) 3092 return RESET_NORESPONSE; 3093 3094 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3095 } 3096 } 3097 3098 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, 3099 enum pqi_soft_reset_status reset_status) 3100 { 3101 int rc; 3102 3103 switch (reset_status) { 3104 case RESET_INITIATE_DRIVER: 3105 /* fall through */ 3106 case RESET_TIMEDOUT: 3107 dev_info(&ctrl_info->pci_dev->dev, 3108 "resetting controller %u\n", ctrl_info->ctrl_id); 3109 sis_soft_reset(ctrl_info); 3110 /* fall through */ 3111 case RESET_INITIATE_FIRMWARE: 3112 rc = pqi_ofa_ctrl_restart(ctrl_info); 3113 pqi_ofa_free_host_buffer(ctrl_info); 3114 dev_info(&ctrl_info->pci_dev->dev, 3115 "Online Firmware Activation for controller %u: %s\n", 3116 ctrl_info->ctrl_id, rc == 0 ? 
"SUCCESS" : "FAILED"); 3117 break; 3118 case RESET_ABORT: 3119 pqi_ofa_ctrl_unquiesce(ctrl_info); 3120 dev_info(&ctrl_info->pci_dev->dev, 3121 "Online Firmware Activation for controller %u: %s\n", 3122 ctrl_info->ctrl_id, "ABORTED"); 3123 break; 3124 case RESET_NORESPONSE: 3125 pqi_ofa_free_host_buffer(ctrl_info); 3126 pqi_take_ctrl_offline(ctrl_info); 3127 break; 3128 } 3129 } 3130 3131 static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3132 struct pqi_event *event) 3133 { 3134 u16 event_id; 3135 enum pqi_soft_reset_status status; 3136 3137 event_id = get_unaligned_le16(&event->event_id); 3138 3139 mutex_lock(&ctrl_info->ofa_mutex); 3140 3141 if (event_id == PQI_EVENT_OFA_QUIESCE) { 3142 dev_info(&ctrl_info->pci_dev->dev, 3143 "Received Online Firmware Activation quiesce event for controller %u\n", 3144 ctrl_info->ctrl_id); 3145 pqi_ofa_ctrl_quiesce(ctrl_info); 3146 pqi_acknowledge_event(ctrl_info, event); 3147 if (ctrl_info->soft_reset_handshake_supported) { 3148 status = pqi_poll_for_soft_reset_status(ctrl_info); 3149 pqi_process_soft_reset(ctrl_info, status); 3150 } else { 3151 pqi_process_soft_reset(ctrl_info, 3152 RESET_INITIATE_FIRMWARE); 3153 } 3154 3155 } else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3156 pqi_acknowledge_event(ctrl_info, event); 3157 pqi_ofa_setup_host_buffer(ctrl_info, 3158 le32_to_cpu(event->ofa_bytes_requested)); 3159 pqi_ofa_host_memory_update(ctrl_info); 3160 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3161 pqi_ofa_free_host_buffer(ctrl_info); 3162 pqi_acknowledge_event(ctrl_info, event); 3163 dev_info(&ctrl_info->pci_dev->dev, 3164 "Online Firmware Activation(%u) cancel reason : %u\n", 3165 ctrl_info->ctrl_id, event->ofa_cancel_reason); 3166 } 3167 3168 mutex_unlock(&ctrl_info->ofa_mutex); 3169 } 3170 3171 static void pqi_event_worker(struct work_struct *work) 3172 { 3173 unsigned int i; 3174 struct pqi_ctrl_info *ctrl_info; 3175 struct pqi_event *event; 3176 3177 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3178 3179 pqi_ctrl_busy(ctrl_info); 3180 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 3181 if (pqi_ctrl_offline(ctrl_info)) 3182 goto out; 3183 3184 pqi_schedule_rescan_worker_delayed(ctrl_info); 3185 3186 event = ctrl_info->events; 3187 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3188 if (event->pending) { 3189 event->pending = false; 3190 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3191 pqi_ctrl_unbusy(ctrl_info); 3192 pqi_ofa_process_event(ctrl_info, event); 3193 return; 3194 } 3195 pqi_acknowledge_event(ctrl_info, event); 3196 } 3197 event++; 3198 } 3199 3200 out: 3201 pqi_ctrl_unbusy(ctrl_info); 3202 } 3203 3204 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * PQI_HZ) 3205 3206 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3207 { 3208 int num_interrupts; 3209 u32 heartbeat_count; 3210 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 3211 heartbeat_timer); 3212 3213 pqi_check_ctrl_health(ctrl_info); 3214 if (pqi_ctrl_offline(ctrl_info)) 3215 return; 3216 3217 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3218 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3219 3220 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3221 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3222 dev_err(&ctrl_info->pci_dev->dev, 3223 "no heartbeat detected - last heartbeat count: %u\n", 3224 heartbeat_count); 3225 pqi_take_ctrl_offline(ctrl_info); 3226 return; 3227 } 3228 } else { 3229 ctrl_info->previous_num_interrupts = num_interrupts; 3230 } 
3231 3232 ctrl_info->previous_heartbeat_count = heartbeat_count; 3233 mod_timer(&ctrl_info->heartbeat_timer, 3234 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3235 } 3236 3237 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3238 { 3239 if (!ctrl_info->heartbeat_counter) 3240 return; 3241 3242 ctrl_info->previous_num_interrupts = 3243 atomic_read(&ctrl_info->num_interrupts); 3244 ctrl_info->previous_heartbeat_count = 3245 pqi_read_heartbeat_counter(ctrl_info); 3246 3247 ctrl_info->heartbeat_timer.expires = 3248 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3249 add_timer(&ctrl_info->heartbeat_timer); 3250 } 3251 3252 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3253 { 3254 del_timer_sync(&ctrl_info->heartbeat_timer); 3255 } 3256 3257 static inline int pqi_event_type_to_event_index(unsigned int event_type) 3258 { 3259 int index; 3260 3261 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 3262 if (event_type == pqi_supported_event_types[index]) 3263 return index; 3264 3265 return -1; 3266 } 3267 3268 static inline bool pqi_is_supported_event(unsigned int event_type) 3269 { 3270 return pqi_event_type_to_event_index(event_type) != -1; 3271 } 3272 3273 static void pqi_ofa_capture_event_payload(struct pqi_event *event, 3274 struct pqi_event_response *response) 3275 { 3276 u16 event_id; 3277 3278 event_id = get_unaligned_le16(&event->event_id); 3279 3280 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3281 if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) { 3282 event->ofa_bytes_requested = 3283 response->data.ofa_memory_allocation.bytes_requested; 3284 } else if (event_id == PQI_EVENT_OFA_CANCELLED) { 3285 event->ofa_cancel_reason = 3286 response->data.ofa_cancelled.reason; 3287 } 3288 } 3289 } 3290 3291 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3292 { 3293 unsigned int num_events; 3294 pqi_index_t oq_pi; 3295 pqi_index_t oq_ci; 3296 struct pqi_event_queue *event_queue; 3297 struct pqi_event_response *response; 3298 struct pqi_event *event; 3299 int event_index; 3300 3301 event_queue = &ctrl_info->event_queue; 3302 num_events = 0; 3303 oq_ci = event_queue->oq_ci_copy; 3304 3305 while (1) { 3306 oq_pi = readl(event_queue->oq_pi); 3307 if (oq_pi == oq_ci) 3308 break; 3309 3310 num_events++; 3311 response = event_queue->oq_element_array + 3312 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3313 3314 event_index = 3315 pqi_event_type_to_event_index(response->event_type); 3316 3317 if (event_index >= 0) { 3318 if (response->request_acknowlege) { 3319 event = &ctrl_info->events[event_index]; 3320 event->pending = true; 3321 event->event_type = response->event_type; 3322 event->event_id = response->event_id; 3323 event->additional_event_id = 3324 response->additional_event_id; 3325 pqi_ofa_capture_event_payload(event, response); 3326 } 3327 } 3328 3329 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3330 } 3331 3332 if (num_events) { 3333 event_queue->oq_ci_copy = oq_ci; 3334 writel(oq_ci, event_queue->oq_ci); 3335 schedule_work(&ctrl_info->event_work); 3336 } 3337 3338 return num_events; 3339 } 3340 3341 #define PQI_LEGACY_INTX_MASK 0x1 3342 3343 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 3344 bool enable_intx) 3345 { 3346 u32 intx_mask; 3347 struct pqi_device_registers __iomem *pqi_registers; 3348 volatile void __iomem *register_addr; 3349 3350 pqi_registers = ctrl_info->pqi_registers; 3351 3352 if (enable_intx) 3353 register_addr = &pqi_registers->legacy_intx_mask_clear; 
3354 else 3355 register_addr = &pqi_registers->legacy_intx_mask_set; 3356 3357 intx_mask = readl(register_addr); 3358 intx_mask |= PQI_LEGACY_INTX_MASK; 3359 writel(intx_mask, register_addr); 3360 } 3361 3362 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3363 enum pqi_irq_mode new_mode) 3364 { 3365 switch (ctrl_info->irq_mode) { 3366 case IRQ_MODE_MSIX: 3367 switch (new_mode) { 3368 case IRQ_MODE_MSIX: 3369 break; 3370 case IRQ_MODE_INTX: 3371 pqi_configure_legacy_intx(ctrl_info, true); 3372 sis_enable_intx(ctrl_info); 3373 break; 3374 case IRQ_MODE_NONE: 3375 break; 3376 } 3377 break; 3378 case IRQ_MODE_INTX: 3379 switch (new_mode) { 3380 case IRQ_MODE_MSIX: 3381 pqi_configure_legacy_intx(ctrl_info, false); 3382 sis_enable_msix(ctrl_info); 3383 break; 3384 case IRQ_MODE_INTX: 3385 break; 3386 case IRQ_MODE_NONE: 3387 pqi_configure_legacy_intx(ctrl_info, false); 3388 break; 3389 } 3390 break; 3391 case IRQ_MODE_NONE: 3392 switch (new_mode) { 3393 case IRQ_MODE_MSIX: 3394 sis_enable_msix(ctrl_info); 3395 break; 3396 case IRQ_MODE_INTX: 3397 pqi_configure_legacy_intx(ctrl_info, true); 3398 sis_enable_intx(ctrl_info); 3399 break; 3400 case IRQ_MODE_NONE: 3401 break; 3402 } 3403 break; 3404 } 3405 3406 ctrl_info->irq_mode = new_mode; 3407 } 3408 3409 #define PQI_LEGACY_INTX_PENDING 0x1 3410 3411 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3412 { 3413 bool valid_irq; 3414 u32 intx_status; 3415 3416 switch (ctrl_info->irq_mode) { 3417 case IRQ_MODE_MSIX: 3418 valid_irq = true; 3419 break; 3420 case IRQ_MODE_INTX: 3421 intx_status = 3422 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3423 if (intx_status & PQI_LEGACY_INTX_PENDING) 3424 valid_irq = true; 3425 else 3426 valid_irq = false; 3427 break; 3428 case IRQ_MODE_NONE: 3429 default: 3430 valid_irq = false; 3431 break; 3432 } 3433 3434 return valid_irq; 3435 } 3436 3437 static irqreturn_t pqi_irq_handler(int irq, void *data) 3438 { 3439 struct pqi_ctrl_info *ctrl_info; 3440 struct pqi_queue_group *queue_group; 3441 unsigned int num_responses_handled; 3442 3443 queue_group = data; 3444 ctrl_info = queue_group->ctrl_info; 3445 3446 if (!pqi_is_valid_irq(ctrl_info)) 3447 return IRQ_NONE; 3448 3449 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3450 3451 if (irq == ctrl_info->event_irq) 3452 num_responses_handled += pqi_process_event_intr(ctrl_info); 3453 3454 if (num_responses_handled) 3455 atomic_inc(&ctrl_info->num_interrupts); 3456 3457 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3458 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3459 3460 return IRQ_HANDLED; 3461 } 3462 3463 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3464 { 3465 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3466 int i; 3467 int rc; 3468 3469 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3470 3471 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3472 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3473 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3474 if (rc) { 3475 dev_err(&pci_dev->dev, 3476 "irq %u init failed with error %d\n", 3477 pci_irq_vector(pci_dev, i), rc); 3478 return rc; 3479 } 3480 ctrl_info->num_msix_vectors_initialized++; 3481 } 3482 3483 return 0; 3484 } 3485 3486 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3487 { 3488 int i; 3489 3490 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3491 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3492 &ctrl_info->queue_groups[i]); 3493 3494 
ctrl_info->num_msix_vectors_initialized = 0; 3495 } 3496 3497 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3498 { 3499 int num_vectors_enabled; 3500 3501 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3502 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3503 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3504 if (num_vectors_enabled < 0) { 3505 dev_err(&ctrl_info->pci_dev->dev, 3506 "MSI-X init failed with error %d\n", 3507 num_vectors_enabled); 3508 return num_vectors_enabled; 3509 } 3510 3511 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3512 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3513 return 0; 3514 } 3515 3516 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3517 { 3518 if (ctrl_info->num_msix_vectors_enabled) { 3519 pci_free_irq_vectors(ctrl_info->pci_dev); 3520 ctrl_info->num_msix_vectors_enabled = 0; 3521 } 3522 } 3523 3524 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3525 { 3526 unsigned int i; 3527 size_t alloc_length; 3528 size_t element_array_length_per_iq; 3529 size_t element_array_length_per_oq; 3530 void *element_array; 3531 void __iomem *next_queue_index; 3532 void *aligned_pointer; 3533 unsigned int num_inbound_queues; 3534 unsigned int num_outbound_queues; 3535 unsigned int num_queue_indexes; 3536 struct pqi_queue_group *queue_group; 3537 3538 element_array_length_per_iq = 3539 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3540 ctrl_info->num_elements_per_iq; 3541 element_array_length_per_oq = 3542 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3543 ctrl_info->num_elements_per_oq; 3544 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3545 num_outbound_queues = ctrl_info->num_queue_groups; 3546 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3547 3548 aligned_pointer = NULL; 3549 3550 for (i = 0; i < num_inbound_queues; i++) { 3551 aligned_pointer = PTR_ALIGN(aligned_pointer, 3552 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3553 aligned_pointer += element_array_length_per_iq; 3554 } 3555 3556 for (i = 0; i < num_outbound_queues; i++) { 3557 aligned_pointer = PTR_ALIGN(aligned_pointer, 3558 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3559 aligned_pointer += element_array_length_per_oq; 3560 } 3561 3562 aligned_pointer = PTR_ALIGN(aligned_pointer, 3563 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3564 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3565 PQI_EVENT_OQ_ELEMENT_LENGTH; 3566 3567 for (i = 0; i < num_queue_indexes; i++) { 3568 aligned_pointer = PTR_ALIGN(aligned_pointer, 3569 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3570 aligned_pointer += sizeof(pqi_index_t); 3571 } 3572 3573 alloc_length = (size_t)aligned_pointer + 3574 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3575 3576 alloc_length += PQI_EXTRA_SGL_MEMORY; 3577 3578 ctrl_info->queue_memory_base = 3579 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3580 &ctrl_info->queue_memory_base_dma_handle, 3581 GFP_KERNEL); 3582 3583 if (!ctrl_info->queue_memory_base) 3584 return -ENOMEM; 3585 3586 ctrl_info->queue_memory_length = alloc_length; 3587 3588 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3589 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3590 3591 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3592 queue_group = &ctrl_info->queue_groups[i]; 3593 queue_group->iq_element_array[RAID_PATH] = element_array; 3594 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3595 ctrl_info->queue_memory_base_dma_handle + 3596 (element_array - ctrl_info->queue_memory_base); 3597 element_array += element_array_length_per_iq; 3598 element_array = 
PTR_ALIGN(element_array, 3599 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3600 queue_group->iq_element_array[AIO_PATH] = element_array; 3601 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3602 ctrl_info->queue_memory_base_dma_handle + 3603 (element_array - ctrl_info->queue_memory_base); 3604 element_array += element_array_length_per_iq; 3605 element_array = PTR_ALIGN(element_array, 3606 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3607 } 3608 3609 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3610 queue_group = &ctrl_info->queue_groups[i]; 3611 queue_group->oq_element_array = element_array; 3612 queue_group->oq_element_array_bus_addr = 3613 ctrl_info->queue_memory_base_dma_handle + 3614 (element_array - ctrl_info->queue_memory_base); 3615 element_array += element_array_length_per_oq; 3616 element_array = PTR_ALIGN(element_array, 3617 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3618 } 3619 3620 ctrl_info->event_queue.oq_element_array = element_array; 3621 ctrl_info->event_queue.oq_element_array_bus_addr = 3622 ctrl_info->queue_memory_base_dma_handle + 3623 (element_array - ctrl_info->queue_memory_base); 3624 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3625 PQI_EVENT_OQ_ELEMENT_LENGTH; 3626 3627 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 3628 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3629 3630 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3631 queue_group = &ctrl_info->queue_groups[i]; 3632 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3633 queue_group->iq_ci_bus_addr[RAID_PATH] = 3634 ctrl_info->queue_memory_base_dma_handle + 3635 (next_queue_index - 3636 (void __iomem *)ctrl_info->queue_memory_base); 3637 next_queue_index += sizeof(pqi_index_t); 3638 next_queue_index = PTR_ALIGN(next_queue_index, 3639 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3640 queue_group->iq_ci[AIO_PATH] = next_queue_index; 3641 queue_group->iq_ci_bus_addr[AIO_PATH] = 3642 ctrl_info->queue_memory_base_dma_handle + 3643 (next_queue_index - 3644 (void __iomem *)ctrl_info->queue_memory_base); 3645 next_queue_index += sizeof(pqi_index_t); 3646 next_queue_index = PTR_ALIGN(next_queue_index, 3647 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3648 queue_group->oq_pi = next_queue_index; 3649 queue_group->oq_pi_bus_addr = 3650 ctrl_info->queue_memory_base_dma_handle + 3651 (next_queue_index - 3652 (void __iomem *)ctrl_info->queue_memory_base); 3653 next_queue_index += sizeof(pqi_index_t); 3654 next_queue_index = PTR_ALIGN(next_queue_index, 3655 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3656 } 3657 3658 ctrl_info->event_queue.oq_pi = next_queue_index; 3659 ctrl_info->event_queue.oq_pi_bus_addr = 3660 ctrl_info->queue_memory_base_dma_handle + 3661 (next_queue_index - 3662 (void __iomem *)ctrl_info->queue_memory_base); 3663 3664 return 0; 3665 } 3666 3667 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3668 { 3669 unsigned int i; 3670 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3671 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3672 3673 /* 3674 * Initialize the backpointers to the controller structure in 3675 * each operational queue group structure. 3676 */ 3677 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3678 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3679 3680 /* 3681 * Assign IDs to all operational queues. Note that the IDs 3682 * assigned to operational IQs are independent of the IDs 3683 * assigned to operational OQs. 
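* For example (group count assumed): with three queue groups and a
* base ID of PQI_MIN_OPERATIONAL_QUEUE_ID, the per-group RAID/AIO IQs
* receive IDs base+0 through base+5, while the event queue takes
* OQ ID base+0 and the per-group OQs take base+1 through base+3.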
3684 */ 3685 ctrl_info->event_queue.oq_id = next_oq_id++; 3686 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3687 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3688 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3689 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3690 } 3691 3692 /* 3693 * Assign MSI-X table entry indexes to all queues. Note that the 3694 * interrupt for the event queue is shared with the first queue group. 3695 */ 3696 ctrl_info->event_queue.int_msg_num = 0; 3697 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3698 ctrl_info->queue_groups[i].int_msg_num = i; 3699 3700 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3701 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3702 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3703 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3704 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3705 } 3706 } 3707 3708 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3709 { 3710 size_t alloc_length; 3711 struct pqi_admin_queues_aligned *admin_queues_aligned; 3712 struct pqi_admin_queues *admin_queues; 3713 3714 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3715 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3716 3717 ctrl_info->admin_queue_memory_base = 3718 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 3719 &ctrl_info->admin_queue_memory_base_dma_handle, 3720 GFP_KERNEL); 3721 3722 if (!ctrl_info->admin_queue_memory_base) 3723 return -ENOMEM; 3724 3725 ctrl_info->admin_queue_memory_length = alloc_length; 3726 3727 admin_queues = &ctrl_info->admin_queues; 3728 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3729 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3730 admin_queues->iq_element_array = 3731 &admin_queues_aligned->iq_element_array; 3732 admin_queues->oq_element_array = 3733 &admin_queues_aligned->oq_element_array; 3734 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3735 admin_queues->oq_pi = 3736 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 3737 3738 admin_queues->iq_element_array_bus_addr = 3739 ctrl_info->admin_queue_memory_base_dma_handle + 3740 (admin_queues->iq_element_array - 3741 ctrl_info->admin_queue_memory_base); 3742 admin_queues->oq_element_array_bus_addr = 3743 ctrl_info->admin_queue_memory_base_dma_handle + 3744 (admin_queues->oq_element_array - 3745 ctrl_info->admin_queue_memory_base); 3746 admin_queues->iq_ci_bus_addr = 3747 ctrl_info->admin_queue_memory_base_dma_handle + 3748 ((void *)admin_queues->iq_ci - 3749 ctrl_info->admin_queue_memory_base); 3750 admin_queues->oq_pi_bus_addr = 3751 ctrl_info->admin_queue_memory_base_dma_handle + 3752 ((void __iomem *)admin_queues->oq_pi - 3753 (void __iomem *)ctrl_info->admin_queue_memory_base); 3754 3755 return 0; 3756 } 3757 3758 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES PQI_HZ 3759 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3760 3761 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3762 { 3763 struct pqi_device_registers __iomem *pqi_registers; 3764 struct pqi_admin_queues *admin_queues; 3765 unsigned long timeout; 3766 u8 status; 3767 u32 reg; 3768 3769 pqi_registers = ctrl_info->pqi_registers; 3770 admin_queues = &ctrl_info->admin_queues; 3771 3772 writeq((u64)admin_queues->iq_element_array_bus_addr, 3773 &pqi_registers->admin_iq_element_array_addr); 3774 writeq((u64)admin_queues->oq_element_array_bus_addr, 3775 &pqi_registers->admin_oq_element_array_addr); 3776 
writeq((u64)admin_queues->iq_ci_bus_addr, 3777 &pqi_registers->admin_iq_ci_addr); 3778 writeq((u64)admin_queues->oq_pi_bus_addr, 3779 &pqi_registers->admin_oq_pi_addr); 3780 3781 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3782 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | 3783 (admin_queues->int_msg_num << 16); 3784 writel(reg, &pqi_registers->admin_iq_num_elements); 3785 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3786 &pqi_registers->function_and_status_code); 3787 3788 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 3789 while (1) { 3790 status = readb(&pqi_registers->function_and_status_code); 3791 if (status == PQI_STATUS_IDLE) 3792 break; 3793 if (time_after(jiffies, timeout)) 3794 return -ETIMEDOUT; 3795 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3796 } 3797 3798 /* 3799 * The offset registers are not initialized to the correct 3800 * offsets until *after* the create admin queue pair command 3801 * completes successfully. 3802 */ 3803 admin_queues->iq_pi = ctrl_info->iomem_base + 3804 PQI_DEVICE_REGISTERS_OFFSET + 3805 readq(&pqi_registers->admin_iq_pi_offset); 3806 admin_queues->oq_ci = ctrl_info->iomem_base + 3807 PQI_DEVICE_REGISTERS_OFFSET + 3808 readq(&pqi_registers->admin_oq_ci_offset); 3809 3810 return 0; 3811 } 3812 3813 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3814 struct pqi_general_admin_request *request) 3815 { 3816 struct pqi_admin_queues *admin_queues; 3817 void *next_element; 3818 pqi_index_t iq_pi; 3819 3820 admin_queues = &ctrl_info->admin_queues; 3821 iq_pi = admin_queues->iq_pi_copy; 3822 3823 next_element = admin_queues->iq_element_array + 3824 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3825 3826 memcpy(next_element, request, sizeof(*request)); 3827 3828 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3829 admin_queues->iq_pi_copy = iq_pi; 3830 3831 /* 3832 * This write notifies the controller that an IU is available to be 3833 * processed. 
3834 */ 3835 writel(iq_pi, admin_queues->iq_pi); 3836 } 3837 3838 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3839 3840 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3841 struct pqi_general_admin_response *response) 3842 { 3843 struct pqi_admin_queues *admin_queues; 3844 pqi_index_t oq_pi; 3845 pqi_index_t oq_ci; 3846 unsigned long timeout; 3847 3848 admin_queues = &ctrl_info->admin_queues; 3849 oq_ci = admin_queues->oq_ci_copy; 3850 3851 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies; 3852 3853 while (1) { 3854 oq_pi = readl(admin_queues->oq_pi); 3855 if (oq_pi != oq_ci) 3856 break; 3857 if (time_after(jiffies, timeout)) { 3858 dev_err(&ctrl_info->pci_dev->dev, 3859 "timed out waiting for admin response\n"); 3860 return -ETIMEDOUT; 3861 } 3862 if (!sis_is_firmware_running(ctrl_info)) 3863 return -ENXIO; 3864 usleep_range(1000, 2000); 3865 } 3866 3867 memcpy(response, admin_queues->oq_element_array + 3868 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3869 3870 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3871 admin_queues->oq_ci_copy = oq_ci; 3872 writel(oq_ci, admin_queues->oq_ci); 3873 3874 return 0; 3875 } 3876 3877 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3878 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3879 struct pqi_io_request *io_request) 3880 { 3881 struct pqi_io_request *next; 3882 void *next_element; 3883 pqi_index_t iq_pi; 3884 pqi_index_t iq_ci; 3885 size_t iu_length; 3886 unsigned long flags; 3887 unsigned int num_elements_needed; 3888 unsigned int num_elements_to_end_of_queue; 3889 size_t copy_count; 3890 struct pqi_iu_header *request; 3891 3892 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3893 3894 if (io_request) { 3895 io_request->queue_group = queue_group; 3896 list_add_tail(&io_request->request_list_entry, 3897 &queue_group->request_list[path]); 3898 } 3899 3900 iq_pi = queue_group->iq_pi_copy[path]; 3901 3902 list_for_each_entry_safe(io_request, next, 3903 &queue_group->request_list[path], request_list_entry) { 3904 3905 request = io_request->iu; 3906 3907 iu_length = get_unaligned_le16(&request->iu_length) + 3908 PQI_REQUEST_HEADER_LENGTH; 3909 num_elements_needed = 3910 DIV_ROUND_UP(iu_length, 3911 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3912 3913 iq_ci = readl(queue_group->iq_ci[path]); 3914 3915 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3916 ctrl_info->num_elements_per_iq)) 3917 break; 3918 3919 put_unaligned_le16(queue_group->oq_id, 3920 &request->response_queue_id); 3921 3922 next_element = queue_group->iq_element_array[path] + 3923 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3924 3925 num_elements_to_end_of_queue = 3926 ctrl_info->num_elements_per_iq - iq_pi; 3927 3928 if (num_elements_needed <= num_elements_to_end_of_queue) { 3929 memcpy(next_element, request, iu_length); 3930 } else { 3931 copy_count = num_elements_to_end_of_queue * 3932 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3933 memcpy(next_element, request, copy_count); 3934 memcpy(queue_group->iq_element_array[path], 3935 (u8 *)request + copy_count, 3936 iu_length - copy_count); 3937 } 3938 3939 iq_pi = (iq_pi + num_elements_needed) % 3940 ctrl_info->num_elements_per_iq; 3941 3942 list_del(&io_request->request_list_entry); 3943 } 3944 3945 if (iq_pi != queue_group->iq_pi_copy[path]) { 3946 queue_group->iq_pi_copy[path] = iq_pi; 3947 /* 3948 * This write notifies the controller that one or more IUs are 3949 * available to be processed. 
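* (the PI doorbell for this path). The copy loop above already handled
* wrap-around: e.g. with an assumed 32-element IQ, iq_pi = 30 and an IU
* needing 4 elements, two elements' worth lands in slots 30-31, the
* remainder at the start of the array, and the value written here is 2.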
3950 */ 3951 writel(iq_pi, queue_group->iq_pi[path]); 3952 } 3953 3954 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3955 } 3956 3957 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3958 3959 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3960 struct completion *wait) 3961 { 3962 int rc; 3963 3964 while (1) { 3965 if (wait_for_completion_io_timeout(wait, 3966 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) { 3967 rc = 0; 3968 break; 3969 } 3970 3971 pqi_check_ctrl_health(ctrl_info); 3972 if (pqi_ctrl_offline(ctrl_info)) { 3973 rc = -ENXIO; 3974 break; 3975 } 3976 } 3977 3978 return rc; 3979 } 3980 3981 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3982 void *context) 3983 { 3984 struct completion *waiting = context; 3985 3986 complete(waiting); 3987 } 3988 3989 static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info 3990 *error_info) 3991 { 3992 int rc = -EIO; 3993 3994 switch (error_info->data_out_result) { 3995 case PQI_DATA_IN_OUT_GOOD: 3996 if (error_info->status == SAM_STAT_GOOD) 3997 rc = 0; 3998 break; 3999 case PQI_DATA_IN_OUT_UNDERFLOW: 4000 if (error_info->status == SAM_STAT_GOOD || 4001 error_info->status == SAM_STAT_CHECK_CONDITION) 4002 rc = 0; 4003 break; 4004 case PQI_DATA_IN_OUT_ABORTED: 4005 rc = PQI_CMD_STATUS_ABORTED; 4006 break; 4007 } 4008 4009 return rc; 4010 } 4011 4012 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4013 struct pqi_iu_header *request, unsigned int flags, 4014 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 4015 { 4016 int rc = 0; 4017 struct pqi_io_request *io_request; 4018 unsigned long start_jiffies; 4019 unsigned long msecs_blocked; 4020 size_t iu_length; 4021 DECLARE_COMPLETION_ONSTACK(wait); 4022 4023 /* 4024 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 4025 * are mutually exclusive. 
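 * (Callers that pass PQI_SYNC_FLAGS_INTERRUPTABLE, e.g. the CCISS
 * passthrough ioctl, also pass NO_TIMEOUT, so at most one of the two
 * mechanisms is in effect for any given request.)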
4026 */ 4027 4028 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4029 if (down_interruptible(&ctrl_info->sync_request_sem)) 4030 return -ERESTARTSYS; 4031 } else { 4032 if (timeout_msecs == NO_TIMEOUT) { 4033 down(&ctrl_info->sync_request_sem); 4034 } else { 4035 start_jiffies = jiffies; 4036 if (down_timeout(&ctrl_info->sync_request_sem, 4037 msecs_to_jiffies(timeout_msecs))) 4038 return -ETIMEDOUT; 4039 msecs_blocked = 4040 jiffies_to_msecs(jiffies - start_jiffies); 4041 if (msecs_blocked >= timeout_msecs) 4042 return -ETIMEDOUT; 4043 timeout_msecs -= msecs_blocked; 4044 } 4045 } 4046 4047 pqi_ctrl_busy(ctrl_info); 4048 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 4049 if (timeout_msecs == 0) { 4050 pqi_ctrl_unbusy(ctrl_info); 4051 rc = -ETIMEDOUT; 4052 goto out; 4053 } 4054 4055 if (pqi_ctrl_offline(ctrl_info)) { 4056 pqi_ctrl_unbusy(ctrl_info); 4057 rc = -ENXIO; 4058 goto out; 4059 } 4060 4061 io_request = pqi_alloc_io_request(ctrl_info); 4062 4063 put_unaligned_le16(io_request->index, 4064 &(((struct pqi_raid_path_request *)request)->request_id)); 4065 4066 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4067 ((struct pqi_raid_path_request *)request)->error_index = 4068 ((struct pqi_raid_path_request *)request)->request_id; 4069 4070 iu_length = get_unaligned_le16(&request->iu_length) + 4071 PQI_REQUEST_HEADER_LENGTH; 4072 memcpy(io_request->iu, request, iu_length); 4073 4074 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4075 io_request->context = &wait; 4076 4077 pqi_start_io(ctrl_info, 4078 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4079 io_request); 4080 4081 pqi_ctrl_unbusy(ctrl_info); 4082 4083 if (timeout_msecs == NO_TIMEOUT) { 4084 pqi_wait_for_completion_io(ctrl_info, &wait); 4085 } else { 4086 if (!wait_for_completion_io_timeout(&wait, 4087 msecs_to_jiffies(timeout_msecs))) { 4088 dev_warn(&ctrl_info->pci_dev->dev, 4089 "command timed out\n"); 4090 rc = -ETIMEDOUT; 4091 } 4092 } 4093 4094 if (error_info) { 4095 if (io_request->error_info) 4096 memcpy(error_info, io_request->error_info, 4097 sizeof(*error_info)); 4098 else 4099 memset(error_info, 0, sizeof(*error_info)); 4100 } else if (rc == 0 && io_request->error_info) { 4101 rc = pqi_process_raid_io_error_synchronous( 4102 io_request->error_info); 4103 } 4104 4105 pqi_free_io_request(io_request); 4106 4107 out: 4108 up(&ctrl_info->sync_request_sem); 4109 4110 return rc; 4111 } 4112 4113 static int pqi_validate_admin_response( 4114 struct pqi_general_admin_response *response, u8 expected_function_code) 4115 { 4116 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4117 return -EINVAL; 4118 4119 if (get_unaligned_le16(&response->header.iu_length) != 4120 PQI_GENERAL_ADMIN_IU_LENGTH) 4121 return -EINVAL; 4122 4123 if (response->function_code != expected_function_code) 4124 return -EINVAL; 4125 4126 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4127 return -EINVAL; 4128 4129 return 0; 4130 } 4131 4132 static int pqi_submit_admin_request_synchronous( 4133 struct pqi_ctrl_info *ctrl_info, 4134 struct pqi_general_admin_request *request, 4135 struct pqi_general_admin_response *response) 4136 { 4137 int rc; 4138 4139 pqi_submit_admin_request(ctrl_info, request); 4140 4141 rc = pqi_poll_for_admin_response(ctrl_info, response); 4142 4143 if (rc == 0) 4144 rc = pqi_validate_admin_response(response, 4145 request->function_code); 4146 4147 return rc; 4148 } 4149 4150 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4151 { 4152 
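	/*
	 * Read the controller's device capability page and cache the queue
	 * limits it reports.  The element-length fields come back in units
	 * of 16 bytes, which is why the values read below are multiplied
	 * by 16 before being stored in ctrl_info.
	 */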
int rc; 4153 struct pqi_general_admin_request request; 4154 struct pqi_general_admin_response response; 4155 struct pqi_device_capability *capability; 4156 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4157 4158 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4159 if (!capability) 4160 return -ENOMEM; 4161 4162 memset(&request, 0, sizeof(request)); 4163 4164 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4165 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4166 &request.header.iu_length); 4167 request.function_code = 4168 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4169 put_unaligned_le32(sizeof(*capability), 4170 &request.data.report_device_capability.buffer_length); 4171 4172 rc = pqi_map_single(ctrl_info->pci_dev, 4173 &request.data.report_device_capability.sg_descriptor, 4174 capability, sizeof(*capability), 4175 DMA_FROM_DEVICE); 4176 if (rc) 4177 goto out; 4178 4179 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4180 &response); 4181 4182 pqi_pci_unmap(ctrl_info->pci_dev, 4183 &request.data.report_device_capability.sg_descriptor, 1, 4184 DMA_FROM_DEVICE); 4185 4186 if (rc) 4187 goto out; 4188 4189 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4190 rc = -EIO; 4191 goto out; 4192 } 4193 4194 ctrl_info->max_inbound_queues = 4195 get_unaligned_le16(&capability->max_inbound_queues); 4196 ctrl_info->max_elements_per_iq = 4197 get_unaligned_le16(&capability->max_elements_per_iq); 4198 ctrl_info->max_iq_element_length = 4199 get_unaligned_le16(&capability->max_iq_element_length) 4200 * 16; 4201 ctrl_info->max_outbound_queues = 4202 get_unaligned_le16(&capability->max_outbound_queues); 4203 ctrl_info->max_elements_per_oq = 4204 get_unaligned_le16(&capability->max_elements_per_oq); 4205 ctrl_info->max_oq_element_length = 4206 get_unaligned_le16(&capability->max_oq_element_length) 4207 * 16; 4208 4209 sop_iu_layer_descriptor = 4210 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4211 4212 ctrl_info->max_inbound_iu_length_per_firmware = 4213 get_unaligned_le16( 4214 &sop_iu_layer_descriptor->max_inbound_iu_length); 4215 ctrl_info->inbound_spanning_supported = 4216 sop_iu_layer_descriptor->inbound_spanning_supported; 4217 ctrl_info->outbound_spanning_supported = 4218 sop_iu_layer_descriptor->outbound_spanning_supported; 4219 4220 out: 4221 kfree(capability); 4222 4223 return rc; 4224 } 4225 4226 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4227 { 4228 if (ctrl_info->max_iq_element_length < 4229 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4230 dev_err(&ctrl_info->pci_dev->dev, 4231 "max. inbound queue element length of %d is less than the required length of %d\n", 4232 ctrl_info->max_iq_element_length, 4233 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4234 return -EINVAL; 4235 } 4236 4237 if (ctrl_info->max_oq_element_length < 4238 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4239 dev_err(&ctrl_info->pci_dev->dev, 4240 "max. outbound queue element length of %d is less than the required length of %d\n", 4241 ctrl_info->max_oq_element_length, 4242 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4243 return -EINVAL; 4244 } 4245 4246 if (ctrl_info->max_inbound_iu_length_per_firmware < 4247 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4248 dev_err(&ctrl_info->pci_dev->dev, 4249 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 4250 ctrl_info->max_inbound_iu_length_per_firmware, 4251 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4252 return -EINVAL; 4253 } 4254 4255 if (!ctrl_info->inbound_spanning_supported) { 4256 dev_err(&ctrl_info->pci_dev->dev, 4257 "the controller does not support inbound spanning\n"); 4258 return -EINVAL; 4259 } 4260 4261 if (ctrl_info->outbound_spanning_supported) { 4262 dev_err(&ctrl_info->pci_dev->dev, 4263 "the controller supports outbound spanning but this driver does not\n"); 4264 return -EINVAL; 4265 } 4266 4267 return 0; 4268 } 4269 4270 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4271 { 4272 int rc; 4273 struct pqi_event_queue *event_queue; 4274 struct pqi_general_admin_request request; 4275 struct pqi_general_admin_response response; 4276 4277 event_queue = &ctrl_info->event_queue; 4278 4279 /* 4280 * Create OQ (Outbound Queue - device to host queue) to dedicate 4281 * to events. 4282 */ 4283 memset(&request, 0, sizeof(request)); 4284 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4285 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4286 &request.header.iu_length); 4287 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4288 put_unaligned_le16(event_queue->oq_id, 4289 &request.data.create_operational_oq.queue_id); 4290 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4291 &request.data.create_operational_oq.element_array_addr); 4292 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4293 &request.data.create_operational_oq.pi_addr); 4294 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4295 &request.data.create_operational_oq.num_elements); 4296 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4297 &request.data.create_operational_oq.element_length); 4298 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4299 put_unaligned_le16(event_queue->int_msg_num, 4300 &request.data.create_operational_oq.int_msg_num); 4301 4302 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4303 &response); 4304 if (rc) 4305 return rc; 4306 4307 event_queue->oq_ci = ctrl_info->iomem_base + 4308 PQI_DEVICE_REGISTERS_OFFSET + 4309 get_unaligned_le64( 4310 &response.data.create_operational_oq.oq_ci_offset); 4311 4312 return 0; 4313 } 4314 4315 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4316 unsigned int group_number) 4317 { 4318 int rc; 4319 struct pqi_queue_group *queue_group; 4320 struct pqi_general_admin_request request; 4321 struct pqi_general_admin_response response; 4322 4323 queue_group = &ctrl_info->queue_groups[group_number]; 4324 4325 /* 4326 * Create IQ (Inbound Queue - host to device queue) for 4327 * RAID path. 
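 * Each queue group gets two inbound queues (one per I/O path, RAID and
 * AIO) plus a single outbound queue that both paths share for
 * completions; all three are created via the admin requests below.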
4328 */ 4329 memset(&request, 0, sizeof(request)); 4330 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4331 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4332 &request.header.iu_length); 4333 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4334 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4335 &request.data.create_operational_iq.queue_id); 4336 put_unaligned_le64( 4337 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4338 &request.data.create_operational_iq.element_array_addr); 4339 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4340 &request.data.create_operational_iq.ci_addr); 4341 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4342 &request.data.create_operational_iq.num_elements); 4343 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4344 &request.data.create_operational_iq.element_length); 4345 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4346 4347 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4348 &response); 4349 if (rc) { 4350 dev_err(&ctrl_info->pci_dev->dev, 4351 "error creating inbound RAID queue\n"); 4352 return rc; 4353 } 4354 4355 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4356 PQI_DEVICE_REGISTERS_OFFSET + 4357 get_unaligned_le64( 4358 &response.data.create_operational_iq.iq_pi_offset); 4359 4360 /* 4361 * Create IQ (Inbound Queue - host to device queue) for 4362 * Advanced I/O (AIO) path. 4363 */ 4364 memset(&request, 0, sizeof(request)); 4365 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4366 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4367 &request.header.iu_length); 4368 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4369 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4370 &request.data.create_operational_iq.queue_id); 4371 put_unaligned_le64((u64)queue_group-> 4372 iq_element_array_bus_addr[AIO_PATH], 4373 &request.data.create_operational_iq.element_array_addr); 4374 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4375 &request.data.create_operational_iq.ci_addr); 4376 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4377 &request.data.create_operational_iq.num_elements); 4378 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4379 &request.data.create_operational_iq.element_length); 4380 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4381 4382 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4383 &response); 4384 if (rc) { 4385 dev_err(&ctrl_info->pci_dev->dev, 4386 "error creating inbound AIO queue\n"); 4387 return rc; 4388 } 4389 4390 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4391 PQI_DEVICE_REGISTERS_OFFSET + 4392 get_unaligned_le64( 4393 &response.data.create_operational_iq.iq_pi_offset); 4394 4395 /* 4396 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4397 * assumed to be for RAID path I/O unless we change the queue's 4398 * property. 
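 * This is done with the CHANGE_IQ_PROPERTY admin function: setting the
 * vendor-specific PQI_IQ_PROPERTY_IS_AIO_QUEUE bit on the second IQ is
 * what tells the firmware to treat requests submitted there as AIO
 * (RAID bypass) I/O.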
4399 */ 4400 memset(&request, 0, sizeof(request)); 4401 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4402 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4403 &request.header.iu_length); 4404 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4405 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4406 &request.data.change_operational_iq_properties.queue_id); 4407 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4408 &request.data.change_operational_iq_properties.vendor_specific); 4409 4410 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4411 &response); 4412 if (rc) { 4413 dev_err(&ctrl_info->pci_dev->dev, 4414 "error changing queue property\n"); 4415 return rc; 4416 } 4417 4418 /* 4419 * Create OQ (Outbound Queue - device to host queue). 4420 */ 4421 memset(&request, 0, sizeof(request)); 4422 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4423 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4424 &request.header.iu_length); 4425 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4426 put_unaligned_le16(queue_group->oq_id, 4427 &request.data.create_operational_oq.queue_id); 4428 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4429 &request.data.create_operational_oq.element_array_addr); 4430 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4431 &request.data.create_operational_oq.pi_addr); 4432 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4433 &request.data.create_operational_oq.num_elements); 4434 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4435 &request.data.create_operational_oq.element_length); 4436 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4437 put_unaligned_le16(queue_group->int_msg_num, 4438 &request.data.create_operational_oq.int_msg_num); 4439 4440 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4441 &response); 4442 if (rc) { 4443 dev_err(&ctrl_info->pci_dev->dev, 4444 "error creating outbound queue\n"); 4445 return rc; 4446 } 4447 4448 queue_group->oq_ci = ctrl_info->iomem_base + 4449 PQI_DEVICE_REGISTERS_OFFSET + 4450 get_unaligned_le64( 4451 &response.data.create_operational_oq.oq_ci_offset); 4452 4453 return 0; 4454 } 4455 4456 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4457 { 4458 int rc; 4459 unsigned int i; 4460 4461 rc = pqi_create_event_queue(ctrl_info); 4462 if (rc) { 4463 dev_err(&ctrl_info->pci_dev->dev, 4464 "error creating event queue\n"); 4465 return rc; 4466 } 4467 4468 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4469 rc = pqi_create_queue_group(ctrl_info, i); 4470 if (rc) { 4471 dev_err(&ctrl_info->pci_dev->dev, 4472 "error creating queue group number %u/%u\n", 4473 i, ctrl_info->num_queue_groups); 4474 return rc; 4475 } 4476 } 4477 4478 return 0; 4479 } 4480 4481 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4482 (offsetof(struct pqi_event_config, descriptors) + \ 4483 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4484 4485 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4486 bool enable_events) 4487 { 4488 int rc; 4489 unsigned int i; 4490 struct pqi_event_config *event_config; 4491 struct pqi_event_descriptor *event_descriptor; 4492 struct pqi_general_management_request request; 4493 4494 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4495 GFP_KERNEL); 4496 if (!event_config) 4497 return -ENOMEM; 4498 4499 memset(&request, 0, sizeof(request)); 4500 4501 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4502 
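	/*
	 * Event configuration is a read-modify-write sequence: the current
	 * configuration is read from the controller (DMA_FROM_DEVICE), each
	 * supported event descriptor is pointed at the dedicated event
	 * queue's OQ ID (or at 0 to disable it), and the modified buffer is
	 * then written back with SET_VENDOR_EVENT_CONFIG (DMA_TO_DEVICE).
	 */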
put_unaligned_le16(offsetof(struct pqi_general_management_request, 4503 data.report_event_configuration.sg_descriptors[1]) - 4504 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4505 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4506 &request.data.report_event_configuration.buffer_length); 4507 4508 rc = pqi_map_single(ctrl_info->pci_dev, 4509 request.data.report_event_configuration.sg_descriptors, 4510 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4511 DMA_FROM_DEVICE); 4512 if (rc) 4513 goto out; 4514 4515 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4516 0, NULL, NO_TIMEOUT); 4517 4518 pqi_pci_unmap(ctrl_info->pci_dev, 4519 request.data.report_event_configuration.sg_descriptors, 1, 4520 DMA_FROM_DEVICE); 4521 4522 if (rc) 4523 goto out; 4524 4525 for (i = 0; i < event_config->num_event_descriptors; i++) { 4526 event_descriptor = &event_config->descriptors[i]; 4527 if (enable_events && 4528 pqi_is_supported_event(event_descriptor->event_type)) 4529 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4530 &event_descriptor->oq_id); 4531 else 4532 put_unaligned_le16(0, &event_descriptor->oq_id); 4533 } 4534 4535 memset(&request, 0, sizeof(request)); 4536 4537 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4538 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4539 data.report_event_configuration.sg_descriptors[1]) - 4540 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4541 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4542 &request.data.report_event_configuration.buffer_length); 4543 4544 rc = pqi_map_single(ctrl_info->pci_dev, 4545 request.data.report_event_configuration.sg_descriptors, 4546 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4547 DMA_TO_DEVICE); 4548 if (rc) 4549 goto out; 4550 4551 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4552 NULL, NO_TIMEOUT); 4553 4554 pqi_pci_unmap(ctrl_info->pci_dev, 4555 request.data.report_event_configuration.sg_descriptors, 1, 4556 DMA_TO_DEVICE); 4557 4558 out: 4559 kfree(event_config); 4560 4561 return rc; 4562 } 4563 4564 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4565 { 4566 return pqi_configure_events(ctrl_info, true); 4567 } 4568 4569 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4570 { 4571 return pqi_configure_events(ctrl_info, false); 4572 } 4573 4574 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4575 { 4576 unsigned int i; 4577 struct device *dev; 4578 size_t sg_chain_buffer_length; 4579 struct pqi_io_request *io_request; 4580 4581 if (!ctrl_info->io_request_pool) 4582 return; 4583 4584 dev = &ctrl_info->pci_dev->dev; 4585 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4586 io_request = ctrl_info->io_request_pool; 4587 4588 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4589 kfree(io_request->iu); 4590 if (!io_request->sg_chain_buffer) 4591 break; 4592 dma_free_coherent(dev, sg_chain_buffer_length, 4593 io_request->sg_chain_buffer, 4594 io_request->sg_chain_buffer_dma_handle); 4595 io_request++; 4596 } 4597 4598 kfree(ctrl_info->io_request_pool); 4599 ctrl_info->io_request_pool = NULL; 4600 } 4601 4602 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4603 { 4604 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 4605 ctrl_info->error_buffer_length, 4606 &ctrl_info->error_buffer_dma_handle, 4607 GFP_KERNEL); 4608 4609 if (!ctrl_info->error_buffer) 4610 return -ENOMEM; 4611 
4612 return 0; 4613 } 4614 4615 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4616 { 4617 unsigned int i; 4618 void *sg_chain_buffer; 4619 size_t sg_chain_buffer_length; 4620 dma_addr_t sg_chain_buffer_dma_handle; 4621 struct device *dev; 4622 struct pqi_io_request *io_request; 4623 4624 ctrl_info->io_request_pool = 4625 kcalloc(ctrl_info->max_io_slots, 4626 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4627 4628 if (!ctrl_info->io_request_pool) { 4629 dev_err(&ctrl_info->pci_dev->dev, 4630 "failed to allocate I/O request pool\n"); 4631 goto error; 4632 } 4633 4634 dev = &ctrl_info->pci_dev->dev; 4635 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4636 io_request = ctrl_info->io_request_pool; 4637 4638 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4639 io_request->iu = 4640 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4641 4642 if (!io_request->iu) { 4643 dev_err(&ctrl_info->pci_dev->dev, 4644 "failed to allocate IU buffers\n"); 4645 goto error; 4646 } 4647 4648 sg_chain_buffer = dma_alloc_coherent(dev, 4649 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4650 GFP_KERNEL); 4651 4652 if (!sg_chain_buffer) { 4653 dev_err(&ctrl_info->pci_dev->dev, 4654 "failed to allocate PQI scatter-gather chain buffers\n"); 4655 goto error; 4656 } 4657 4658 io_request->index = i; 4659 io_request->sg_chain_buffer = sg_chain_buffer; 4660 io_request->sg_chain_buffer_dma_handle = 4661 sg_chain_buffer_dma_handle; 4662 io_request++; 4663 } 4664 4665 return 0; 4666 4667 error: 4668 pqi_free_all_io_requests(ctrl_info); 4669 4670 return -ENOMEM; 4671 } 4672 4673 /* 4674 * Calculate required resources that are sized based on max. outstanding 4675 * requests and max. transfer size. 4676 */ 4677 4678 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4679 { 4680 u32 max_transfer_size; 4681 u32 max_sg_entries; 4682 4683 ctrl_info->scsi_ml_can_queue = 4684 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4685 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4686 4687 ctrl_info->error_buffer_length = 4688 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4689 4690 if (reset_devices) 4691 max_transfer_size = min(ctrl_info->max_transfer_size, 4692 PQI_MAX_TRANSFER_SIZE_KDUMP); 4693 else 4694 max_transfer_size = min(ctrl_info->max_transfer_size, 4695 PQI_MAX_TRANSFER_SIZE); 4696 4697 max_sg_entries = max_transfer_size / PAGE_SIZE; 4698 4699 /* +1 to cover when the buffer is not page-aligned. 
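For example, a transfer of exactly max_transfer_size bytes that starts in the middle of a page touches one more page than max_transfer_size / PAGE_SIZE alone would suggest.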
*/ 4700 max_sg_entries++; 4701 4702 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4703 4704 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4705 4706 ctrl_info->sg_chain_buffer_length = 4707 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4708 PQI_EXTRA_SGL_MEMORY; 4709 ctrl_info->sg_tablesize = max_sg_entries; 4710 ctrl_info->max_sectors = max_transfer_size / 512; 4711 } 4712 4713 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4714 { 4715 int num_queue_groups; 4716 u16 num_elements_per_iq; 4717 u16 num_elements_per_oq; 4718 4719 if (reset_devices) { 4720 num_queue_groups = 1; 4721 } else { 4722 int num_cpus; 4723 int max_queue_groups; 4724 4725 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4726 ctrl_info->max_outbound_queues - 1); 4727 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4728 4729 num_cpus = num_online_cpus(); 4730 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4731 num_queue_groups = min(num_queue_groups, max_queue_groups); 4732 } 4733 4734 ctrl_info->num_queue_groups = num_queue_groups; 4735 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4736 4737 /* 4738 * Make sure that the max. inbound IU length is an even multiple 4739 * of our inbound element length. 4740 */ 4741 ctrl_info->max_inbound_iu_length = 4742 (ctrl_info->max_inbound_iu_length_per_firmware / 4743 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4744 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4745 4746 num_elements_per_iq = 4747 (ctrl_info->max_inbound_iu_length / 4748 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4749 4750 /* Add one because one element in each queue is unusable. */ 4751 num_elements_per_iq++; 4752 4753 num_elements_per_iq = min(num_elements_per_iq, 4754 ctrl_info->max_elements_per_iq); 4755 4756 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4757 num_elements_per_oq = min(num_elements_per_oq, 4758 ctrl_info->max_elements_per_oq); 4759 4760 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4761 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4762 4763 ctrl_info->max_sg_per_iu = 4764 ((ctrl_info->max_inbound_iu_length - 4765 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4766 sizeof(struct pqi_sg_descriptor)) + 4767 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4768 } 4769 4770 static inline void pqi_set_sg_descriptor( 4771 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4772 { 4773 u64 address = (u64)sg_dma_address(sg); 4774 unsigned int length = sg_dma_len(sg); 4775 4776 put_unaligned_le64(address, &sg_descriptor->address); 4777 put_unaligned_le32(length, &sg_descriptor->length); 4778 put_unaligned_le32(0, &sg_descriptor->flags); 4779 } 4780 4781 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4782 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4783 struct pqi_io_request *io_request) 4784 { 4785 int i; 4786 u16 iu_length; 4787 int sg_count; 4788 bool chained; 4789 unsigned int num_sg_in_iu; 4790 unsigned int max_sg_per_iu; 4791 struct scatterlist *sg; 4792 struct pqi_sg_descriptor *sg_descriptor; 4793 4794 sg_count = scsi_dma_map(scmd); 4795 if (sg_count < 0) 4796 return sg_count; 4797 4798 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4799 PQI_REQUEST_HEADER_LENGTH; 4800 4801 if (sg_count == 0) 4802 goto out; 4803 4804 sg = scsi_sglist(scmd); 4805 sg_descriptor = request->sg_descriptors; 4806 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4807 chained = false; 4808 num_sg_in_iu = 0; 4809 i = 0; 4810 4811 while (1) { 4812 
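		/*
		 * Layout used below: the first max_sg_per_iu - 1 descriptors
		 * are embedded directly in the IU; once those are exhausted,
		 * the last embedded slot becomes a CISS_SG_CHAIN descriptor
		 * pointing at the preallocated sg_chain_buffer and the
		 * remaining descriptors continue there.  CISS_SG_LAST marks
		 * the final descriptor.
		 */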
pqi_set_sg_descriptor(sg_descriptor, sg); 4813 if (!chained) 4814 num_sg_in_iu++; 4815 i++; 4816 if (i == sg_count) 4817 break; 4818 sg_descriptor++; 4819 if (i == max_sg_per_iu) { 4820 put_unaligned_le64( 4821 (u64)io_request->sg_chain_buffer_dma_handle, 4822 &sg_descriptor->address); 4823 put_unaligned_le32((sg_count - num_sg_in_iu) 4824 * sizeof(*sg_descriptor), 4825 &sg_descriptor->length); 4826 put_unaligned_le32(CISS_SG_CHAIN, 4827 &sg_descriptor->flags); 4828 chained = true; 4829 num_sg_in_iu++; 4830 sg_descriptor = io_request->sg_chain_buffer; 4831 } 4832 sg = sg_next(sg); 4833 } 4834 4835 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4836 request->partial = chained; 4837 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4838 4839 out: 4840 put_unaligned_le16(iu_length, &request->header.iu_length); 4841 4842 return 0; 4843 } 4844 4845 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4846 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4847 struct pqi_io_request *io_request) 4848 { 4849 int i; 4850 u16 iu_length; 4851 int sg_count; 4852 bool chained; 4853 unsigned int num_sg_in_iu; 4854 unsigned int max_sg_per_iu; 4855 struct scatterlist *sg; 4856 struct pqi_sg_descriptor *sg_descriptor; 4857 4858 sg_count = scsi_dma_map(scmd); 4859 if (sg_count < 0) 4860 return sg_count; 4861 4862 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4863 PQI_REQUEST_HEADER_LENGTH; 4864 num_sg_in_iu = 0; 4865 4866 if (sg_count == 0) 4867 goto out; 4868 4869 sg = scsi_sglist(scmd); 4870 sg_descriptor = request->sg_descriptors; 4871 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4872 chained = false; 4873 i = 0; 4874 4875 while (1) { 4876 pqi_set_sg_descriptor(sg_descriptor, sg); 4877 if (!chained) 4878 num_sg_in_iu++; 4879 i++; 4880 if (i == sg_count) 4881 break; 4882 sg_descriptor++; 4883 if (i == max_sg_per_iu) { 4884 put_unaligned_le64( 4885 (u64)io_request->sg_chain_buffer_dma_handle, 4886 &sg_descriptor->address); 4887 put_unaligned_le32((sg_count - num_sg_in_iu) 4888 * sizeof(*sg_descriptor), 4889 &sg_descriptor->length); 4890 put_unaligned_le32(CISS_SG_CHAIN, 4891 &sg_descriptor->flags); 4892 chained = true; 4893 num_sg_in_iu++; 4894 sg_descriptor = io_request->sg_chain_buffer; 4895 } 4896 sg = sg_next(sg); 4897 } 4898 4899 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4900 request->partial = chained; 4901 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4902 4903 out: 4904 put_unaligned_le16(iu_length, &request->header.iu_length); 4905 request->num_sg_descriptors = num_sg_in_iu; 4906 4907 return 0; 4908 } 4909 4910 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4911 void *context) 4912 { 4913 struct scsi_cmnd *scmd; 4914 4915 scmd = io_request->scmd; 4916 pqi_free_io_request(io_request); 4917 scsi_dma_unmap(scmd); 4918 pqi_scsi_done(scmd); 4919 } 4920 4921 static int pqi_raid_submit_scsi_cmd_with_io_request( 4922 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4923 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4924 struct pqi_queue_group *queue_group) 4925 { 4926 int rc; 4927 size_t cdb_length; 4928 struct pqi_raid_path_request *request; 4929 4930 io_request->io_complete_callback = pqi_raid_io_complete; 4931 io_request->scmd = scmd; 4932 4933 request = io_request->iu; 4934 memset(request, 0, 4935 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4936 4937 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4938 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4939 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4940 put_unaligned_le16(io_request->index, &request->request_id); 4941 request->error_index = request->request_id; 4942 memcpy(request->lun_number, device->scsi3addr, 4943 sizeof(request->lun_number)); 4944 4945 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4946 memcpy(request->cdb, scmd->cmnd, cdb_length); 4947 4948 switch (cdb_length) { 4949 case 6: 4950 case 10: 4951 case 12: 4952 case 16: 4953 /* No bytes in the Additional CDB bytes field */ 4954 request->additional_cdb_bytes_usage = 4955 SOP_ADDITIONAL_CDB_BYTES_0; 4956 break; 4957 case 20: 4958 /* 4 bytes in the Additional cdb field */ 4959 request->additional_cdb_bytes_usage = 4960 SOP_ADDITIONAL_CDB_BYTES_4; 4961 break; 4962 case 24: 4963 /* 8 bytes in the Additional cdb field */ 4964 request->additional_cdb_bytes_usage = 4965 SOP_ADDITIONAL_CDB_BYTES_8; 4966 break; 4967 case 28: 4968 /* 12 bytes in the Additional cdb field */ 4969 request->additional_cdb_bytes_usage = 4970 SOP_ADDITIONAL_CDB_BYTES_12; 4971 break; 4972 case 32: 4973 default: 4974 /* 16 bytes in the Additional cdb field */ 4975 request->additional_cdb_bytes_usage = 4976 SOP_ADDITIONAL_CDB_BYTES_16; 4977 break; 4978 } 4979 4980 switch (scmd->sc_data_direction) { 4981 case DMA_TO_DEVICE: 4982 request->data_direction = SOP_READ_FLAG; 4983 break; 4984 case DMA_FROM_DEVICE: 4985 request->data_direction = SOP_WRITE_FLAG; 4986 break; 4987 case DMA_NONE: 4988 request->data_direction = SOP_NO_DIRECTION_FLAG; 4989 break; 4990 case DMA_BIDIRECTIONAL: 4991 request->data_direction = SOP_BIDIRECTIONAL; 4992 break; 4993 default: 4994 dev_err(&ctrl_info->pci_dev->dev, 4995 "unknown data direction: %d\n", 4996 scmd->sc_data_direction); 4997 break; 4998 } 4999 5000 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5001 if (rc) { 5002 pqi_free_io_request(io_request); 5003 return SCSI_MLQUEUE_HOST_BUSY; 5004 } 5005 5006 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5007 5008 return 0; 5009 } 5010 5011 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5012 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5013 struct pqi_queue_group *queue_group) 5014 { 5015 struct pqi_io_request *io_request; 5016 5017 io_request = pqi_alloc_io_request(ctrl_info); 5018 5019 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5020 device, scmd, queue_group); 5021 } 5022 5023 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 5024 { 5025 if (!pqi_ctrl_blocked(ctrl_info)) 5026 schedule_work(&ctrl_info->raid_bypass_retry_work); 5027 } 5028 5029 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5030 { 5031 struct scsi_cmnd *scmd; 5032 struct pqi_scsi_dev *device; 5033 struct pqi_ctrl_info *ctrl_info; 5034 5035 if (!io_request->raid_bypass) 5036 return false; 5037 5038 scmd = io_request->scmd; 5039 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5040 return false; 5041 if (host_byte(scmd->result) == DID_NO_CONNECT) 5042 return false; 5043 5044 device = scmd->device->hostdata; 5045 if (pqi_device_offline(device)) 5046 return false; 5047 5048 ctrl_info = shost_to_hba(scmd->device->host); 5049 if (pqi_ctrl_offline(ctrl_info)) 5050 return false; 5051 5052 return true; 5053 } 5054 5055 static inline void pqi_add_to_raid_bypass_retry_list( 5056 struct pqi_ctrl_info *ctrl_info, 5057 struct pqi_io_request *io_request, bool at_head) 5058 { 5059 unsigned long flags; 5060 5061 
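	/*
	 * Requests that failed on the AIO (RAID bypass) path are parked on
	 * this per-controller list and later resubmitted down the RAID path
	 * by the retry worker; at_head is used when a retry attempt itself
	 * fails and the request has to go back onto the list.
	 */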
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5062 if (at_head) 5063 list_add(&io_request->request_list_entry, 5064 &ctrl_info->raid_bypass_retry_list); 5065 else 5066 list_add_tail(&io_request->request_list_entry, 5067 &ctrl_info->raid_bypass_retry_list); 5068 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5069 } 5070 5071 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 5072 void *context) 5073 { 5074 struct scsi_cmnd *scmd; 5075 5076 scmd = io_request->scmd; 5077 pqi_free_io_request(io_request); 5078 pqi_scsi_done(scmd); 5079 } 5080 5081 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 5082 { 5083 struct scsi_cmnd *scmd; 5084 struct pqi_ctrl_info *ctrl_info; 5085 5086 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 5087 scmd = io_request->scmd; 5088 scmd->result = 0; 5089 ctrl_info = shost_to_hba(scmd->device->host); 5090 5091 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 5092 pqi_schedule_bypass_retry(ctrl_info); 5093 } 5094 5095 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 5096 { 5097 struct scsi_cmnd *scmd; 5098 struct pqi_scsi_dev *device; 5099 struct pqi_ctrl_info *ctrl_info; 5100 struct pqi_queue_group *queue_group; 5101 5102 scmd = io_request->scmd; 5103 device = scmd->device->hostdata; 5104 if (pqi_device_in_reset(device)) { 5105 pqi_free_io_request(io_request); 5106 set_host_byte(scmd, DID_RESET); 5107 pqi_scsi_done(scmd); 5108 return 0; 5109 } 5110 5111 ctrl_info = shost_to_hba(scmd->device->host); 5112 queue_group = io_request->queue_group; 5113 5114 pqi_reinit_io_request(io_request); 5115 5116 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 5117 device, scmd, queue_group); 5118 } 5119 5120 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 5121 struct pqi_ctrl_info *ctrl_info) 5122 { 5123 unsigned long flags; 5124 struct pqi_io_request *io_request; 5125 5126 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5127 io_request = list_first_entry_or_null( 5128 &ctrl_info->raid_bypass_retry_list, 5129 struct pqi_io_request, request_list_entry); 5130 if (io_request) 5131 list_del(&io_request->request_list_entry); 5132 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5133 5134 return io_request; 5135 } 5136 5137 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 5138 { 5139 int rc; 5140 struct pqi_io_request *io_request; 5141 5142 pqi_ctrl_busy(ctrl_info); 5143 5144 while (1) { 5145 if (pqi_ctrl_blocked(ctrl_info)) 5146 break; 5147 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 5148 if (!io_request) 5149 break; 5150 rc = pqi_retry_raid_bypass(io_request); 5151 if (rc) { 5152 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 5153 true); 5154 pqi_schedule_bypass_retry(ctrl_info); 5155 break; 5156 } 5157 } 5158 5159 pqi_ctrl_unbusy(ctrl_info); 5160 } 5161 5162 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 5163 { 5164 struct pqi_ctrl_info *ctrl_info; 5165 5166 ctrl_info = container_of(work, struct pqi_ctrl_info, 5167 raid_bypass_retry_work); 5168 pqi_retry_raid_bypass_requests(ctrl_info); 5169 } 5170 5171 static void pqi_clear_all_queued_raid_bypass_retries( 5172 struct pqi_ctrl_info *ctrl_info) 5173 { 5174 unsigned long flags; 5175 5176 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 5177 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 5178 
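	/*
	 * Re-initializing the list head under the retry-list lock empties
	 * the list in one step; the queued requests are not completed here.
	 */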
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 5179 } 5180 5181 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5182 void *context) 5183 { 5184 struct scsi_cmnd *scmd; 5185 5186 scmd = io_request->scmd; 5187 scsi_dma_unmap(scmd); 5188 if (io_request->status == -EAGAIN) 5189 set_host_byte(scmd, DID_IMM_RETRY); 5190 else if (pqi_raid_bypass_retry_needed(io_request)) { 5191 pqi_queue_raid_bypass_retry(io_request); 5192 return; 5193 } 5194 pqi_free_io_request(io_request); 5195 pqi_scsi_done(scmd); 5196 } 5197 5198 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5199 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5200 struct pqi_queue_group *queue_group) 5201 { 5202 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5203 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 5204 } 5205 5206 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5207 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5208 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5209 struct pqi_encryption_info *encryption_info, bool raid_bypass) 5210 { 5211 int rc; 5212 struct pqi_io_request *io_request; 5213 struct pqi_aio_path_request *request; 5214 5215 io_request = pqi_alloc_io_request(ctrl_info); 5216 io_request->io_complete_callback = pqi_aio_io_complete; 5217 io_request->scmd = scmd; 5218 io_request->raid_bypass = raid_bypass; 5219 5220 request = io_request->iu; 5221 memset(request, 0, 5222 offsetof(struct pqi_raid_path_request, sg_descriptors)); 5223 5224 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5225 put_unaligned_le32(aio_handle, &request->nexus_id); 5226 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5227 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5228 put_unaligned_le16(io_request->index, &request->request_id); 5229 request->error_index = request->request_id; 5230 if (cdb_length > sizeof(request->cdb)) 5231 cdb_length = sizeof(request->cdb); 5232 request->cdb_length = cdb_length; 5233 memcpy(request->cdb, cdb, cdb_length); 5234 5235 switch (scmd->sc_data_direction) { 5236 case DMA_TO_DEVICE: 5237 request->data_direction = SOP_READ_FLAG; 5238 break; 5239 case DMA_FROM_DEVICE: 5240 request->data_direction = SOP_WRITE_FLAG; 5241 break; 5242 case DMA_NONE: 5243 request->data_direction = SOP_NO_DIRECTION_FLAG; 5244 break; 5245 case DMA_BIDIRECTIONAL: 5246 request->data_direction = SOP_BIDIRECTIONAL; 5247 break; 5248 default: 5249 dev_err(&ctrl_info->pci_dev->dev, 5250 "unknown data direction: %d\n", 5251 scmd->sc_data_direction); 5252 break; 5253 } 5254 5255 if (encryption_info) { 5256 request->encryption_enable = true; 5257 put_unaligned_le16(encryption_info->data_encryption_key_index, 5258 &request->data_encryption_key_index); 5259 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5260 &request->encrypt_tweak_lower); 5261 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5262 &request->encrypt_tweak_upper); 5263 } 5264 5265 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5266 if (rc) { 5267 pqi_free_io_request(io_request); 5268 return SCSI_MLQUEUE_HOST_BUSY; 5269 } 5270 5271 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5272 5273 return 0; 5274 } 5275 5276 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5277 struct scsi_cmnd *scmd) 5278 { 5279 u16 hw_queue; 5280 5281 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 5282 if (hw_queue > ctrl_info->max_hw_queue_index) 5283 hw_queue = 
0; 5284 5285 return hw_queue; 5286 } 5287 5288 /* 5289 * This function gets called just before we hand the completed SCSI request 5290 * back to the SML. 5291 */ 5292 5293 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5294 { 5295 struct pqi_scsi_dev *device; 5296 5297 if (!scmd->device) { 5298 set_host_byte(scmd, DID_NO_CONNECT); 5299 return; 5300 } 5301 5302 device = scmd->device->hostdata; 5303 if (!device) { 5304 set_host_byte(scmd, DID_NO_CONNECT); 5305 return; 5306 } 5307 5308 atomic_dec(&device->scsi_cmds_outstanding); 5309 } 5310 5311 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 5312 struct scsi_cmnd *scmd) 5313 { 5314 int rc; 5315 struct pqi_ctrl_info *ctrl_info; 5316 struct pqi_scsi_dev *device; 5317 u16 hw_queue; 5318 struct pqi_queue_group *queue_group; 5319 bool raid_bypassed; 5320 5321 device = scmd->device->hostdata; 5322 ctrl_info = shost_to_hba(shost); 5323 5324 if (!device) { 5325 set_host_byte(scmd, DID_NO_CONNECT); 5326 pqi_scsi_done(scmd); 5327 return 0; 5328 } 5329 5330 atomic_inc(&device->scsi_cmds_outstanding); 5331 5332 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info, 5333 device)) { 5334 set_host_byte(scmd, DID_NO_CONNECT); 5335 pqi_scsi_done(scmd); 5336 return 0; 5337 } 5338 5339 pqi_ctrl_busy(ctrl_info); 5340 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || 5341 pqi_ctrl_in_ofa(ctrl_info)) { 5342 rc = SCSI_MLQUEUE_HOST_BUSY; 5343 goto out; 5344 } 5345 5346 /* 5347 * This is necessary because the SML doesn't zero out this field during 5348 * error recovery. 5349 */ 5350 scmd->result = 0; 5351 5352 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 5353 queue_group = &ctrl_info->queue_groups[hw_queue]; 5354 5355 if (pqi_is_logical_device(device)) { 5356 raid_bypassed = false; 5357 if (device->raid_bypass_enabled && 5358 !blk_rq_is_passthrough(scmd->request)) { 5359 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 5360 scmd, queue_group); 5361 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 5362 raid_bypassed = true; 5363 } 5364 if (!raid_bypassed) 5365 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5366 queue_group); 5367 } else { 5368 if (device->aio_enabled) 5369 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 5370 queue_group); 5371 else 5372 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 5373 queue_group); 5374 } 5375 5376 out: 5377 pqi_ctrl_unbusy(ctrl_info); 5378 if (rc) 5379 atomic_dec(&device->scsi_cmds_outstanding); 5380 5381 return rc; 5382 } 5383 5384 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5385 struct pqi_queue_group *queue_group) 5386 { 5387 unsigned int path; 5388 unsigned long flags; 5389 bool list_is_empty; 5390 5391 for (path = 0; path < 2; path++) { 5392 while (1) { 5393 spin_lock_irqsave( 5394 &queue_group->submit_lock[path], flags); 5395 list_is_empty = 5396 list_empty(&queue_group->request_list[path]); 5397 spin_unlock_irqrestore( 5398 &queue_group->submit_lock[path], flags); 5399 if (list_is_empty) 5400 break; 5401 pqi_check_ctrl_health(ctrl_info); 5402 if (pqi_ctrl_offline(ctrl_info)) 5403 return -ENXIO; 5404 usleep_range(1000, 2000); 5405 } 5406 } 5407 5408 return 0; 5409 } 5410 5411 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5412 { 5413 int rc; 5414 unsigned int i; 5415 unsigned int path; 5416 struct pqi_queue_group *queue_group; 5417 pqi_index_t iq_pi; 5418 pqi_index_t iq_ci; 5419 5420 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5421 queue_group = &ctrl_info->queue_groups[i]; 5422 
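		/*
		 * Draining happens in two stages per queue group: first wait
		 * for the software request lists to empty (everything queued
		 * has been copied into the hardware IQs), then wait for the
		 * firmware's consumer index to catch up with our producer
		 * index on both paths.
		 */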
5423 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5424 if (rc) 5425 return rc; 5426 5427 for (path = 0; path < 2; path++) { 5428 iq_pi = queue_group->iq_pi_copy[path]; 5429 5430 while (1) { 5431 iq_ci = readl(queue_group->iq_ci[path]); 5432 if (iq_ci == iq_pi) 5433 break; 5434 pqi_check_ctrl_health(ctrl_info); 5435 if (pqi_ctrl_offline(ctrl_info)) 5436 return -ENXIO; 5437 usleep_range(1000, 2000); 5438 } 5439 } 5440 } 5441 5442 return 0; 5443 } 5444 5445 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5446 struct pqi_scsi_dev *device) 5447 { 5448 unsigned int i; 5449 unsigned int path; 5450 struct pqi_queue_group *queue_group; 5451 unsigned long flags; 5452 struct pqi_io_request *io_request; 5453 struct pqi_io_request *next; 5454 struct scsi_cmnd *scmd; 5455 struct pqi_scsi_dev *scsi_device; 5456 5457 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5458 queue_group = &ctrl_info->queue_groups[i]; 5459 5460 for (path = 0; path < 2; path++) { 5461 spin_lock_irqsave( 5462 &queue_group->submit_lock[path], flags); 5463 5464 list_for_each_entry_safe(io_request, next, 5465 &queue_group->request_list[path], 5466 request_list_entry) { 5467 scmd = io_request->scmd; 5468 if (!scmd) 5469 continue; 5470 5471 scsi_device = scmd->device->hostdata; 5472 if (scsi_device != device) 5473 continue; 5474 5475 list_del(&io_request->request_list_entry); 5476 set_host_byte(scmd, DID_RESET); 5477 pqi_scsi_done(scmd); 5478 } 5479 5480 spin_unlock_irqrestore( 5481 &queue_group->submit_lock[path], flags); 5482 } 5483 } 5484 } 5485 5486 static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info) 5487 { 5488 unsigned int i; 5489 unsigned int path; 5490 struct pqi_queue_group *queue_group; 5491 unsigned long flags; 5492 struct pqi_io_request *io_request; 5493 struct pqi_io_request *next; 5494 struct scsi_cmnd *scmd; 5495 5496 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5497 queue_group = &ctrl_info->queue_groups[i]; 5498 5499 for (path = 0; path < 2; path++) { 5500 spin_lock_irqsave(&queue_group->submit_lock[path], 5501 flags); 5502 5503 list_for_each_entry_safe(io_request, next, 5504 &queue_group->request_list[path], 5505 request_list_entry) { 5506 5507 scmd = io_request->scmd; 5508 if (!scmd) 5509 continue; 5510 5511 list_del(&io_request->request_list_entry); 5512 set_host_byte(scmd, DID_RESET); 5513 pqi_scsi_done(scmd); 5514 } 5515 5516 spin_unlock_irqrestore( 5517 &queue_group->submit_lock[path], flags); 5518 } 5519 } 5520 } 5521 5522 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5523 struct pqi_scsi_dev *device, unsigned long timeout_secs) 5524 { 5525 unsigned long timeout; 5526 5527 timeout = (timeout_secs * PQI_HZ) + jiffies; 5528 5529 while (atomic_read(&device->scsi_cmds_outstanding)) { 5530 pqi_check_ctrl_health(ctrl_info); 5531 if (pqi_ctrl_offline(ctrl_info)) 5532 return -ENXIO; 5533 if (timeout_secs != NO_TIMEOUT) { 5534 if (time_after(jiffies, timeout)) { 5535 dev_err(&ctrl_info->pci_dev->dev, 5536 "timed out waiting for pending IO\n"); 5537 return -ETIMEDOUT; 5538 } 5539 } 5540 usleep_range(1000, 2000); 5541 } 5542 5543 return 0; 5544 } 5545 5546 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5547 unsigned long timeout_secs) 5548 { 5549 bool io_pending; 5550 unsigned long flags; 5551 unsigned long timeout; 5552 struct pqi_scsi_dev *device; 5553 5554 timeout = (timeout_secs * PQI_HZ) + jiffies; 5555 while (1) { 5556 io_pending = false; 5557 5558 
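		/*
		 * Scan every device on the controller's list under the
		 * device-list lock; any nonzero scsi_cmds_outstanding count
		 * keeps us polling.
		 */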
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5559 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5560 scsi_device_list_entry) { 5561 if (atomic_read(&device->scsi_cmds_outstanding)) { 5562 io_pending = true; 5563 break; 5564 } 5565 } 5566 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5567 flags); 5568 5569 if (!io_pending) 5570 break; 5571 5572 pqi_check_ctrl_health(ctrl_info); 5573 if (pqi_ctrl_offline(ctrl_info)) 5574 return -ENXIO; 5575 5576 if (timeout_secs != NO_TIMEOUT) { 5577 if (time_after(jiffies, timeout)) { 5578 dev_err(&ctrl_info->pci_dev->dev, 5579 "timed out waiting for pending IO\n"); 5580 return -ETIMEDOUT; 5581 } 5582 } 5583 usleep_range(1000, 2000); 5584 } 5585 5586 return 0; 5587 } 5588 5589 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5590 void *context) 5591 { 5592 struct completion *waiting = context; 5593 5594 complete(waiting); 5595 } 5596 5597 #define PQI_LUN_RESET_TIMEOUT_SECS 10 5598 5599 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5600 struct pqi_scsi_dev *device, struct completion *wait) 5601 { 5602 int rc; 5603 5604 while (1) { 5605 if (wait_for_completion_io_timeout(wait, 5606 PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) { 5607 rc = 0; 5608 break; 5609 } 5610 5611 pqi_check_ctrl_health(ctrl_info); 5612 if (pqi_ctrl_offline(ctrl_info)) { 5613 rc = -ENXIO; 5614 break; 5615 } 5616 } 5617 5618 return rc; 5619 } 5620 5621 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5622 struct pqi_scsi_dev *device) 5623 { 5624 int rc; 5625 struct pqi_io_request *io_request; 5626 DECLARE_COMPLETION_ONSTACK(wait); 5627 struct pqi_task_management_request *request; 5628 5629 io_request = pqi_alloc_io_request(ctrl_info); 5630 io_request->io_complete_callback = pqi_lun_reset_complete; 5631 io_request->context = &wait; 5632 5633 request = io_request->iu; 5634 memset(request, 0, sizeof(*request)); 5635 5636 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 5637 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5638 &request->header.iu_length); 5639 put_unaligned_le16(io_request->index, &request->request_id); 5640 memcpy(request->lun_number, device->scsi3addr, 5641 sizeof(request->lun_number)); 5642 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5643 5644 pqi_start_io(ctrl_info, 5645 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5646 io_request); 5647 5648 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5649 if (rc == 0) 5650 rc = io_request->status; 5651 5652 pqi_free_io_request(io_request); 5653 5654 return rc; 5655 } 5656 5657 #define PQI_LUN_RESET_RETRIES 3 5658 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 5659 /* Performs a reset at the LUN level. */ 5660 5661 static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5662 struct pqi_scsi_dev *device) 5663 { 5664 int rc; 5665 unsigned int retries; 5666 unsigned long timeout_secs; 5667 5668 for (retries = 0;;) { 5669 rc = pqi_lun_reset(ctrl_info, device); 5670 if (rc != -EAGAIN || 5671 ++retries > PQI_LUN_RESET_RETRIES) 5672 break; 5673 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 5674 } 5675 timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT; 5676 5677 rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs); 5678 5679 return rc == 0 ? 
SUCCESS : FAILED; 5680 } 5681 5682 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5683 struct pqi_scsi_dev *device) 5684 { 5685 int rc; 5686 5687 mutex_lock(&ctrl_info->lun_reset_mutex); 5688 5689 pqi_ctrl_block_requests(ctrl_info); 5690 pqi_ctrl_wait_until_quiesced(ctrl_info); 5691 pqi_fail_io_queued_for_device(ctrl_info, device); 5692 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5693 pqi_device_reset_start(device); 5694 pqi_ctrl_unblock_requests(ctrl_info); 5695 5696 if (rc) 5697 rc = FAILED; 5698 else 5699 rc = _pqi_device_reset(ctrl_info, device); 5700 5701 pqi_device_reset_done(device); 5702 5703 mutex_unlock(&ctrl_info->lun_reset_mutex); 5704 return rc; 5705 } 5706 5707 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5708 { 5709 int rc; 5710 struct Scsi_Host *shost; 5711 struct pqi_ctrl_info *ctrl_info; 5712 struct pqi_scsi_dev *device; 5713 5714 shost = scmd->device->host; 5715 ctrl_info = shost_to_hba(shost); 5716 device = scmd->device->hostdata; 5717 5718 dev_err(&ctrl_info->pci_dev->dev, 5719 "resetting scsi %d:%d:%d:%d\n", 5720 shost->host_no, device->bus, device->target, device->lun); 5721 5722 pqi_check_ctrl_health(ctrl_info); 5723 if (pqi_ctrl_offline(ctrl_info)) { 5724 dev_err(&ctrl_info->pci_dev->dev, 5725 "controller %u offlined - cannot send device reset\n", 5726 ctrl_info->ctrl_id); 5727 rc = FAILED; 5728 goto out; 5729 } 5730 5731 pqi_wait_until_ofa_finished(ctrl_info); 5732 5733 rc = pqi_device_reset(ctrl_info, device); 5734 out: 5735 dev_err(&ctrl_info->pci_dev->dev, 5736 "reset of scsi %d:%d:%d:%d: %s\n", 5737 shost->host_no, device->bus, device->target, device->lun, 5738 rc == SUCCESS ? "SUCCESS" : "FAILED"); 5739 5740 return rc; 5741 } 5742 5743 static int pqi_slave_alloc(struct scsi_device *sdev) 5744 { 5745 struct pqi_scsi_dev *device; 5746 unsigned long flags; 5747 struct pqi_ctrl_info *ctrl_info; 5748 struct scsi_target *starget; 5749 struct sas_rphy *rphy; 5750 5751 ctrl_info = shost_to_hba(sdev->host); 5752 5753 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5754 5755 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5756 starget = scsi_target(sdev); 5757 rphy = target_to_rphy(starget); 5758 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5759 if (device) { 5760 device->target = sdev_id(sdev); 5761 device->lun = sdev->lun; 5762 device->target_lun_valid = true; 5763 } 5764 } else { 5765 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5766 sdev_id(sdev), sdev->lun); 5767 } 5768 5769 if (device) { 5770 sdev->hostdata = device; 5771 device->sdev = sdev; 5772 if (device->queue_depth) { 5773 device->advertised_queue_depth = device->queue_depth; 5774 scsi_change_queue_depth(sdev, 5775 device->advertised_queue_depth); 5776 } 5777 if (pqi_is_logical_device(device)) 5778 pqi_disable_write_same(sdev); 5779 else 5780 sdev->allow_restart = 1; 5781 } 5782 5783 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5784 5785 return 0; 5786 } 5787 5788 static int pqi_map_queues(struct Scsi_Host *shost) 5789 { 5790 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5791 5792 return blk_mq_pci_map_queues(&shost->tag_set.map[0], 5793 ctrl_info->pci_dev, 0); 5794 } 5795 5796 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5797 void __user *arg) 5798 { 5799 struct pci_dev *pci_dev; 5800 u32 subsystem_vendor; 5801 u32 subsystem_device; 5802 cciss_pci_info_struct pciinfo; 5803 5804 if (!arg) 5805 return -EINVAL; 5806 5807 pci_dev = ctrl_info->pci_dev; 5808 5809 pciinfo.domain = 
pci_domain_nr(pci_dev->bus); 5810 pciinfo.bus = pci_dev->bus->number; 5811 pciinfo.dev_fn = pci_dev->devfn; 5812 subsystem_vendor = pci_dev->subsystem_vendor; 5813 subsystem_device = pci_dev->subsystem_device; 5814 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5815 subsystem_vendor; 5816 5817 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5818 return -EFAULT; 5819 5820 return 0; 5821 } 5822 5823 static int pqi_getdrivver_ioctl(void __user *arg) 5824 { 5825 u32 version; 5826 5827 if (!arg) 5828 return -EINVAL; 5829 5830 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5831 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5832 5833 if (copy_to_user(arg, &version, sizeof(version))) 5834 return -EFAULT; 5835 5836 return 0; 5837 } 5838 5839 struct ciss_error_info { 5840 u8 scsi_status; 5841 int command_status; 5842 size_t sense_data_length; 5843 }; 5844 5845 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5846 struct ciss_error_info *ciss_error_info) 5847 { 5848 int ciss_cmd_status; 5849 size_t sense_data_length; 5850 5851 switch (pqi_error_info->data_out_result) { 5852 case PQI_DATA_IN_OUT_GOOD: 5853 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5854 break; 5855 case PQI_DATA_IN_OUT_UNDERFLOW: 5856 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5857 break; 5858 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5859 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5860 break; 5861 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5862 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5863 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5864 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5865 case PQI_DATA_IN_OUT_ERROR: 5866 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5867 break; 5868 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5869 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 5870 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5871 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5872 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5873 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5874 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5875 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5876 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5877 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5878 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5879 break; 5880 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5881 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5882 break; 5883 case PQI_DATA_IN_OUT_ABORTED: 5884 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5885 break; 5886 case PQI_DATA_IN_OUT_TIMEOUT: 5887 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5888 break; 5889 default: 5890 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5891 break; 5892 } 5893 5894 sense_data_length = 5895 get_unaligned_le16(&pqi_error_info->sense_data_length); 5896 if (sense_data_length == 0) 5897 sense_data_length = 5898 get_unaligned_le16(&pqi_error_info->response_data_length); 5899 if (sense_data_length) 5900 if (sense_data_length > sizeof(pqi_error_info->data)) 5901 sense_data_length = sizeof(pqi_error_info->data); 5902 5903 ciss_error_info->scsi_status = pqi_error_info->status; 5904 ciss_error_info->command_status = ciss_cmd_status; 5905 ciss_error_info->sense_data_length = sense_data_length; 5906 } 5907 5908 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5909 { 5910 int rc; 5911 char *kernel_buffer = NULL; 5912 u16 iu_length; 5913 size_t sense_data_length; 5914 IOCTL_Command_struct iocommand; 5915 struct pqi_raid_path_request request; 5916 struct pqi_raid_error_info pqi_error_info; 
5917 struct ciss_error_info ciss_error_info; 5918 5919 if (pqi_ctrl_offline(ctrl_info)) 5920 return -ENXIO; 5921 if (!arg) 5922 return -EINVAL; 5923 if (!capable(CAP_SYS_RAWIO)) 5924 return -EPERM; 5925 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5926 return -EFAULT; 5927 if (iocommand.buf_size < 1 && 5928 iocommand.Request.Type.Direction != XFER_NONE) 5929 return -EINVAL; 5930 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5931 return -EINVAL; 5932 if (iocommand.Request.Type.Type != TYPE_CMD) 5933 return -EINVAL; 5934 5935 switch (iocommand.Request.Type.Direction) { 5936 case XFER_NONE: 5937 case XFER_WRITE: 5938 case XFER_READ: 5939 case XFER_READ | XFER_WRITE: 5940 break; 5941 default: 5942 return -EINVAL; 5943 } 5944 5945 if (iocommand.buf_size > 0) { 5946 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5947 if (!kernel_buffer) 5948 return -ENOMEM; 5949 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5950 if (copy_from_user(kernel_buffer, iocommand.buf, 5951 iocommand.buf_size)) { 5952 rc = -EFAULT; 5953 goto out; 5954 } 5955 } else { 5956 memset(kernel_buffer, 0, iocommand.buf_size); 5957 } 5958 } 5959 5960 memset(&request, 0, sizeof(request)); 5961 5962 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5963 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5964 PQI_REQUEST_HEADER_LENGTH; 5965 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 5966 sizeof(request.lun_number)); 5967 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 5968 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5969 5970 switch (iocommand.Request.Type.Direction) { 5971 case XFER_NONE: 5972 request.data_direction = SOP_NO_DIRECTION_FLAG; 5973 break; 5974 case XFER_WRITE: 5975 request.data_direction = SOP_WRITE_FLAG; 5976 break; 5977 case XFER_READ: 5978 request.data_direction = SOP_READ_FLAG; 5979 break; 5980 case XFER_READ | XFER_WRITE: 5981 request.data_direction = SOP_BIDIRECTIONAL; 5982 break; 5983 } 5984 5985 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5986 5987 if (iocommand.buf_size > 0) { 5988 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 5989 5990 rc = pqi_map_single(ctrl_info->pci_dev, 5991 &request.sg_descriptors[0], kernel_buffer, 5992 iocommand.buf_size, DMA_BIDIRECTIONAL); 5993 if (rc) 5994 goto out; 5995 5996 iu_length += sizeof(request.sg_descriptors[0]); 5997 } 5998 5999 put_unaligned_le16(iu_length, &request.header.iu_length); 6000 6001 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6002 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 6003 6004 if (iocommand.buf_size > 0) 6005 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6006 DMA_BIDIRECTIONAL); 6007 6008 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6009 6010 if (rc == 0) { 6011 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6012 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6013 iocommand.error_info.CommandStatus = 6014 ciss_error_info.command_status; 6015 sense_data_length = ciss_error_info.sense_data_length; 6016 if (sense_data_length) { 6017 if (sense_data_length > 6018 sizeof(iocommand.error_info.SenseInfo)) 6019 sense_data_length = 6020 sizeof(iocommand.error_info.SenseInfo); 6021 memcpy(iocommand.error_info.SenseInfo, 6022 pqi_error_info.data, sense_data_length); 6023 iocommand.error_info.SenseLen = sense_data_length; 6024 } 6025 } 6026 6027 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6028 rc = -EFAULT; 6029 
goto out; 6030 } 6031 6032 if (rc == 0 && iocommand.buf_size > 0 && 6033 (iocommand.Request.Type.Direction & XFER_READ)) { 6034 if (copy_to_user(iocommand.buf, kernel_buffer, 6035 iocommand.buf_size)) { 6036 rc = -EFAULT; 6037 } 6038 } 6039 6040 out: 6041 kfree(kernel_buffer); 6042 6043 return rc; 6044 } 6045 6046 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 6047 { 6048 int rc; 6049 struct pqi_ctrl_info *ctrl_info; 6050 6051 ctrl_info = shost_to_hba(sdev->host); 6052 6053 if (pqi_ctrl_in_ofa(ctrl_info)) 6054 return -EBUSY; 6055 6056 switch (cmd) { 6057 case CCISS_DEREGDISK: 6058 case CCISS_REGNEWDISK: 6059 case CCISS_REGNEWD: 6060 rc = pqi_scan_scsi_devices(ctrl_info); 6061 break; 6062 case CCISS_GETPCIINFO: 6063 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6064 break; 6065 case CCISS_GETDRIVVER: 6066 rc = pqi_getdrivver_ioctl(arg); 6067 break; 6068 case CCISS_PASSTHRU: 6069 rc = pqi_passthru_ioctl(ctrl_info, arg); 6070 break; 6071 default: 6072 rc = -EINVAL; 6073 break; 6074 } 6075 6076 return rc; 6077 } 6078 6079 static ssize_t pqi_version_show(struct device *dev, 6080 struct device_attribute *attr, char *buffer) 6081 { 6082 ssize_t count = 0; 6083 struct Scsi_Host *shost; 6084 struct pqi_ctrl_info *ctrl_info; 6085 6086 shost = class_to_shost(dev); 6087 ctrl_info = shost_to_hba(shost); 6088 6089 count += snprintf(buffer + count, PAGE_SIZE - count, 6090 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); 6091 6092 count += snprintf(buffer + count, PAGE_SIZE - count, 6093 "firmware: %s\n", ctrl_info->firmware_version); 6094 6095 return count; 6096 } 6097 6098 static ssize_t pqi_host_rescan_store(struct device *dev, 6099 struct device_attribute *attr, const char *buffer, size_t count) 6100 { 6101 struct Scsi_Host *shost = class_to_shost(dev); 6102 6103 pqi_scan_start(shost); 6104 6105 return count; 6106 } 6107 6108 static ssize_t pqi_lockup_action_show(struct device *dev, 6109 struct device_attribute *attr, char *buffer) 6110 { 6111 int count = 0; 6112 unsigned int i; 6113 6114 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6115 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6116 count += snprintf(buffer + count, PAGE_SIZE - count, 6117 "[%s] ", pqi_lockup_actions[i].name); 6118 else 6119 count += snprintf(buffer + count, PAGE_SIZE - count, 6120 "%s ", pqi_lockup_actions[i].name); 6121 } 6122 6123 count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); 6124 6125 return count; 6126 } 6127 6128 static ssize_t pqi_lockup_action_store(struct device *dev, 6129 struct device_attribute *attr, const char *buffer, size_t count) 6130 { 6131 unsigned int i; 6132 char *action_name; 6133 char action_name_buffer[32]; 6134 6135 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6136 action_name = strstrip(action_name_buffer); 6137 6138 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6139 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 6140 pqi_lockup_action = pqi_lockup_actions[i].action; 6141 return count; 6142 } 6143 } 6144 6145 return -EINVAL; 6146 } 6147 6148 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); 6149 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 6150 static DEVICE_ATTR(lockup_action, 0644, 6151 pqi_lockup_action_show, pqi_lockup_action_store); 6152 6153 static struct device_attribute *pqi_shost_attrs[] = { 6154 &dev_attr_version, 6155 &dev_attr_rescan, 6156 &dev_attr_lockup_action, 6157 NULL 6158 }; 6159 6160 static ssize_t pqi_unique_id_show(struct device *dev, 6161 struct 
device_attribute *attr, char *buffer) 6162 { 6163 struct pqi_ctrl_info *ctrl_info; 6164 struct scsi_device *sdev; 6165 struct pqi_scsi_dev *device; 6166 unsigned long flags; 6167 unsigned char uid[16]; 6168 6169 sdev = to_scsi_device(dev); 6170 ctrl_info = shost_to_hba(sdev->host); 6171 6172 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6173 6174 device = sdev->hostdata; 6175 if (!device) { 6176 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6177 flags); 6178 return -ENODEV; 6179 } 6180 memcpy(uid, device->unique_id, sizeof(uid)); 6181 6182 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6183 6184 return snprintf(buffer, PAGE_SIZE, 6185 "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", 6186 uid[0], uid[1], uid[2], uid[3], 6187 uid[4], uid[5], uid[6], uid[7], 6188 uid[8], uid[9], uid[10], uid[11], 6189 uid[12], uid[13], uid[14], uid[15]); 6190 } 6191 6192 static ssize_t pqi_lunid_show(struct device *dev, 6193 struct device_attribute *attr, char *buffer) 6194 { 6195 struct pqi_ctrl_info *ctrl_info; 6196 struct scsi_device *sdev; 6197 struct pqi_scsi_dev *device; 6198 unsigned long flags; 6199 u8 lunid[8]; 6200 6201 sdev = to_scsi_device(dev); 6202 ctrl_info = shost_to_hba(sdev->host); 6203 6204 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6205 6206 device = sdev->hostdata; 6207 if (!device) { 6208 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6209 flags); 6210 return -ENODEV; 6211 } 6212 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 6213 6214 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6215 6216 return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 6217 } 6218 6219 #define MAX_PATHS 8 6220 static ssize_t pqi_path_info_show(struct device *dev, 6221 struct device_attribute *attr, char *buf) 6222 { 6223 struct pqi_ctrl_info *ctrl_info; 6224 struct scsi_device *sdev; 6225 struct pqi_scsi_dev *device; 6226 unsigned long flags; 6227 int i; 6228 int output_len = 0; 6229 u8 box; 6230 u8 bay; 6231 u8 path_map_index = 0; 6232 char *active; 6233 unsigned char phys_connector[2]; 6234 6235 sdev = to_scsi_device(dev); 6236 ctrl_info = shost_to_hba(sdev->host); 6237 6238 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6239 6240 device = sdev->hostdata; 6241 if (!device) { 6242 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6243 flags); 6244 return -ENODEV; 6245 } 6246 6247 bay = device->bay; 6248 for (i = 0; i < MAX_PATHS; i++) { 6249 path_map_index = 1<<i; 6250 if (i == device->active_path_index) 6251 active = "Active"; 6252 else if (device->path_map & path_map_index) 6253 active = "Inactive"; 6254 else 6255 continue; 6256 6257 output_len += scnprintf(buf + output_len, 6258 PAGE_SIZE - output_len, 6259 "[%d:%d:%d:%d] %20.20s ", 6260 ctrl_info->scsi_host->host_no, 6261 device->bus, device->target, 6262 device->lun, 6263 scsi_device_type(device->devtype)); 6264 6265 if (device->devtype == TYPE_RAID || 6266 pqi_is_logical_device(device)) 6267 goto end_buffer; 6268 6269 memcpy(&phys_connector, &device->phys_connector[i], 6270 sizeof(phys_connector)); 6271 if (phys_connector[0] < '0') 6272 phys_connector[0] = '0'; 6273 if (phys_connector[1] < '0') 6274 phys_connector[1] = '0'; 6275 6276 output_len += scnprintf(buf + output_len, 6277 PAGE_SIZE - output_len, 6278 "PORT: %.2s ", phys_connector); 6279 6280 box = device->box[i]; 6281 if (box != 0 && box != 0xFF) 6282 output_len += scnprintf(buf + output_len, 6283 PAGE_SIZE - output_len, 6284 "BOX: %hhu ", box); 6285 6286 if 
((device->devtype == TYPE_DISK || 6287 device->devtype == TYPE_ZBC) && 6288 pqi_expose_device(device)) 6289 output_len += scnprintf(buf + output_len, 6290 PAGE_SIZE - output_len, 6291 "BAY: %hhu ", bay); 6292 6293 end_buffer: 6294 output_len += scnprintf(buf + output_len, 6295 PAGE_SIZE - output_len, 6296 "%s\n", active); 6297 } 6298 6299 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6300 return output_len; 6301 } 6302 6303 6304 static ssize_t pqi_sas_address_show(struct device *dev, 6305 struct device_attribute *attr, char *buffer) 6306 { 6307 struct pqi_ctrl_info *ctrl_info; 6308 struct scsi_device *sdev; 6309 struct pqi_scsi_dev *device; 6310 unsigned long flags; 6311 u64 sas_address; 6312 6313 sdev = to_scsi_device(dev); 6314 ctrl_info = shost_to_hba(sdev->host); 6315 6316 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6317 6318 device = sdev->hostdata; 6319 if (pqi_is_logical_device(device)) { 6320 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 6321 flags); 6322 return -ENODEV; 6323 } 6324 sas_address = device->sas_address; 6325 6326 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6327 6328 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 6329 } 6330 6331 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 6332 struct device_attribute *attr, char *buffer) 6333 { 6334 struct pqi_ctrl_info *ctrl_info; 6335 struct scsi_device *sdev; 6336 struct pqi_scsi_dev *device; 6337 unsigned long flags; 6338 6339 sdev = to_scsi_device(dev); 6340 ctrl_info = shost_to_hba(sdev->host); 6341 6342 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6343 6344 device = sdev->hostdata; 6345 buffer[0] = device->raid_bypass_enabled ? '1' : '0'; 6346 buffer[1] = '\n'; 6347 buffer[2] = '\0'; 6348 6349 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6350 6351 return 2; 6352 } 6353 6354 static ssize_t pqi_raid_level_show(struct device *dev, 6355 struct device_attribute *attr, char *buffer) 6356 { 6357 struct pqi_ctrl_info *ctrl_info; 6358 struct scsi_device *sdev; 6359 struct pqi_scsi_dev *device; 6360 unsigned long flags; 6361 char *raid_level; 6362 6363 sdev = to_scsi_device(dev); 6364 ctrl_info = shost_to_hba(sdev->host); 6365 6366 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6367 6368 device = sdev->hostdata; 6369 6370 if (pqi_is_logical_device(device)) 6371 raid_level = pqi_raid_level_to_string(device->raid_level); 6372 else 6373 raid_level = "N/A"; 6374 6375 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6376 6377 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 6378 } 6379 6380 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 6381 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 6382 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 6383 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 6384 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 6385 pqi_ssd_smart_path_enabled_show, NULL); 6386 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 6387 6388 static struct device_attribute *pqi_sdev_attrs[] = { 6389 &dev_attr_lunid, 6390 &dev_attr_unique_id, 6391 &dev_attr_path_info, 6392 &dev_attr_sas_address, 6393 &dev_attr_ssd_smart_path_enabled, 6394 &dev_attr_raid_level, 6395 NULL 6396 }; 6397 6398 static struct scsi_host_template pqi_driver_template = { 6399 .module = THIS_MODULE, 6400 .name = DRIVER_NAME_SHORT, 6401 .proc_name = DRIVER_NAME_SHORT, 6402 .queuecommand = 
pqi_scsi_queue_command, 6403 .scan_start = pqi_scan_start, 6404 .scan_finished = pqi_scan_finished, 6405 .this_id = -1, 6406 .eh_device_reset_handler = pqi_eh_device_reset_handler, 6407 .ioctl = pqi_ioctl, 6408 .slave_alloc = pqi_slave_alloc, 6409 .map_queues = pqi_map_queues, 6410 .sdev_attrs = pqi_sdev_attrs, 6411 .shost_attrs = pqi_shost_attrs, 6412 }; 6413 6414 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 6415 { 6416 int rc; 6417 struct Scsi_Host *shost; 6418 6419 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 6420 if (!shost) { 6421 dev_err(&ctrl_info->pci_dev->dev, 6422 "scsi_host_alloc failed for controller %u\n", 6423 ctrl_info->ctrl_id); 6424 return -ENOMEM; 6425 } 6426 6427 shost->io_port = 0; 6428 shost->n_io_port = 0; 6429 shost->this_id = -1; 6430 shost->max_channel = PQI_MAX_BUS; 6431 shost->max_cmd_len = MAX_COMMAND_SIZE; 6432 shost->max_lun = ~0; 6433 shost->max_id = ~0; 6434 shost->max_sectors = ctrl_info->max_sectors; 6435 shost->can_queue = ctrl_info->scsi_ml_can_queue; 6436 shost->cmd_per_lun = shost->can_queue; 6437 shost->sg_tablesize = ctrl_info->sg_tablesize; 6438 shost->transportt = pqi_sas_transport_template; 6439 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 6440 shost->unique_id = shost->irq; 6441 shost->nr_hw_queues = ctrl_info->num_queue_groups; 6442 shost->hostdata[0] = (unsigned long)ctrl_info; 6443 6444 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 6445 if (rc) { 6446 dev_err(&ctrl_info->pci_dev->dev, 6447 "scsi_add_host failed for controller %u\n", 6448 ctrl_info->ctrl_id); 6449 goto free_host; 6450 } 6451 6452 rc = pqi_add_sas_host(shost, ctrl_info); 6453 if (rc) { 6454 dev_err(&ctrl_info->pci_dev->dev, 6455 "add SAS host failed for controller %u\n", 6456 ctrl_info->ctrl_id); 6457 goto remove_host; 6458 } 6459 6460 ctrl_info->scsi_host = shost; 6461 6462 return 0; 6463 6464 remove_host: 6465 scsi_remove_host(shost); 6466 free_host: 6467 scsi_host_put(shost); 6468 6469 return rc; 6470 } 6471 6472 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 6473 { 6474 struct Scsi_Host *shost; 6475 6476 pqi_delete_sas_host(ctrl_info); 6477 6478 shost = ctrl_info->scsi_host; 6479 if (!shost) 6480 return; 6481 6482 scsi_remove_host(shost); 6483 scsi_host_put(shost); 6484 } 6485 6486 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 6487 { 6488 int rc = 0; 6489 struct pqi_device_registers __iomem *pqi_registers; 6490 unsigned long timeout; 6491 unsigned int timeout_msecs; 6492 union pqi_reset_register reset_reg; 6493 6494 pqi_registers = ctrl_info->pqi_registers; 6495 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 6496 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 6497 6498 while (1) { 6499 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 6500 reset_reg.all_bits = readl(&pqi_registers->device_reset); 6501 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 6502 break; 6503 pqi_check_ctrl_health(ctrl_info); 6504 if (pqi_ctrl_offline(ctrl_info)) { 6505 rc = -ENXIO; 6506 break; 6507 } 6508 if (time_after(jiffies, timeout)) { 6509 rc = -ETIMEDOUT; 6510 break; 6511 } 6512 } 6513 6514 return rc; 6515 } 6516 6517 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 6518 { 6519 int rc; 6520 union pqi_reset_register reset_reg; 6521 6522 if (ctrl_info->pqi_reset_quiesce_supported) { 6523 rc = sis_pqi_reset_quiesce(ctrl_info); 6524 if (rc) { 6525 dev_err(&ctrl_info->pci_dev->dev, 6526 "PQI reset failed during quiesce with error %d\n", 6527 rc); 6528 return rc; 6529 
} 6530 } 6531 6532 reset_reg.all_bits = 0; 6533 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 6534 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 6535 6536 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 6537 6538 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 6539 if (rc) 6540 dev_err(&ctrl_info->pci_dev->dev, 6541 "PQI reset failed with error %d\n", rc); 6542 6543 return rc; 6544 } 6545 6546 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) 6547 { 6548 int rc; 6549 struct bmic_identify_controller *identify; 6550 6551 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 6552 if (!identify) 6553 return -ENOMEM; 6554 6555 rc = pqi_identify_controller(ctrl_info, identify); 6556 if (rc) 6557 goto out; 6558 6559 memcpy(ctrl_info->firmware_version, identify->firmware_version, 6560 sizeof(identify->firmware_version)); 6561 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 6562 snprintf(ctrl_info->firmware_version + 6563 strlen(ctrl_info->firmware_version), 6564 sizeof(ctrl_info->firmware_version), 6565 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 6566 6567 out: 6568 kfree(identify); 6569 6570 return rc; 6571 } 6572 6573 struct pqi_config_table_section_info { 6574 struct pqi_ctrl_info *ctrl_info; 6575 void *section; 6576 u32 section_offset; 6577 void __iomem *section_iomem_addr; 6578 }; 6579 6580 static inline bool pqi_is_firmware_feature_supported( 6581 struct pqi_config_table_firmware_features *firmware_features, 6582 unsigned int bit_position) 6583 { 6584 unsigned int byte_index; 6585 6586 byte_index = bit_position / BITS_PER_BYTE; 6587 6588 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 6589 return false; 6590 6591 return firmware_features->features_supported[byte_index] & 6592 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 6593 } 6594 6595 static inline bool pqi_is_firmware_feature_enabled( 6596 struct pqi_config_table_firmware_features *firmware_features, 6597 void __iomem *firmware_features_iomem_addr, 6598 unsigned int bit_position) 6599 { 6600 unsigned int byte_index; 6601 u8 __iomem *features_enabled_iomem_addr; 6602 6603 byte_index = (bit_position / BITS_PER_BYTE) + 6604 (le16_to_cpu(firmware_features->num_elements) * 2); 6605 6606 features_enabled_iomem_addr = firmware_features_iomem_addr + 6607 offsetof(struct pqi_config_table_firmware_features, 6608 features_supported) + byte_index; 6609 6610 return *((__force u8 *)features_enabled_iomem_addr) & 6611 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 6612 } 6613 6614 static inline void pqi_request_firmware_feature( 6615 struct pqi_config_table_firmware_features *firmware_features, 6616 unsigned int bit_position) 6617 { 6618 unsigned int byte_index; 6619 6620 byte_index = (bit_position / BITS_PER_BYTE) + 6621 le16_to_cpu(firmware_features->num_elements); 6622 6623 firmware_features->features_supported[byte_index] |= 6624 (1 << (bit_position % BITS_PER_BYTE)); 6625 } 6626 6627 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 6628 u16 first_section, u16 last_section) 6629 { 6630 struct pqi_vendor_general_request request; 6631 6632 memset(&request, 0, sizeof(request)); 6633 6634 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 6635 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 6636 &request.header.iu_length); 6637 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 6638 &request.function_code); 6639 put_unaligned_le16(first_section, 6640 &request.data.config_table_update.first_section); 6641 put_unaligned_le16(last_section, 6642 &request.data.config_table_update.last_section); 6643 6644 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6645 0, NULL, NO_TIMEOUT); 6646 } 6647 6648 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 6649 struct pqi_config_table_firmware_features *firmware_features, 6650 void __iomem *firmware_features_iomem_addr) 6651 { 6652 void *features_requested; 6653 void __iomem *features_requested_iomem_addr; 6654 6655 features_requested = firmware_features->features_supported + 6656 le16_to_cpu(firmware_features->num_elements); 6657 6658 features_requested_iomem_addr = firmware_features_iomem_addr + 6659 (features_requested - (void *)firmware_features); 6660 6661 memcpy_toio(features_requested_iomem_addr, features_requested, 6662 le16_to_cpu(firmware_features->num_elements)); 6663 6664 return pqi_config_table_update(ctrl_info, 6665 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 6666 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 6667 } 6668 6669 struct pqi_firmware_feature { 6670 char *feature_name; 6671 unsigned int feature_bit; 6672 bool supported; 6673 bool enabled; 6674 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 6675 struct pqi_firmware_feature *firmware_feature); 6676 }; 6677 6678 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 6679 struct pqi_firmware_feature *firmware_feature) 6680 { 6681 if (!firmware_feature->supported) { 6682 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 6683 firmware_feature->feature_name); 6684 return; 6685 } 6686 6687 if (firmware_feature->enabled) { 6688 dev_info(&ctrl_info->pci_dev->dev, 6689 "%s enabled\n", firmware_feature->feature_name); 6690 return; 6691 } 6692 6693 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 6694 firmware_feature->feature_name); 6695 } 6696 6697 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 6698 struct pqi_firmware_feature *firmware_feature) 6699 { 6700 if (firmware_feature->feature_status) 6701 firmware_feature->feature_status(ctrl_info, firmware_feature); 6702 } 6703 6704 static DEFINE_MUTEX(pqi_firmware_features_mutex); 6705 6706 static struct pqi_firmware_feature pqi_firmware_features[] = { 6707 { 6708 .feature_name = "Online Firmware Activation", 6709 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 6710 .feature_status = pqi_firmware_feature_status, 6711 }, 6712 { 6713 .feature_name = "Serial Management Protocol", 6714 .feature_bit = 
PQI_FIRMWARE_FEATURE_SMP, 6715 .feature_status = pqi_firmware_feature_status, 6716 }, 6717 { 6718 .feature_name = "New Soft Reset Handshake", 6719 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 6720 .feature_status = pqi_firmware_feature_status, 6721 }, 6722 }; 6723 6724 static void pqi_process_firmware_features( 6725 struct pqi_config_table_section_info *section_info) 6726 { 6727 int rc; 6728 struct pqi_ctrl_info *ctrl_info; 6729 struct pqi_config_table_firmware_features *firmware_features; 6730 void __iomem *firmware_features_iomem_addr; 6731 unsigned int i; 6732 unsigned int num_features_supported; 6733 6734 ctrl_info = section_info->ctrl_info; 6735 firmware_features = section_info->section; 6736 firmware_features_iomem_addr = section_info->section_iomem_addr; 6737 6738 for (i = 0, num_features_supported = 0; 6739 i < ARRAY_SIZE(pqi_firmware_features); i++) { 6740 if (pqi_is_firmware_feature_supported(firmware_features, 6741 pqi_firmware_features[i].feature_bit)) { 6742 pqi_firmware_features[i].supported = true; 6743 num_features_supported++; 6744 } else { 6745 pqi_firmware_feature_update(ctrl_info, 6746 &pqi_firmware_features[i]); 6747 } 6748 } 6749 6750 if (num_features_supported == 0) 6751 return; 6752 6753 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6754 if (!pqi_firmware_features[i].supported) 6755 continue; 6756 pqi_request_firmware_feature(firmware_features, 6757 pqi_firmware_features[i].feature_bit); 6758 } 6759 6760 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 6761 firmware_features_iomem_addr); 6762 if (rc) { 6763 dev_err(&ctrl_info->pci_dev->dev, 6764 "failed to enable firmware features in PQI configuration table\n"); 6765 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6766 if (!pqi_firmware_features[i].supported) 6767 continue; 6768 pqi_firmware_feature_update(ctrl_info, 6769 &pqi_firmware_features[i]); 6770 } 6771 return; 6772 } 6773 6774 ctrl_info->soft_reset_handshake_supported = false; 6775 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6776 if (!pqi_firmware_features[i].supported) 6777 continue; 6778 if (pqi_is_firmware_feature_enabled(firmware_features, 6779 firmware_features_iomem_addr, 6780 pqi_firmware_features[i].feature_bit)) { 6781 pqi_firmware_features[i].enabled = true; 6782 if (pqi_firmware_features[i].feature_bit == 6783 PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE) 6784 ctrl_info->soft_reset_handshake_supported = 6785 true; 6786 } 6787 pqi_firmware_feature_update(ctrl_info, 6788 &pqi_firmware_features[i]); 6789 } 6790 } 6791 6792 static void pqi_init_firmware_features(void) 6793 { 6794 unsigned int i; 6795 6796 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6797 pqi_firmware_features[i].supported = false; 6798 pqi_firmware_features[i].enabled = false; 6799 } 6800 } 6801 6802 static void pqi_process_firmware_features_section( 6803 struct pqi_config_table_section_info *section_info) 6804 { 6805 mutex_lock(&pqi_firmware_features_mutex); 6806 pqi_init_firmware_features(); 6807 pqi_process_firmware_features(section_info); 6808 mutex_unlock(&pqi_firmware_features_mutex); 6809 } 6810 6811 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 6812 { 6813 u32 table_length; 6814 u32 section_offset; 6815 void __iomem *table_iomem_addr; 6816 struct pqi_config_table *config_table; 6817 struct pqi_config_table_section_header *section; 6818 struct pqi_config_table_section_info section_info; 6819 6820 table_length = ctrl_info->config_table_length; 6821 if (table_length == 0) 6822 return 
0; 6823 6824 config_table = kmalloc(table_length, GFP_KERNEL); 6825 if (!config_table) { 6826 dev_err(&ctrl_info->pci_dev->dev, 6827 "failed to allocate memory for PQI configuration table\n"); 6828 return -ENOMEM; 6829 } 6830 6831 /* 6832 * Copy the config table contents from I/O memory space into the 6833 * temporary buffer. 6834 */ 6835 table_iomem_addr = ctrl_info->iomem_base + 6836 ctrl_info->config_table_offset; 6837 memcpy_fromio(config_table, table_iomem_addr, table_length); 6838 6839 section_info.ctrl_info = ctrl_info; 6840 section_offset = 6841 get_unaligned_le32(&config_table->first_section_offset); 6842 6843 while (section_offset) { 6844 section = (void *)config_table + section_offset; 6845 6846 section_info.section = section; 6847 section_info.section_offset = section_offset; 6848 section_info.section_iomem_addr = 6849 table_iomem_addr + section_offset; 6850 6851 switch (get_unaligned_le16(&section->section_id)) { 6852 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 6853 pqi_process_firmware_features_section(&section_info); 6854 break; 6855 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 6856 if (pqi_disable_heartbeat) 6857 dev_warn(&ctrl_info->pci_dev->dev, 6858 "heartbeat disabled by module parameter\n"); 6859 else 6860 ctrl_info->heartbeat_counter = 6861 table_iomem_addr + 6862 section_offset + 6863 offsetof( 6864 struct pqi_config_table_heartbeat, 6865 heartbeat_counter); 6866 break; 6867 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 6868 ctrl_info->soft_reset_status = 6869 table_iomem_addr + 6870 section_offset + 6871 offsetof(struct pqi_config_table_soft_reset, 6872 soft_reset_status); 6873 break; 6874 } 6875 6876 section_offset = 6877 get_unaligned_le16(&section->next_section_offset); 6878 } 6879 6880 kfree(config_table); 6881 6882 return 0; 6883 } 6884 6885 /* Switches the controller from PQI mode back into SIS mode. */ 6886 6887 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 6888 { 6889 int rc; 6890 6891 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 6892 rc = pqi_reset(ctrl_info); 6893 if (rc) 6894 return rc; 6895 rc = sis_reenable_sis_mode(ctrl_info); 6896 if (rc) { 6897 dev_err(&ctrl_info->pci_dev->dev, 6898 "re-enabling SIS mode failed with error %d\n", rc); 6899 return rc; 6900 } 6901 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6902 6903 return 0; 6904 } 6905 6906 /* 6907 * If the controller isn't already in SIS mode, this function forces it into 6908 * SIS mode. 6909 */ 6910 6911 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 6912 { 6913 if (!sis_is_firmware_running(ctrl_info)) 6914 return -ENXIO; 6915 6916 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 6917 return 0; 6918 6919 if (sis_is_kernel_up(ctrl_info)) { 6920 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6921 return 0; 6922 } 6923 6924 return pqi_revert_to_sis_mode(ctrl_info); 6925 } 6926 6927 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 6928 { 6929 int rc; 6930 6931 rc = pqi_force_sis_mode(ctrl_info); 6932 if (rc) 6933 return rc; 6934 6935 /* 6936 * Wait until the controller is ready to start accepting SIS 6937 * commands. 6938 */ 6939 rc = sis_wait_for_ctrl_ready(ctrl_info); 6940 if (rc) 6941 return rc; 6942 6943 /* 6944 * Get the controller properties. This allows us to determine 6945 * whether or not it supports PQI mode.
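 *
 * sis_get_pqi_capabilities() below also reports limits such as the
 * maximum number of outstanding requests, which this function then
 * clamps (more aggressively when reset_devices is set for a kdump
 * kernel). A minimal sketch of that clamp, using a hypothetical
 * helper name:
 *
 *	static u32 example_clamp_outstanding_requests(u32 reported)
 *	{
 *		u32 limit = reset_devices ?
 *				PQI_MAX_OUTSTANDING_REQUESTS_KDUMP :
 *				PQI_MAX_OUTSTANDING_REQUESTS;
 *
 *		return min(reported, limit);
 *	}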
6946 */ 6947 rc = sis_get_ctrl_properties(ctrl_info); 6948 if (rc) { 6949 dev_err(&ctrl_info->pci_dev->dev, 6950 "error obtaining controller properties\n"); 6951 return rc; 6952 } 6953 6954 rc = sis_get_pqi_capabilities(ctrl_info); 6955 if (rc) { 6956 dev_err(&ctrl_info->pci_dev->dev, 6957 "error obtaining controller capabilities\n"); 6958 return rc; 6959 } 6960 6961 if (reset_devices) { 6962 if (ctrl_info->max_outstanding_requests > 6963 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 6964 ctrl_info->max_outstanding_requests = 6965 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 6966 } else { 6967 if (ctrl_info->max_outstanding_requests > 6968 PQI_MAX_OUTSTANDING_REQUESTS) 6969 ctrl_info->max_outstanding_requests = 6970 PQI_MAX_OUTSTANDING_REQUESTS; 6971 } 6972 6973 pqi_calculate_io_resources(ctrl_info); 6974 6975 rc = pqi_alloc_error_buffer(ctrl_info); 6976 if (rc) { 6977 dev_err(&ctrl_info->pci_dev->dev, 6978 "failed to allocate PQI error buffer\n"); 6979 return rc; 6980 } 6981 6982 /* 6983 * If the function we are about to call succeeds, the 6984 * controller will transition from legacy SIS mode 6985 * into PQI mode. 6986 */ 6987 rc = sis_init_base_struct_addr(ctrl_info); 6988 if (rc) { 6989 dev_err(&ctrl_info->pci_dev->dev, 6990 "error initializing PQI mode\n"); 6991 return rc; 6992 } 6993 6994 /* Wait for the controller to complete the SIS -> PQI transition. */ 6995 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6996 if (rc) { 6997 dev_err(&ctrl_info->pci_dev->dev, 6998 "transition to PQI mode failed\n"); 6999 return rc; 7000 } 7001 7002 /* From here on, we are running in PQI mode. */ 7003 ctrl_info->pqi_mode_enabled = true; 7004 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7005 7006 rc = pqi_alloc_admin_queues(ctrl_info); 7007 if (rc) { 7008 dev_err(&ctrl_info->pci_dev->dev, 7009 "failed to allocate admin queues\n"); 7010 return rc; 7011 } 7012 7013 rc = pqi_create_admin_queues(ctrl_info); 7014 if (rc) { 7015 dev_err(&ctrl_info->pci_dev->dev, 7016 "error creating admin queues\n"); 7017 return rc; 7018 } 7019 7020 rc = pqi_report_device_capability(ctrl_info); 7021 if (rc) { 7022 dev_err(&ctrl_info->pci_dev->dev, 7023 "obtaining device capability failed\n"); 7024 return rc; 7025 } 7026 7027 rc = pqi_validate_device_capability(ctrl_info); 7028 if (rc) 7029 return rc; 7030 7031 pqi_calculate_queue_resources(ctrl_info); 7032 7033 rc = pqi_enable_msix_interrupts(ctrl_info); 7034 if (rc) 7035 return rc; 7036 7037 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 7038 ctrl_info->max_msix_vectors = 7039 ctrl_info->num_msix_vectors_enabled; 7040 pqi_calculate_queue_resources(ctrl_info); 7041 } 7042 7043 rc = pqi_alloc_io_resources(ctrl_info); 7044 if (rc) 7045 return rc; 7046 7047 rc = pqi_alloc_operational_queues(ctrl_info); 7048 if (rc) { 7049 dev_err(&ctrl_info->pci_dev->dev, 7050 "failed to allocate operational queues\n"); 7051 return rc; 7052 } 7053 7054 pqi_init_operational_queues(ctrl_info); 7055 7056 rc = pqi_request_irqs(ctrl_info); 7057 if (rc) 7058 return rc; 7059 7060 rc = pqi_create_queues(ctrl_info); 7061 if (rc) 7062 return rc; 7063 7064 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7065 7066 ctrl_info->controller_online = true; 7067 7068 rc = pqi_process_config_table(ctrl_info); 7069 if (rc) 7070 return rc; 7071 7072 pqi_start_heartbeat_timer(ctrl_info); 7073 7074 rc = pqi_enable_events(ctrl_info); 7075 if (rc) { 7076 dev_err(&ctrl_info->pci_dev->dev, 7077 "error enabling events\n"); 7078 return rc; 7079 } 7080 7081 /* Register with the SCSI subsystem. 
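 * pqi_register_scsi() allocates the Scsi_Host, stashes the controller
 * pointer in shost->hostdata[0], adds the host, and attaches the SAS
 * transport (see above). The sysfs and ioctl handlers later recover
 * that pointer through shost_to_hba(), which presumably amounts to:
 *
 *	return (struct pqi_ctrl_info *)shost->hostdata[0];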
*/ 7082 rc = pqi_register_scsi(ctrl_info); 7083 if (rc) 7084 return rc; 7085 7086 rc = pqi_get_ctrl_firmware_version(ctrl_info); 7087 if (rc) { 7088 dev_err(&ctrl_info->pci_dev->dev, 7089 "error obtaining firmware version\n"); 7090 return rc; 7091 } 7092 7093 rc = pqi_set_diag_rescan(ctrl_info); 7094 if (rc) { 7095 dev_err(&ctrl_info->pci_dev->dev, 7096 "error enabling multi-lun rescan\n"); 7097 return rc; 7098 } 7099 7100 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7101 if (rc) { 7102 dev_err(&ctrl_info->pci_dev->dev, 7103 "error updating host wellness\n"); 7104 return rc; 7105 } 7106 7107 pqi_schedule_update_time_worker(ctrl_info); 7108 7109 pqi_scan_scsi_devices(ctrl_info); 7110 7111 return 0; 7112 } 7113 7114 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 7115 { 7116 unsigned int i; 7117 struct pqi_admin_queues *admin_queues; 7118 struct pqi_event_queue *event_queue; 7119 7120 admin_queues = &ctrl_info->admin_queues; 7121 admin_queues->iq_pi_copy = 0; 7122 admin_queues->oq_ci_copy = 0; 7123 writel(0, admin_queues->oq_pi); 7124 7125 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 7126 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 7127 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 7128 ctrl_info->queue_groups[i].oq_ci_copy = 0; 7129 7130 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 7131 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 7132 writel(0, ctrl_info->queue_groups[i].oq_pi); 7133 } 7134 7135 event_queue = &ctrl_info->event_queue; 7136 writel(0, event_queue->oq_pi); 7137 event_queue->oq_ci_copy = 0; 7138 } 7139 7140 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 7141 { 7142 int rc; 7143 7144 rc = pqi_force_sis_mode(ctrl_info); 7145 if (rc) 7146 return rc; 7147 7148 /* 7149 * Wait until the controller is ready to start accepting SIS 7150 * commands. 7151 */ 7152 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 7153 if (rc) 7154 return rc; 7155 7156 /* 7157 * Get the controller properties. This allows us to determine 7158 * whether or not it supports PQI mode. 7159 */ 7160 rc = sis_get_ctrl_properties(ctrl_info); 7161 if (rc) { 7162 dev_err(&ctrl_info->pci_dev->dev, 7163 "error obtaining controller properties\n"); 7164 return rc; 7165 } 7166 7167 rc = sis_get_pqi_capabilities(ctrl_info); 7168 if (rc) { 7169 dev_err(&ctrl_info->pci_dev->dev, 7170 "error obtaining controller capabilities\n"); 7171 return rc; 7172 } 7173 7174 /* 7175 * If the function we are about to call succeeds, the 7176 * controller will transition from legacy SIS mode 7177 * into PQI mode. 7178 */ 7179 rc = sis_init_base_struct_addr(ctrl_info); 7180 if (rc) { 7181 dev_err(&ctrl_info->pci_dev->dev, 7182 "error initializing PQI mode\n"); 7183 return rc; 7184 } 7185 7186 /* Wait for the controller to complete the SIS -> PQI transition. */ 7187 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 7188 if (rc) { 7189 dev_err(&ctrl_info->pci_dev->dev, 7190 "transition to PQI mode failed\n"); 7191 return rc; 7192 } 7193 7194 /* From here on, we are running in PQI mode. 
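 * Unlike pqi_ctrl_init(), this resume path reuses the queue and I/O
 * request memory allocated during the first initialization; it only
 * resets the queue producer/consumer indices via pqi_reinit_queues()
 * before recreating the admin and operational queues.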
*/ 7195 ctrl_info->pqi_mode_enabled = true; 7196 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7197 7198 pqi_reinit_queues(ctrl_info); 7199 7200 rc = pqi_create_admin_queues(ctrl_info); 7201 if (rc) { 7202 dev_err(&ctrl_info->pci_dev->dev, 7203 "error creating admin queues\n"); 7204 return rc; 7205 } 7206 7207 rc = pqi_create_queues(ctrl_info); 7208 if (rc) 7209 return rc; 7210 7211 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 7212 7213 ctrl_info->controller_online = true; 7214 pqi_ctrl_unblock_requests(ctrl_info); 7215 7216 rc = pqi_process_config_table(ctrl_info); 7217 if (rc) 7218 return rc; 7219 7220 pqi_start_heartbeat_timer(ctrl_info); 7221 7222 rc = pqi_enable_events(ctrl_info); 7223 if (rc) { 7224 dev_err(&ctrl_info->pci_dev->dev, 7225 "error enabling events\n"); 7226 return rc; 7227 } 7228 7229 rc = pqi_get_ctrl_firmware_version(ctrl_info); 7230 if (rc) { 7231 dev_err(&ctrl_info->pci_dev->dev, 7232 "error obtaining firmware version\n"); 7233 return rc; 7234 } 7235 7236 rc = pqi_set_diag_rescan(ctrl_info); 7237 if (rc) { 7238 dev_err(&ctrl_info->pci_dev->dev, 7239 "error enabling multi-lun rescan\n"); 7240 return rc; 7241 } 7242 7243 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 7244 if (rc) { 7245 dev_err(&ctrl_info->pci_dev->dev, 7246 "error updating host wellness\n"); 7247 return rc; 7248 } 7249 7250 pqi_schedule_update_time_worker(ctrl_info); 7251 7252 pqi_scan_scsi_devices(ctrl_info); 7253 7254 return 0; 7255 } 7256 7257 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 7258 u16 timeout) 7259 { 7260 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 7261 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 7262 } 7263 7264 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 7265 { 7266 int rc; 7267 u64 mask; 7268 7269 rc = pci_enable_device(ctrl_info->pci_dev); 7270 if (rc) { 7271 dev_err(&ctrl_info->pci_dev->dev, 7272 "failed to enable PCI device\n"); 7273 return rc; 7274 } 7275 7276 if (sizeof(dma_addr_t) > 4) 7277 mask = DMA_BIT_MASK(64); 7278 else 7279 mask = DMA_BIT_MASK(32); 7280 7281 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); 7282 if (rc) { 7283 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 7284 goto disable_device; 7285 } 7286 7287 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 7288 if (rc) { 7289 dev_err(&ctrl_info->pci_dev->dev, 7290 "failed to obtain PCI resources\n"); 7291 goto disable_device; 7292 } 7293 7294 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( 7295 ctrl_info->pci_dev, 0), 7296 sizeof(struct pqi_ctrl_registers)); 7297 if (!ctrl_info->iomem_base) { 7298 dev_err(&ctrl_info->pci_dev->dev, 7299 "failed to map memory for controller registers\n"); 7300 rc = -ENOMEM; 7301 goto release_regions; 7302 } 7303 7304 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 7305 7306 /* Increase the PCIe completion timeout. */ 7307 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 7308 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 7309 if (rc) { 7310 dev_err(&ctrl_info->pci_dev->dev, 7311 "failed to set PCIe completion timeout\n"); 7312 goto release_regions; 7313 } 7314 7315 /* Enable bus mastering. 
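 * Without bus mastering the controller could not DMA into the queue
 * memory and host buffers that this driver allocates with
 * dma_alloc_coherent().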
*/ 7316 pci_set_master(ctrl_info->pci_dev); 7317 7318 ctrl_info->registers = ctrl_info->iomem_base; 7319 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 7320 7321 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 7322 7323 return 0; 7324 7325 release_regions: 7326 pci_release_regions(ctrl_info->pci_dev); 7327 disable_device: 7328 pci_disable_device(ctrl_info->pci_dev); 7329 7330 return rc; 7331 } 7332 7333 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 7334 { 7335 iounmap(ctrl_info->iomem_base); 7336 pci_release_regions(ctrl_info->pci_dev); 7337 if (pci_is_enabled(ctrl_info->pci_dev)) 7338 pci_disable_device(ctrl_info->pci_dev); 7339 pci_set_drvdata(ctrl_info->pci_dev, NULL); 7340 } 7341 7342 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 7343 { 7344 struct pqi_ctrl_info *ctrl_info; 7345 7346 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 7347 GFP_KERNEL, numa_node); 7348 if (!ctrl_info) 7349 return NULL; 7350 7351 mutex_init(&ctrl_info->scan_mutex); 7352 mutex_init(&ctrl_info->lun_reset_mutex); 7353 mutex_init(&ctrl_info->ofa_mutex); 7354 7355 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 7356 spin_lock_init(&ctrl_info->scsi_device_list_lock); 7357 7358 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 7359 atomic_set(&ctrl_info->num_interrupts, 0); 7360 7361 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 7362 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 7363 7364 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 7365 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 7366 7367 sema_init(&ctrl_info->sync_request_sem, 7368 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 7369 init_waitqueue_head(&ctrl_info->block_requests_wait); 7370 7371 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 7372 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 7373 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 7374 pqi_raid_bypass_retry_worker); 7375 7376 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 7377 ctrl_info->irq_mode = IRQ_MODE_NONE; 7378 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 7379 7380 return ctrl_info; 7381 } 7382 7383 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 7384 { 7385 kfree(ctrl_info); 7386 } 7387 7388 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 7389 { 7390 pqi_free_irqs(ctrl_info); 7391 pqi_disable_msix_interrupts(ctrl_info); 7392 } 7393 7394 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 7395 { 7396 pqi_stop_heartbeat_timer(ctrl_info); 7397 pqi_free_interrupts(ctrl_info); 7398 if (ctrl_info->queue_memory_base) 7399 dma_free_coherent(&ctrl_info->pci_dev->dev, 7400 ctrl_info->queue_memory_length, 7401 ctrl_info->queue_memory_base, 7402 ctrl_info->queue_memory_base_dma_handle); 7403 if (ctrl_info->admin_queue_memory_base) 7404 dma_free_coherent(&ctrl_info->pci_dev->dev, 7405 ctrl_info->admin_queue_memory_length, 7406 ctrl_info->admin_queue_memory_base, 7407 ctrl_info->admin_queue_memory_base_dma_handle); 7408 pqi_free_all_io_requests(ctrl_info); 7409 if (ctrl_info->error_buffer) 7410 dma_free_coherent(&ctrl_info->pci_dev->dev, 7411 ctrl_info->error_buffer_length, 7412 ctrl_info->error_buffer, 7413 ctrl_info->error_buffer_dma_handle); 7414 if (ctrl_info->iomem_base) 7415 pqi_cleanup_pci_init(ctrl_info); 7416 pqi_free_ctrl_info(ctrl_info); 7417 } 7418 7419 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 7420 { 7421 
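/*
 * Tear-down order matters: stop the background workers first, detach
 * the SCSI devices and hosts, drop back to SIS mode if we ever made it
 * into PQI mode, and only then free the controller resources.
 */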
pqi_cancel_rescan_worker(ctrl_info); 7422 pqi_cancel_update_time_worker(ctrl_info); 7423 pqi_remove_all_scsi_devices(ctrl_info); 7424 pqi_unregister_scsi(ctrl_info); 7425 if (ctrl_info->pqi_mode_enabled) 7426 pqi_revert_to_sis_mode(ctrl_info); 7427 pqi_free_ctrl_resources(ctrl_info); 7428 } 7429 7430 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 7431 { 7432 pqi_cancel_update_time_worker(ctrl_info); 7433 pqi_cancel_rescan_worker(ctrl_info); 7434 pqi_wait_until_lun_reset_finished(ctrl_info); 7435 pqi_wait_until_scan_finished(ctrl_info); 7436 pqi_ctrl_ofa_start(ctrl_info); 7437 pqi_ctrl_block_requests(ctrl_info); 7438 pqi_ctrl_wait_until_quiesced(ctrl_info); 7439 pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); 7440 pqi_fail_io_queued_for_all_devices(ctrl_info); 7441 pqi_wait_until_inbound_queues_empty(ctrl_info); 7442 pqi_stop_heartbeat_timer(ctrl_info); 7443 ctrl_info->pqi_mode_enabled = false; 7444 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 7445 } 7446 7447 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 7448 { 7449 pqi_ofa_free_host_buffer(ctrl_info); 7450 ctrl_info->pqi_mode_enabled = true; 7451 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 7452 ctrl_info->controller_online = true; 7453 pqi_ctrl_unblock_requests(ctrl_info); 7454 pqi_start_heartbeat_timer(ctrl_info); 7455 pqi_schedule_update_time_worker(ctrl_info); 7456 pqi_clear_soft_reset_status(ctrl_info, 7457 PQI_SOFT_RESET_ABORT); 7458 pqi_scan_scsi_devices(ctrl_info); 7459 } 7460 7461 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, 7462 u32 total_size, u32 chunk_size) 7463 { 7464 u32 sg_count; 7465 u32 size; 7466 int i; 7467 struct pqi_sg_descriptor *mem_descriptor = NULL; 7468 struct device *dev; 7469 struct pqi_ofa_memory *ofap; 7470 7471 dev = &ctrl_info->pci_dev->dev; 7472 7473 sg_count = (total_size + chunk_size - 1); 7474 sg_count /= chunk_size; 7475 7476 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7477 7478 if (sg_count*chunk_size < total_size) 7479 goto out; 7480 7481 ctrl_info->pqi_ofa_chunk_virt_addr = 7482 kcalloc(sg_count, sizeof(void *), GFP_KERNEL); 7483 if (!ctrl_info->pqi_ofa_chunk_virt_addr) 7484 goto out; 7485 7486 for (size = 0, i = 0; size < total_size; size += chunk_size, i++) { 7487 dma_addr_t dma_handle; 7488 7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7490 dma_alloc_coherent(dev, chunk_size, &dma_handle, 7491 GFP_KERNEL); 7492 7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7494 break; 7495 7496 mem_descriptor = &ofap->sg_descriptor[i]; 7497 put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address); 7498 put_unaligned_le32 (chunk_size, &mem_descriptor->length); 7499 } 7500 7501 if (!size || size < total_size) 7502 goto out_free_chunks; 7503 7504 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 7505 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); 7506 put_unaligned_le32(size, &ofap->bytes_allocated); 7507 7508 return 0; 7509 7510 out_free_chunks: 7511 while (--i >= 0) { 7512 mem_descriptor = &ofap->sg_descriptor[i]; 7513 dma_free_coherent(dev, chunk_size, 7514 ctrl_info->pqi_ofa_chunk_virt_addr[i], 7515 get_unaligned_le64(&mem_descriptor->address)); 7516 } 7517 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 7518 7519 out: 7520 put_unaligned_le32 (0, &ofap->bytes_allocated); 7521 return -ENOMEM; 7522 } 7523 7524 static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) 7525 { 7526 u32 total_size; 7527 u32 min_chunk_size; 7528 u32 chunk_sz; 7529 7530 total_size = le32_to_cpu( 7531 
ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated); 7532 min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS; 7533 7534 for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2) 7535 if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz)) 7536 return 0; 7537 7538 return -ENOMEM; 7539 } 7540 7541 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, 7542 u32 bytes_requested) 7543 { 7544 struct pqi_ofa_memory *pqi_ofa_memory; 7545 struct device *dev; 7546 7547 dev = &ctrl_info->pci_dev->dev; 7548 pqi_ofa_memory = dma_alloc_coherent(dev, 7549 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7550 &ctrl_info->pqi_ofa_mem_dma_handle, 7551 GFP_KERNEL); 7552 7553 if (!pqi_ofa_memory) 7554 return; 7555 7556 put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version); 7557 memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE, 7558 sizeof(pqi_ofa_memory->signature)); 7559 pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested); 7560 7561 ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory; 7562 7563 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { 7564 dev_err(dev, "Failed to allocate host buffer of size = %u", 7565 bytes_requested); 7566 } 7567 } 7568 7569 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) 7570 { 7571 int i; 7572 struct pqi_sg_descriptor *mem_descriptor; 7573 struct pqi_ofa_memory *ofap; 7574 7575 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7576 7577 if (!ofap) 7578 return; 7579 7580 if (!ofap->bytes_allocated) 7581 goto out; 7582 7583 mem_descriptor = ofap->sg_descriptor; 7584 7585 for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors); 7586 i++) { 7587 dma_free_coherent(&ctrl_info->pci_dev->dev, 7588 get_unaligned_le32(&mem_descriptor[i].length), 7589 ctrl_info->pqi_ofa_chunk_virt_addr[i], 7590 get_unaligned_le64(&mem_descriptor[i].address)); 7591 } 7592 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 7593 7594 out: 7595 dma_free_coherent(&ctrl_info->pci_dev->dev, 7596 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap, 7597 ctrl_info->pqi_ofa_mem_dma_handle); 7598 ctrl_info->pqi_ofa_mem_virt_addr = NULL; 7599 } 7600 7601 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) 7602 { 7603 struct pqi_vendor_general_request request; 7604 size_t size; 7605 struct pqi_ofa_memory *ofap; 7606 7607 memset(&request, 0, sizeof(request)); 7608 7609 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 7610 7611 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 7612 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 7613 &request.header.iu_length); 7614 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, 7615 &request.function_code); 7616 7617 if (ofap) { 7618 size = offsetof(struct pqi_ofa_memory, sg_descriptor) + 7619 get_unaligned_le16(&ofap->num_memory_descriptors) * 7620 sizeof(struct pqi_sg_descriptor); 7621 7622 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, 7623 &request.data.ofa_memory_allocation.buffer_address); 7624 put_unaligned_le32(size, 7625 &request.data.ofa_memory_allocation.buffer_length); 7626 7627 } 7628 7629 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 7630 0, NULL, NO_TIMEOUT); 7631 } 7632 7633 #define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 7634 7635 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) 7636 { 7637 msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); 7638 return pqi_ctrl_init_resume(ctrl_info); 7639 } 7640 7641 static void pqi_perform_lockup_action(void) 7642 { 7643 switch (pqi_lockup_action) { 7644 case PANIC: 7645 panic("FATAL: Smart Family 
Controller lockup detected"); 7646 break; 7647 case REBOOT: 7648 emergency_restart(); 7649 break; 7650 case NONE: 7651 default: 7652 break; 7653 } 7654 } 7655 7656 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 7657 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 7658 .status = SAM_STAT_CHECK_CONDITION, 7659 }; 7660 7661 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 7662 { 7663 unsigned int i; 7664 struct pqi_io_request *io_request; 7665 struct scsi_cmnd *scmd; 7666 7667 for (i = 0; i < ctrl_info->max_io_slots; i++) { 7668 io_request = &ctrl_info->io_request_pool[i]; 7669 if (atomic_read(&io_request->refcount) == 0) 7670 continue; 7671 7672 scmd = io_request->scmd; 7673 if (scmd) { 7674 set_host_byte(scmd, DID_NO_CONNECT); 7675 } else { 7676 io_request->status = -ENXIO; 7677 io_request->error_info = 7678 &pqi_ctrl_offline_raid_error_info; 7679 } 7680 7681 io_request->io_complete_callback(io_request, 7682 io_request->context); 7683 } 7684 } 7685 7686 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 7687 { 7688 pqi_perform_lockup_action(); 7689 pqi_stop_heartbeat_timer(ctrl_info); 7690 pqi_free_interrupts(ctrl_info); 7691 pqi_cancel_rescan_worker(ctrl_info); 7692 pqi_cancel_update_time_worker(ctrl_info); 7693 pqi_ctrl_wait_until_quiesced(ctrl_info); 7694 pqi_fail_all_outstanding_requests(ctrl_info); 7695 pqi_clear_all_queued_raid_bypass_retries(ctrl_info); 7696 pqi_ctrl_unblock_requests(ctrl_info); 7697 } 7698 7699 static void pqi_ctrl_offline_worker(struct work_struct *work) 7700 { 7701 struct pqi_ctrl_info *ctrl_info; 7702 7703 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 7704 pqi_take_ctrl_offline_deferred(ctrl_info); 7705 } 7706 7707 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 7708 { 7709 if (!ctrl_info->controller_online) 7710 return; 7711 7712 ctrl_info->controller_online = false; 7713 ctrl_info->pqi_mode_enabled = false; 7714 pqi_ctrl_block_requests(ctrl_info); 7715 if (!pqi_disable_ctrl_shutdown) 7716 sis_shutdown_ctrl(ctrl_info); 7717 pci_disable_device(ctrl_info->pci_dev); 7718 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); 7719 schedule_work(&ctrl_info->ctrl_offline_work); 7720 } 7721 7722 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 7723 const struct pci_device_id *id) 7724 { 7725 char *ctrl_description; 7726 7727 if (id->driver_data) 7728 ctrl_description = (char *)id->driver_data; 7729 else 7730 ctrl_description = "Microsemi Smart Family Controller"; 7731 7732 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 7733 } 7734 7735 static int pqi_pci_probe(struct pci_dev *pci_dev, 7736 const struct pci_device_id *id) 7737 { 7738 int rc; 7739 int node, cp_node; 7740 struct pqi_ctrl_info *ctrl_info; 7741 7742 pqi_print_ctrl_info(pci_dev, id); 7743 7744 if (pqi_disable_device_id_wildcards && 7745 id->subvendor == PCI_ANY_ID && 7746 id->subdevice == PCI_ANY_ID) { 7747 dev_warn(&pci_dev->dev, 7748 "controller not probed because device ID wildcards are disabled\n"); 7749 return -ENODEV; 7750 } 7751 7752 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) 7753 dev_warn(&pci_dev->dev, 7754 "controller device ID matched using wildcards\n"); 7755 7756 node = dev_to_node(&pci_dev->dev); 7757 if (node == NUMA_NO_NODE) { 7758 cp_node = cpu_to_node(0); 7759 if (cp_node == NUMA_NO_NODE) 7760 cp_node = 0; 7761 set_dev_node(&pci_dev->dev, cp_node); 7762 } 7763 7764 ctrl_info = pqi_alloc_ctrl_info(node); 7765 if 
(!ctrl_info) { 7766 dev_err(&pci_dev->dev, 7767 "failed to allocate controller info block\n"); 7768 return -ENOMEM; 7769 } 7770 7771 ctrl_info->pci_dev = pci_dev; 7772 7773 rc = pqi_pci_init(ctrl_info); 7774 if (rc) 7775 goto error; 7776 7777 rc = pqi_ctrl_init(ctrl_info); 7778 if (rc) 7779 goto error; 7780 7781 return 0; 7782 7783 error: 7784 pqi_remove_ctrl(ctrl_info); 7785 7786 return rc; 7787 } 7788 7789 static void pqi_pci_remove(struct pci_dev *pci_dev) 7790 { 7791 struct pqi_ctrl_info *ctrl_info; 7792 7793 ctrl_info = pci_get_drvdata(pci_dev); 7794 if (!ctrl_info) 7795 return; 7796 7797 ctrl_info->in_shutdown = true; 7798 7799 pqi_remove_ctrl(ctrl_info); 7800 } 7801 7802 static void pqi_shutdown(struct pci_dev *pci_dev) 7803 { 7804 int rc; 7805 struct pqi_ctrl_info *ctrl_info; 7806 7807 ctrl_info = pci_get_drvdata(pci_dev); 7808 if (!ctrl_info) 7809 goto error; 7810 7811 /* 7812 * Write all data in the controller's battery-backed cache to 7813 * storage. 7814 */ 7815 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); 7816 pqi_free_interrupts(ctrl_info); 7817 pqi_reset(ctrl_info); 7818 if (rc == 0) 7819 return; 7820 7821 error: 7822 dev_warn(&pci_dev->dev, 7823 "unable to flush controller cache\n"); 7824 } 7825 7826 static void pqi_process_lockup_action_param(void) 7827 { 7828 unsigned int i; 7829 7830 if (!pqi_lockup_action_param) 7831 return; 7832 7833 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 7834 if (strcmp(pqi_lockup_action_param, 7835 pqi_lockup_actions[i].name) == 0) { 7836 pqi_lockup_action = pqi_lockup_actions[i].action; 7837 return; 7838 } 7839 } 7840 7841 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 7842 DRIVER_NAME_SHORT, pqi_lockup_action_param); 7843 } 7844 7845 static void pqi_process_module_params(void) 7846 { 7847 pqi_process_lockup_action_param(); 7848 } 7849 7850 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) 7851 { 7852 struct pqi_ctrl_info *ctrl_info; 7853 7854 ctrl_info = pci_get_drvdata(pci_dev); 7855 7856 pqi_disable_events(ctrl_info); 7857 pqi_cancel_update_time_worker(ctrl_info); 7858 pqi_cancel_rescan_worker(ctrl_info); 7859 pqi_wait_until_scan_finished(ctrl_info); 7860 pqi_wait_until_lun_reset_finished(ctrl_info); 7861 pqi_wait_until_ofa_finished(ctrl_info); 7862 pqi_flush_cache(ctrl_info, SUSPEND); 7863 pqi_ctrl_block_requests(ctrl_info); 7864 pqi_ctrl_wait_until_quiesced(ctrl_info); 7865 pqi_wait_until_inbound_queues_empty(ctrl_info); 7866 pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); 7867 pqi_stop_heartbeat_timer(ctrl_info); 7868 7869 if (state.event == PM_EVENT_FREEZE) 7870 return 0; 7871 7872 pci_save_state(pci_dev); 7873 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 7874 7875 ctrl_info->controller_online = false; 7876 ctrl_info->pqi_mode_enabled = false; 7877 7878 return 0; 7879 } 7880 7881 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) 7882 { 7883 int rc; 7884 struct pqi_ctrl_info *ctrl_info; 7885 7886 ctrl_info = pci_get_drvdata(pci_dev); 7887 7888 if (pci_dev->current_state != PCI_D0) { 7889 ctrl_info->max_hw_queue_index = 0; 7890 pqi_free_interrupts(ctrl_info); 7891 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); 7892 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, 7893 IRQF_SHARED, DRIVER_NAME_SHORT, 7894 &ctrl_info->queue_groups[0]); 7895 if (rc) { 7896 dev_err(&ctrl_info->pci_dev->dev, 7897 "irq %u init failed with error %d\n", 7898 pci_dev->irq, rc); 7899 return rc; 7900 } 7901 
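/*
 * Interrupts were re-established above on a single vector in INTx
 * mode; restart the heartbeat timer and let requests flow again,
 * skipping the full PQI re-initialization done on the other path
 * below.
 */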
static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_disable_events(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_stop_heartbeat_timer(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	return pqi_ctrl_init_resume(ctrl_info);
}

/* Define the PCI IDs for the controllers that we support. */
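/*
 * Every entry matches vendor PCI_VENDOR_ID_ADAPTEC2, device 0x028f; the
 * subsystem vendor/device pair distinguishes the individual OEM boards.
 */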
static const struct pci_device_id pqi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x105b, 0x1211) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x105b, 0x1321) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a22) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a23) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a24) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a36) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x152d, 0x8a37) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xc460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xc461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xf460) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xf461) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0045) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0046) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0047) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x0048) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x004a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x004b) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1bd4, 0x004c) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd227) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd228) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd229) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd22a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd22b) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd22c) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
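	/*
	 * Catch-all for 0x028f boards with a subsystem ID not listed above;
	 * pqi_pci_probe() warns when a controller matches only through
	 * these wildcards.
	 */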
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
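
/*
 * verify_structures() is never called; the BUILD_BUG_ON()s below exist
 * purely to break the build if any on-the-wire structure drifts from the
 * offsets and sizes the controller interface expects.
 */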
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header, iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header, work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers, signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers, power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
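
	/* General admin request/response IUs are fixed 64-byte elements. */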
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response, error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request, data.set_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability, data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability, iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config, num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config, descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response, data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request, task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response, response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller, controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
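
	/*
	 * Global limits: admin queue depths must fit in a byte, element
	 * lengths must honor the PQI alignment rules, and reserved slots
	 * must never consume the entire outstanding-request pool.
	 */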
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}