/*
 * Driver for the Micron P320 SSD
 * Copyright (C) 2011 Micron Technology, Inc.
 *
 * Portions of this code were derived from works subjected to the
 * following copyright:
 * Copyright (C) 2009 Integrated Device Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include "mtip32xx.h"

#define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)

/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
#define AHCI_RX_FIS_SZ		0x100
#define AHCI_RX_FIS_OFFSET	0x0
#define AHCI_IDFY_SZ		ATA_SECT_SIZE
#define AHCI_IDFY_OFFSET	0x400
#define AHCI_SECTBUF_SZ		ATA_SECT_SIZE
#define AHCI_SECTBUF_OFFSET	0x800
#define AHCI_SMARTBUF_SZ	ATA_SECT_SIZE
#define AHCI_SMARTBUF_OFFSET	0xC00
/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
#define BLOCK_DMA_ALLOC_SZ	4096

/* DMA region containing command table (should be 8192 bytes) */
#define AHCI_CMD_SLOT_SZ	sizeof(struct mtip_cmd_hdr)
#define AHCI_CMD_TBL_SZ		(MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
#define AHCI_CMD_TBL_OFFSET	0x0

/* DMA region per command (contains header and SGL) */
#define AHCI_CMD_TBL_HDR_SZ	0x80
#define AHCI_CMD_TBL_HDR_OFFSET	0x0
#define AHCI_CMD_TBL_SGL_SZ	(MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
#define AHCI_CMD_TBL_SGL_OFFSET	AHCI_CMD_TBL_HDR_SZ
#define CMD_DMA_ALLOC_SZ	(AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)


#define HOST_CAP_NZDMA		(1 << 19)
#define HOST_HSORG		0xFC
#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
#define HSORG_HWREV		0xFF00
#define HSORG_STYLE		0x8
#define HSORG_SLOTGROUPS	0x7

#define PORT_COMMAND_ISSUE	0x38
#define PORT_SDBV		0x7C

#define PORT_OFFSET		0x100
#define PORT_MEM_SIZE		0x80

#define PORT_IRQ_ERR \
	(PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
	 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
	 PORT_IRQ_OVERFLOW)
#define PORT_IRQ_LEGACY \
	(PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
#define PORT_IRQ_HANDLED \
	(PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
	 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
#define DEF_PORT_IRQ \
	(PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)

/* product numbers */
#define MTIP_PRODUCT_UNKNOWN	0x00
#define MTIP_PRODUCT_ASICFPGA	0x11

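/*
 * Layout arithmetic for the regions above, as a sketch (the values of
 * MTIP_MAX_COMMAND_SLOTS and MTIP_MAX_SG come from mtip32xx.h and are
 * assumptions here):
 *
 *	AHCI_CMD_TBL_SZ = MTIP_MAX_COMMAND_SLOTS * sizeof(struct mtip_cmd_hdr)
 *			= 256 * 32 = 8192 bytes, matching the comment above.
 *
 * Within the 4 KiB BLOCK_DMA_ALLOC_SZ region the buffers sit at
 * RX FIS 0x000-0x0FF, Identify 0x400-0x5FF, sector buffer 0x800-0x9FF
 * and SMART buffer 0xC00-0xDFF; the gaps between them are padding.
 */
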
/* Device instance number, incremented each time a device is probed. */
static int instance;

struct list_head online_list;
struct list_head removing_list;
spinlock_t dev_lock;

/*
 * Global variable used to hold the major block device number
 * allocated in mtip_init().
 */
static int mtip_major;
static struct dentry *dfs_parent;
static struct dentry *dfs_device_status;

static u32 cpu_use[NR_CPUS];

static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);

static int mtip_block_initialize(struct driver_data *dd);

#ifdef CONFIG_COMPAT
struct mtip_compat_ide_task_request_s {
	__u8		io_ports[8];
	__u8		hob_ports[8];
	ide_reg_valid_t	out_flags;
	ide_reg_valid_t	in_flags;
	int		data_phase;
	int		req_cmd;
	compat_ulong_t	out_size;
	compat_ulong_t	in_size;
};
#endif

/*
 * mtip_check_surprise_removal is called when the card is removed from
 * the system; it reads the vendor ID from the configuration space to
 * detect the removal.
 *
 * @pdev Pointer to the pci_dev structure.
 *
 * return value
 *	true if device removed, else false
 */
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
{
	u16 vendor_id = 0;
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (dd->sr)
		return true;

	/* Read the vendor ID from the configuration space */
	pci_read_config_word(pdev, 0x00, &vendor_id);
	if (vendor_id == 0xFFFF) {
		dd->sr = true;
		if (dd->queue)
			set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
		else
			dev_warn(&dd->pdev->dev,
				"%s: dd->queue is NULL\n", __func__);
		if (dd->port) {
			set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags);
			wake_up_interruptible(&dd->port->svc_wait);
		} else
			dev_warn(&dd->pdev->dev,
				"%s: dd->port is NULL\n", __func__);
		return true; /* device removed */
	}

	return false; /* device present */
}

/*
 * Obtain an empty command slot.
 *
 * This function needs to be reentrant since it could be called
 * at the same time on multiple CPUs. The allocation of the
 * command slot must be atomic.
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	>= 0	Index of command slot obtained.
 *	-1	No command slots available.
 */
static int get_slot(struct mtip_port *port)
{
	int slot, i;
	unsigned int num_command_slots = port->dd->slot_groups * 32;

	/*
	 * Try 10 times, because there is a small race here.
	 * That's ok, because it's still cheaper than a lock.
	 *
	 * Race: since this section is not protected by a lock, the same
	 * bit could be chosen by process contexts running on different
	 * processors. So instead of a costly lock, we retry in a loop.
	 */
	for (i = 0; i < 10; i++) {
		slot = find_next_zero_bit(port->allocated,
					 num_command_slots, 1);
		if ((slot < num_command_slots) &&
		    (!test_and_set_bit(slot, port->allocated)))
			return slot;
	}
	dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");

	mtip_check_surprise_removal(port->dd->pdev);
	return -1;
}

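/*
 * The lock-free pattern in get_slot(), as a minimal sketch: find a
 * candidate bit without locking, then let the atomic test_and_set_bit()
 * arbitrate between racing CPUs.
 *
 *	slot = find_next_zero_bit(bitmap, nbits, 1);	// racy hint
 *	if (slot < nbits && !test_and_set_bit(slot, bitmap))
 *		// we own the slot; a loser simply retries
 *
 * The search starts at bit 1 because slot 0 is reserved for the
 * driver's internal command (MTIP_TAG_INTERNAL is assumed to be 0 here;
 * it is defined in mtip32xx.h).
 */
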
/*
 * Release a command slot.
 *
 * @port Pointer to the port data structure.
 * @tag  Tag of the command to release.
 *
 * return value
 *	None
 */
static inline void release_slot(struct mtip_port *port, int tag)
{
	smp_mb__before_clear_bit();
	clear_bit(tag, port->allocated);
	smp_mb__after_clear_bit();
}

/*
 * IO completion function.
 *
 * This completion function is called by the driver ISR when a
 * command that was issued by the kernel completes. It first calls the
 * asynchronous completion function which normally calls back into the block
 * layer passing the asynchronous callback data, then unmaps the
 * scatter list associated with the completed command, and finally
 * clears the allocated bit associated with the completed command.
 *
 * @port   Pointer to the port data structure.
 * @tag    Tag of the command.
 * @data   Pointer to driver_data.
 * @status Completion status.
 *
 * return value
 *	None
 */
static void mtip_async_complete(struct mtip_port *port,
				int tag,
				void *data,
				int status)
{
	struct mtip_cmd *command;
	struct driver_data *dd = data;
	int cb_status = status ? -EIO : 0;

	if (unlikely(!dd) || unlikely(!port))
		return;

	command = &port->commands[tag];

	if (unlikely(status == PORT_IRQ_TF_ERR)) {
		dev_warn(&port->dd->pdev->dev,
			"Command tag %d failed due to TFE\n", tag);
	}

	/* Upper layer callback */
	if (likely(command->async_callback))
		command->async_callback(command->async_data, cb_status);

	command->async_callback = NULL;
	command->comp_func = NULL;

	/* Unmap the DMA scatter list entries */
	dma_unmap_sg(&dd->pdev->dev,
		command->sg,
		command->scatter_ents,
		command->direction);

	/* Clear the allocated and active bits for the command */
	atomic_set(&port->commands[tag].active, 0);
	release_slot(port, tag);

	up(&port->cmd_slot);
}

/*
 * This function is called to clean up the pending commands in the
 * command slots during surprise removal of the device, returning an
 * error to the upper layer.
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	None
 */
static void mtip_command_cleanup(struct driver_data *dd)
{
	int tag = 0;
	struct mtip_cmd *cmd;
	struct mtip_port *port = dd->port;
	unsigned int num_cmd_slots = dd->slot_groups * 32;

	if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
		return;

	if (!port)
		return;

	cmd = &port->commands[MTIP_TAG_INTERNAL];
	if (atomic_read(&cmd->active))
		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) &
					(1 << MTIP_TAG_INTERNAL))
			if (cmd->comp_func)
				cmd->comp_func(port, MTIP_TAG_INTERNAL,
					cmd->comp_data, -ENODEV);

	while (1) {
		tag = find_next_bit(port->allocated, num_cmd_slots, tag);
		if (tag >= num_cmd_slots)
			break;

		cmd = &port->commands[tag];
		if (atomic_read(&cmd->active))
			mtip_async_complete(port, tag, dd, -ENODEV);
	}

	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
}

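/*
 * Note on the cleanup path above: every command still marked active is
 * completed with -ENODEV, so upper layers see a hard error for all
 * in-flight IO once the device is gone. The internal slot is checked
 * separately, against the Command Issue register, since it is driven by
 * the driver itself rather than by a block-layer request.
 */
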
/*
 * Reset the HBA (without sleeping)
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int mtip_hba_reset(struct driver_data *dd)
{
	unsigned long timeout;

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/* Spin for up to 2 seconds, waiting for reset acknowledgement */
	timeout = jiffies + msecs_to_jiffies(2000);
	do {
		mdelay(10);
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
			return -1;

	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout));

	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}

/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port Pointer to the port structure.
 * @tag  The tag of the command to be issued.
 *
 * return value
 *	None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	int group = tag >> 5;

	atomic_set(&port->commands[tag].active, 1);

	/* guard SACT and CI registers */
	spin_lock(&port->cmd_issue_lock[group]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
	spin_unlock(&port->cmd_issue_lock[group]);

	/* Set the command's timeout value. */
	port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
					MTIP_NCQ_COMMAND_TIMEOUT_MS);
}

/*
 * Enable/disable the reception of FIS
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled
 */
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);

	return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}

/*
 * Enable/disable the DMA engine
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled.
 */
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable/disable the DMA engine */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);

	readl(port->mmio + PORT_CMD);
	return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}

/*
 * Enables the port DMA engine and FIS reception.
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}

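/*
 * Tag-to-register mapping used above, worked through (MTIP_TAG_INDEX()
 * and MTIP_TAG_BIT() come from mtip32xx.h; tag/32 and tag%32 are
 * assumed here): tag 37 lives in slot group 37 >> 5 = 1, bit
 * 37 & 0x1F = 5, so issuing it sets bit 5 in the second SACT/CI
 * register pair. The per-group spinlock keeps the two writel()s atomic
 * with respect to other issuers in the same group.
 */
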
/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
 *
 * @port Pointer to the port structure
 *
 * return value
 *	None
 */
static inline void mtip_deinit_port(struct mtip_port *port)
{
	/* Disable interrupts on this port */
	writel(0, port->mmio + PORT_IRQ_MASK);

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Disable FIS reception */
	mtip_enable_fis(port, 0);
}

/*
 * Initialize a port.
 *
 * This function deinitializes the port by calling mtip_deinit_port() and
 * then initializes it by setting the command header and RX FIS addresses,
 * clearing the SError register and any pending port interrupts before
 * re-enabling the default set of port interrupts.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	None
 */
static void mtip_init_port(struct mtip_port *port)
{
	int i;

	mtip_deinit_port(port);

	/* Program the command list base and FIS base addresses */
	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
		writel((port->command_list_dma >> 16) >> 16,
			 port->mmio + PORT_LST_ADDR_HI);
		writel((port->rxfis_dma >> 16) >> 16,
			 port->mmio + PORT_FIS_ADDR_HI);
	}

	writel(port->command_list_dma & 0xFFFFFFFF,
			port->mmio + PORT_LST_ADDR);
	writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);

	/* Clear SError */
	writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);

	/* reset the completed registers. */
	for (i = 0; i < port->dd->slot_groups; i++)
		writel(0xFFFFFFFF, port->completed[i]);

	/* Clear any pending interrupts for this port */
	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);

	/* Clear any pending interrupts on the HBA. */
	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
					port->dd->mmio + HOST_IRQ_STAT);

	/* Enable port interrupts */
	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}

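/*
 * Two idioms in mtip_init_port() worth noting: (x >> 16) >> 16 extracts
 * the upper 32 bits of a dma_addr_t without hitting the undefined
 * behaviour of a 32-bit shift when dma_addr_t is only 32 bits wide, and
 * writel(readl(reg), reg) clears write-1-to-clear status registers such
 * as PxSERR and the interrupt status registers by writing back exactly
 * the bits currently set.
 */
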
/*
 * Restart a port
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_restart_port(struct mtip_port *port)
{
	unsigned long timeout;

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
	timeout = jiffies + msecs_to_jiffies(500);
	while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
		&& time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/*
	 * Chip quirk: escalate to hba reset if
	 * PxCMD.CR not clear after 500 ms
	 */
	if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
		dev_warn(&port->dd->pdev->dev,
			"PxCMD.CR not clear, escalating reset\n");

		if (mtip_hba_reset(port->dd))
			dev_err(&port->dd->pdev->dev,
				"HBA reset escalation failed.\n");

		/* 30 ms delay before com reset to quiesce chip */
		mdelay(30);
	}

	dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");

	/* Set PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) |
			 1, port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 1 ms to quiesce chip function */
	timeout = jiffies + msecs_to_jiffies(1);
	while (time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/* Clear PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
			 port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 500 ms for bit 0 of PORT_SCR_STAT to be set */
	timeout = jiffies + msecs_to_jiffies(500);
	while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
			 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
		dev_warn(&port->dd->pdev->dev,
			"COM reset failed\n");

	mtip_init_port(port);
	mtip_start_port(port);
}

static int mtip_device_reset(struct driver_data *dd)
{
	int rv = 0;

	if (mtip_check_surprise_removal(dd->pdev))
		return 0;

	if (mtip_hba_reset(dd) < 0)
		rv = -EFAULT;

	mdelay(1);
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);
	return rv;
}

/*
 * Helper function for tag logging
 */
static void print_tags(struct driver_data *dd,
			char *msg,
			unsigned long *tagbits,
			int cnt)
{
	unsigned char tagmap[128];
	int group, tagmap_len = 0;

	memset(tagmap, 0, sizeof(tagmap));
	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
		tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
						tagbits[group-1]);
	dev_warn(&dd->pdev->dev,
			"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}

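/*
 * The COMRESET sequence in mtip_restart_port() follows the SATA phy
 * handshake: write PxSCTL.DET = 1 to force the interface into the
 * reset state, hold it for at least 1 ms, clear DET, then poll
 * PxSSTS.DET (bit 0 of PORT_SCR_STAT) until the phy reports device
 * presence, capped at 500 ms.
 */
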
/*
 * Called periodically to see if any read/write commands are
 * taking too long to complete.
 *
 * @data Pointer to the PORT data structure.
 *
 * return value
 *	None
 */
static void mtip_timeout_function(unsigned long int data)
{
	struct mtip_port *port = (struct mtip_port *) data;
	struct host_to_dev_fis *fis;
	struct mtip_cmd *command;
	int tag, cmdto_cnt = 0;
	unsigned int bit, group;
	unsigned int num_command_slots;
	unsigned long to, tagaccum[SLOTBITS_IN_LONGS];

	if (unlikely(!port))
		return;

	if (unlikely(port->dd->sr))
		return;

	if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
		mod_timer(&port->cmd_timer,
			jiffies + msecs_to_jiffies(30000));
		return;
	}
	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
	num_command_slots = port->dd->slot_groups * 32;

	for (tag = 0; tag < num_command_slots; tag++) {
		/*
		 * Skip internal command slot as it has
		 * its own timeout mechanism
		 */
		if (tag == MTIP_TAG_INTERNAL)
			continue;

		if (atomic_read(&port->commands[tag].active) &&
		   (time_after(jiffies, port->commands[tag].comp_time))) {
			group = tag >> 5;
			bit = tag & 0x1F;

			command = &port->commands[tag];
			fis = (struct host_to_dev_fis *) command->command;

			set_bit(tag, tagaccum);
			cmdto_cnt++;
			if (cmdto_cnt == 1)
				set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);

			/*
			 * Clear the completed bit. This should prevent
			 * any interrupt handlers from trying to retire
			 * the command.
			 */
			writel(1 << bit, port->completed[group]);

			/* Call the async completion callback. */
			if (likely(command->async_callback))
				command->async_callback(command->async_data,
							 -EIO);
			command->async_callback = NULL;
			command->comp_func = NULL;

			/* Unmap the DMA scatter list entries */
			dma_unmap_sg(&port->dd->pdev->dev,
					command->sg,
					command->scatter_ents,
					command->direction);

			/*
			 * Clear the allocated bit and active tag for the
			 * command.
			 */
			atomic_set(&port->commands[tag].active, 0);
			release_slot(port, tag);

			up(&port->cmd_slot);
		}
	}

	if (cmdto_cnt) {
		print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
		if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
			mtip_device_reset(port->dd);
			wake_up_interruptible(&port->svc_wait);
		}
		clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
	}

	if (port->ic_pause_timer) {
		to = port->ic_pause_timer + msecs_to_jiffies(1000);
		if (time_after(jiffies, to)) {
			if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
				port->ic_pause_timer = 0;
				clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
				clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
				wake_up_interruptible(&port->svc_wait);
			}
		}
	}

	/* Restart the timer */
	mod_timer(&port->cmd_timer,
		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
}

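/*
 * A sketch of how this handler is expected to be registered (the actual
 * wiring happens in the port init code, outside this excerpt, and the
 * helper used there is an assumption):
 *
 *	setup_timer(&port->cmd_timer, mtip_timeout_function,
 *		    (unsigned long)port);
 *	mod_timer(&port->cmd_timer,
 *		  jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
 *
 * The unsigned long @data argument is the classic (pre-4.15) kernel
 * timer calling convention; here it carries the port pointer.
 */
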
/*
 * Internal command completion callback function.
 *
 * This function is normally called by the driver ISR when an internal
 * command completed. This function signals the command completion by
 * calling complete().
 *
 * @port   Pointer to the port data structure.
 * @tag    Tag of the command that has completed.
 * @data   Pointer to a completion structure.
 * @status Completion status.
 *
 * return value
 *	None
 */
static void mtip_completion(struct mtip_port *port,
			    int tag,
			    void *data,
			    int status)
{
	struct mtip_cmd *command = &port->commands[tag];
	struct completion *waiting = data;

	if (unlikely(status == PORT_IRQ_TF_ERR))
		dev_warn(&port->dd->pdev->dev,
			"Internal command %d completed with TFE\n", tag);

	command->async_callback = NULL;
	command->comp_func = NULL;

	complete(waiting);
}

static void mtip_null_completion(struct mtip_port *port,
				 int tag,
				 void *data,
				 int status)
{
	return;
}

static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib);

/*
 * Handle an error.
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	None
 */
static void mtip_handle_tfe(struct driver_data *dd)
{
	int group, tag, bit, reissue, rv;
	struct mtip_port *port;
	struct mtip_cmd  *cmd;
	u32 completed;
	struct host_to_dev_fis *fis;
	unsigned long tagaccum[SLOTBITS_IN_LONGS];
	unsigned int cmd_cnt = 0;
	unsigned char *buf;
	char *fail_reason = NULL;
	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;

	dev_warn(&dd->pdev->dev, "Taskfile error\n");

	port = dd->port;

	/* Stop the timer to prevent command timeouts. */
	del_timer(&port->cmd_timer);
	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
		cmd = &port->commands[MTIP_TAG_INTERNAL];
		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");

		atomic_inc(&cmd->active); /* active > 1 indicates error */
		if (cmd->comp_data && cmd->comp_func) {
			cmd->comp_func(port, MTIP_TAG_INTERNAL,
					cmd->comp_data, PORT_IRQ_TF_ERR);
		}
		goto handle_tfe_exit;
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		/* clear completed status register in the hardware. */
		writel(completed, port->completed[group]);

		/* Process successfully completed commands */
		for (bit = 0; bit < 32 && completed; bit++) {
			if (!(completed & (1<<bit)))
				continue;
			tag = (group << 5) + bit;

			/* Skip the internal command slot */
			if (tag == MTIP_TAG_INTERNAL)
				continue;

			cmd = &port->commands[tag];
			if (likely(cmd->comp_func)) {
				set_bit(tag, tagaccum);
				cmd_cnt++;
				atomic_set(&cmd->active, 0);
				cmd->comp_func(port,
					 tag,
					 cmd->comp_data,
					 0);
			} else {
				dev_err(&port->dd->pdev->dev,
					"Missing completion func for tag %d",
					tag);
				if (mtip_check_surprise_removal(dd->pdev)) {
					/* don't proceed further */
					return;
				}
			}
		}
	}

	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);

	/* Restart the port */
	mdelay(20);
	mtip_restart_port(port);

	/* Try to determine the cause of the error */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
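	/*
	 * The NCQ error log (READ LOG EXT page 10h) read above is one
	 * sector; the byte offsets tested below (259 and 288) and the
	 * 0xF7/0xBF sentinel values are specific to this device's
	 * firmware rather than part of the generic ATA log format.
	 */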
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
			fail_all_ncq_write = 1;
			fail_reason = "write protect";
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
			fail_all_ncq_cmds = 1;
			fail_reason = "thermal shutdown";
		}
		if (buf[288] == 0xBF) {
			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed. Secure erase required.\n");
			fail_all_ncq_cmds = 1;
			fail_reason = "rebuild failed";
		}
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		for (bit = 0; bit < 32; bit++) {
			reissue = 1;
			tag = (group << 5) + bit;
			cmd = &port->commands[tag];

			/* If the active bit is set re-issue the command */
			if (atomic_read(&cmd->active) == 0)
				continue;

			fis = (struct host_to_dev_fis *)cmd->command;

			/* Should re-issue? */
			if (tag == MTIP_TAG_INTERNAL ||
			    fis->command == ATA_CMD_SET_FEATURES)
				reissue = 0;
			else {
				if (fail_all_ncq_cmds ||
					(fail_all_ncq_write &&
					fis->command == ATA_CMD_FPDMA_WRITE)) {
					dev_warn(&dd->pdev->dev,
						"  Fail: %s w/tag %d [%s].\n",
						fis->command == ATA_CMD_FPDMA_WRITE ?
							"write" : "read",
						tag,
						fail_reason != NULL ?
							fail_reason : "unknown");
					atomic_set(&cmd->active, 0);
					if (cmd->comp_func) {
						cmd->comp_func(port, tag,
							cmd->comp_data,
							-ENODATA);
					}
					continue;
				}
			}

			/*
			 * First check if this command has
			 * exceeded its retries.
			 */
			if (reissue && (cmd->retries-- > 0)) {

				set_bit(tag, tagaccum);

				/* Re-issue the command. */
				mtip_issue_ncq_command(port, tag);

				continue;
			}

			/* Retire a command that will not be reissued */
			dev_warn(&port->dd->pdev->dev,
				"retiring tag %d\n", tag);
			atomic_set(&cmd->active, 0);

			if (cmd->comp_func)
				cmd->comp_func(
					port,
					tag,
					cmd->comp_data,
					PORT_IRQ_TF_ERR);
			else
				dev_warn(&port->dd->pdev->dev,
					"Bad completion for tag %d\n",
					tag);
		}
	}
	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);

handle_tfe_exit:
	/* clear eh_active */
	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
	wake_up_interruptible(&port->svc_wait);

	mod_timer(&port->cmd_timer,
		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
}

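/*
 * Error-handling lifecycle note: mtip_handle_tfe() stops the command
 * timer and sets MTIP_PF_EH_ACTIVE_BIT on entry, and the exit path
 * above undoes both, so timeout processing and the service thread are
 * paused for exactly the duration of taskfile-error recovery.
 * Surviving commands are reissued until cmd->retries is exhausted;
 * internal and SET FEATURES commands are never reissued.
 */
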
/*
 * Handle a set device bits interrupt
 */
static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
							u32 completed)
{
	struct driver_data *dd = port->dd;
	int tag, bit;
	struct mtip_cmd *command;

	if (!completed) {
		WARN_ON_ONCE(!completed);
		return;
	}
	/* clear completed status register in the hardware. */
	writel(completed, port->completed[group]);

	/* Process completed commands. */
	for (bit = 0; (bit < 32) && completed; bit++) {
		if (completed & 0x01) {
			tag = (group << 5) | bit;

			/* skip internal command slot. */
			if (unlikely(tag == MTIP_TAG_INTERNAL))
				continue;

			command = &port->commands[tag];
			/* make internal callback */
			if (likely(command->comp_func)) {
				command->comp_func(
					port,
					tag,
					command->comp_data,
					0);
			} else {
				dev_dbg(&dd->pdev->dev,
					"Null completion for tag %d",
					tag);

				if (mtip_check_surprise_removal(
						dd->pdev)) {
					return;
				}
			}
		}
		completed >>= 1;
	}

	/* If last, re-enable interrupts */
	if (atomic_dec_return(&dd->irq_workers_active) == 0)
		writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}

/*
 * Process legacy pio and d2h interrupts
 */
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
	struct mtip_port *port = dd->port;
	struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
	    (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
		& (1 << MTIP_TAG_INTERNAL))) {
		if (cmd->comp_func) {
			cmd->comp_func(port,
				MTIP_TAG_INTERNAL,
				cmd->comp_data,
				0);
			return;
		}
	}

	return;
}

/*
 * Demux and handle errors
 */
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
	if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
		mtip_handle_tfe(dd);

	if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.x\n");
		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.n\n");
		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
		dev_warn(&dd->pdev->dev,
			"Port stat errors %x unhandled\n",
			(port_stat & ~PORT_IRQ_HANDLED));
	}
}

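/*
 * PxSERR is write-1-to-clear: bit 26 is the DIAG.X (exchanged) flag
 * raised on a connect event and bit 16 is DIAG.N (PhyRdy change),
 * which is why mtip_process_errors() writes those single bits back to
 * PORT_SCR_ERR instead of read-modify-writing the register.
 */
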
static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
{
	struct driver_data *dd = (struct driver_data *) data;
	struct mtip_port *port = dd->port;
	u32 hba_stat, port_stat;
	int rv = IRQ_NONE;
	int do_irq_enable = 1, i, workers;
	struct mtip_work *twork;

	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
	if (hba_stat) {
		rv = IRQ_HANDLED;

		/* Acknowledge the interrupt status on the port. */
		port_stat = readl(port->mmio + PORT_IRQ_STAT);
		writel(port_stat, port->mmio + PORT_IRQ_STAT);

		/* Demux port status */
		if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
			do_irq_enable = 0;
			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);

			/* Start at 1: group zero is always local? */
			for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
									i++) {
				twork = &dd->work[i];
				twork->completed = readl(port->completed[i]);
				if (twork->completed)
					workers++;
			}

			atomic_set(&dd->irq_workers_active, workers);
			if (workers) {
				for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
					twork = &dd->work[i];
					if (twork->completed)
						queue_work_on(
							twork->cpu_binding,
							dd->isr_workq,
							&twork->work);
				}

				if (likely(dd->work[0].completed))
					mtip_workq_sdbfx(port, 0,
							dd->work[0].completed);

			} else {
				/*
				 * Chip quirk: SDB interrupt but nothing
				 * to complete
				 */
				do_irq_enable = 1;
			}
		}

		if (unlikely(port_stat & PORT_IRQ_ERR)) {
			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
				/* don't proceed further */
				return IRQ_HANDLED;
			}
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
							&dd->dd_flag))
				return rv;

			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
		}

		if (unlikely(port_stat & PORT_IRQ_LEGACY))
			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
	}

	/* acknowledge interrupt */
	if (unlikely(do_irq_enable))
		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);

	return rv;
}

/*
 * HBA interrupt subroutine.
 *
 * @irq      IRQ number.
 * @instance Pointer to the driver data structure.
 *
 * return value
 *	IRQ_HANDLED	A HBA interrupt was pending and handled.
 *	IRQ_NONE	This interrupt was not for the HBA.
 */
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
	struct driver_data *dd = instance;

	return mtip_handle_irq(dd);
}

static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
	atomic_set(&port->commands[tag].active, 1);
	writel(1 << MTIP_TAG_BIT(tag),
		port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}

static bool mtip_pause_ncq(struct mtip_port *port,
				struct host_to_dev_fis *fis)
{
	struct host_to_dev_fis *reply;
	unsigned long task_file_data;

	reply = port->rxfis + RX_FIS_D2H_REG;
	task_file_data = readl(port->mmio + PORT_TFDATA);

	if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

	if ((task_file_data & 1))
		return false;

	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
					(fis->features == 0x03)) {
		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
		((fis->command == 0xFC) &&
			(fis->features == 0x27 || fis->features == 0x72 ||
			 fis->features == 0x62 || fis->features == 0x26))) {
		/* Com reset after secure erase or lowlevel format */
		mtip_restart_port(port);
		return false;
	}

	return false;
}

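/*
 * ISR fan-out design note: on an SDB (NCQ completion) interrupt the
 * handler snapshots each slot group's completed register, then retires
 * group 0 inline while groups 1 and up are pushed to CPU-bound
 * workqueue items (dd->work[i], bound via twork->cpu_binding). The HBA
 * interrupt is only acknowledged, by the final writel to HOST_IRQ_STAT
 * in mtip_workq_sdbfx(), once irq_workers_active drops to zero.
 */
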
/*
 * Wait for port to quiesce
 *
 * @port    Pointer to port data structure
 * @timeout Max duration to wait (ms)
 *
 * return value
 *	0	Success
 *	-EBUSY	Commands still active
 *	-EFAULT	Device removal pending
 */
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
	unsigned long to;
	unsigned int n;
	unsigned int active = 1;

	to = jiffies + msecs_to_jiffies(timeout);
	do {
		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			msleep(20);
			continue; /* svc thd is actively issuing commands */
		}
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
			return -EFAULT;
		/*
		 * Ignore s_active bit 0 of array element 0.
		 * This bit will always be set
		 */
		active = readl(port->s_active[0]) & 0xFFFFFFFE;
		for (n = 1; n < port->dd->slot_groups; n++)
			active |= readl(port->s_active[n]);

		if (!active)
			break;

		msleep(20);
	} while (time_before(jiffies, to));

	return active ? -EBUSY : 0;
}

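/*
 * Usage note: bit 0 of s_active[0] corresponds to the internal command
 * tag (assuming MTIP_TAG_INTERNAL is 0) and is masked off above, so
 * only NCQ tags count toward "active". The one caller in this file,
 * mtip_exec_internal_command(), gives the port 5000 ms to drain before
 * issuing a non-NCQ internal command.
 */
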
/*
 * Execute an internal command and wait for the completion.
 *
 * @port    Pointer to the port data structure.
 * @fis     Pointer to the FIS that describes the command.
 * @fis_len Length in WORDS of the FIS.
 * @buffer  DMA accessible for command data.
 * @buf_len Length, in bytes, of the data buffer.
 * @opts    Command header options, excluding the FIS length
 *          and the number of PRD entries.
 * @atomic  GFP_KERNEL to sleep waiting for completion, otherwise poll.
 * @timeout Time in ms to wait for the command to complete.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT	The buffer address is not correctly aligned.
 *	-EBUSY	Internal command or other IO in progress.
 *	-EAGAIN	Time out waiting for command to complete.
 */
static int mtip_exec_internal_command(struct mtip_port *port,
					struct host_to_dev_fis *fis,
					int fis_len,
					dma_addr_t buffer,
					int buf_len,
					u32 opts,
					gfp_t atomic,
					unsigned long timeout)
{
	struct mtip_cmd_sg *command_sg;
	DECLARE_COMPLETION_ONSTACK(wait);
	int rv = 0, ready2go = 1;
	struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
	unsigned long to;
	struct driver_data *dd = port->dd;

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	to = jiffies + msecs_to_jiffies(timeout);
	do {
		ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
						port->allocated);
		if (ready2go)
			break;
		mdelay(100);
	} while (time_before(jiffies, to));
	if (!ready2go) {
		dev_warn(&dd->pdev->dev,
			"Internal cmd active. new cmd [%02X]\n", fis->command);
		return -EBUSY;
	}
	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	port->ic_pause_timer = 0;

	clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

	if (atomic == GFP_KERNEL) {
		if (fis->command != ATA_CMD_STANDBYNOW1) {
			/* wait for io to complete if non atomic */
			if (mtip_quiesce_io(port, 5000) < 0) {
				dev_warn(&dd->pdev->dev,
					"Failed to quiesce IO\n");
				release_slot(port, MTIP_TAG_INTERNAL);
				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
				wake_up_interruptible(&port->svc_wait);
				return -EBUSY;
			}
		}

		/* Set the completion function and data for the command. */
		int_cmd->comp_data = &wait;
		int_cmd->comp_func = mtip_completion;

	} else {
		/* Clear completion - we're going to poll */
		int_cmd->comp_data = NULL;
		int_cmd->comp_func = mtip_null_completion;
	}

	/* Copy the command to the command table */
	memcpy(int_cmd->command, fis, fis_len*4);

	/* Populate the SG list */
	int_cmd->command_header->opts =
		 __force_bit2int cpu_to_le32(opts | fis_len);
	if (buf_len) {
		command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;

		command_sg->info =
			__force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
		command_sg->dba	=
			 __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
		command_sg->dba_upper =
			__force_bit2int cpu_to_le32((buffer >> 16) >> 16);

		int_cmd->command_header->opts |=
			__force_bit2int cpu_to_le32((1 << 16));
	}

	/* Populate the command header */
	int_cmd->command_header->byte_count = 0;

	/* Issue the command to the hardware */
	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);

	if (atomic == GFP_KERNEL) {
		long wait_rv;

		/* Wait for the command to complete or timeout. */
		wait_rv = wait_for_completion_interruptible_timeout(
				&wait,
				msecs_to_jiffies(timeout));
		if (wait_rv <= 0) {
			if (wait_rv == -ERESTARTSYS) { /* interrupted */
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] was interrupted after %lu ms\n",
					fis->command, timeout);
				rv = -EINTR;
				goto exec_ic_exit;
			} else if (wait_rv == 0) /* timeout */
				dev_err(&dd->pdev->dev,
					"Internal command did not complete [%02X] within timeout of %lu ms\n",
					fis->command, timeout);
			else
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] wait returned code [%ld] after %lu ms - unhandled\n",
					fis->command, wait_rv, timeout);

			if (mtip_check_surprise_removal(dd->pdev) ||
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&dd->dd_flag)) {
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] wait returned due to SR\n",
					fis->command);
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			mtip_device_reset(dd); /* recover from timeout issue */
			rv = -EAGAIN;
			goto exec_ic_exit;
		}
	} else {
		u32 hba_stat, port_stat;

		/* Spin for <timeout> checking if command still outstanding */
		timeout = jiffies + msecs_to_jiffies(timeout);
		while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
				& (1 << MTIP_TAG_INTERNAL))
				&& time_before(jiffies, timeout)) {
			if (mtip_check_surprise_removal(dd->pdev)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			if ((fis->command != ATA_CMD_STANDBYNOW1) &&
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&dd->dd_flag)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			port_stat = readl(port->mmio + PORT_IRQ_STAT);
			if (!port_stat)
				continue;

			if (port_stat & PORT_IRQ_ERR) {
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] failed\n",
					fis->command);
				mtip_device_reset(dd);
				rv = -EIO;
				goto exec_ic_exit;
			} else {
				writel(port_stat, port->mmio + PORT_IRQ_STAT);
				hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
				if (hba_stat)
					writel(hba_stat,
						dd->mmio + HOST_IRQ_STAT);
			}
			break;
		}
	}

	if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL)) {
		rv = -ENXIO;
		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
			mtip_device_reset(dd);
			rv = -EAGAIN;
		}
	}
exec_ic_exit:
	/* Clear the allocated and active bits for the internal command. */
	atomic_set(&int_cmd->active, 0);
	release_slot(port, MTIP_TAG_INTERNAL);
	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
		/* NCQ paused */
		return rv;
	}
	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	wake_up_interruptible(&port->svc_wait);

	return rv;
}

/*
 * Byte-swap ATA ID strings.
 *
 * ATA identify data contains strings in byte-swapped 16-bit words.
 * They must be swapped (on all architectures) to be usable as C strings.
 * This function swaps bytes in-place.
 *
 * @buf The buffer location of the string
 * @len The number of bytes to swap
 *
 * return value
 *	None
 */
static inline void ata_swap_string(u16 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < (len/2); i++)
		be16_to_cpus(&buf[i]);
}

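/*
 * Worked example: each 16-bit identify word holds two characters with
 * the first character in the upper byte, so the raw bytes of "Micron"
 * read back as 'i','M','r','c','o','n' on a little-endian CPU.
 * Treating each word as big-endian and converting to CPU order, as
 * be16_to_cpus() does above, restores the readable "Micron".
 */
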
/*
 * Request the device identity information.
 *
 * If a user space buffer is not specified, i.e. is NULL, the
 * identify information is still read from the drive and placed
 * into the identify data buffer (@e port->identify) in the
 * port data structure.
 * When the identify buffer contains valid identify information @e
 * port->identify_valid is non-zero.
 *
 * @port        Pointer to the port structure.
 * @user_buffer A user space buffer where the identify data should be
 *              copied.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT	An error occurred while copying data to the user buffer.
 *	-1	Command failed.
 */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return -EFAULT;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				GFP_KERNEL,
				MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping.  Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40);  /* model string*/
	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
#else
	{
		int i;
		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Check security locked state */
	if (port->identify[128] & 0x4)
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
	else
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
	/* Demux ID.DRAT & ID.RZAT to determine trim support */
	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
		port->dd->trim_supp = true;
	else
#endif
		port->dd->trim_supp = false;

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	return rv;
}

/*
 * Issue a standby immediate command to the device.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	0	Command was executed successfully.
 *	-1	An error occurred while executing the command.
 */
static int mtip_standby_immediate(struct mtip_port *port)
{
	int rv;
	struct host_to_dev_fis fis;
	unsigned long start;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_STANDBYNOW1;

	start = jiffies;
	rv = mtip_exec_internal_command(port,
					&fis,
					5,
					0,
					0,
					0,
					GFP_ATOMIC,
					15000);
	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
			jiffies_to_msecs(jiffies - start));
	if (rv)
		dev_warn(&port->dd->pdev->dev,
			"STANDBY IMMEDIATE command failed.\n");

	return rv;
}

/*
 * Issue a READ LOG EXT command to the device.
 *
 * @port       pointer to the port structure.
 * @page       page number to fetch
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 * @sectors    page length to fetch, in sectors
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_READ_LOG_EXT;
	fis.sect_count	= sectors & 0xFF;
	fis.sect_cnt_ex	= (sectors >> 8) & 0xFF;
	fis.lba_low	= page;
	fis.lba_mid	= 0;
	fis.device	= ATA_DEVICE_OBS;

	memset(buffer, 0, sectors * ATA_SECT_SIZE);

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					sectors * ATA_SECT_SIZE,
					0,
					GFP_ATOMIC,
					MTIP_INTERNAL_COMMAND_TIMEOUT_MS);
}

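/*
 * FIS construction pattern used throughout this file: type 0x27 is the
 * SATA Register Host-to-Device FIS, opts bit 7 is the C (command) bit
 * telling the device the taskfile carries a new command, and the
 * fis_len argument of 5 passed to mtip_exec_internal_command() is the
 * FIS size in 32-bit words (5 dwords = the 20-byte register FIS).
 */
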
/*
 * Issue a SMART READ DATA command to the device.
 *
 * @port       pointer to the port structure.
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
					dma_addr_t buffer_dma)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_SMART;
	fis.features	= 0xD0;
	fis.sect_count	= 1;
	fis.lba_mid	= 0x4F;
	fis.lba_hi	= 0xC2;
	fis.device	= ATA_DEVICE_OBS;

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					ATA_SECT_SIZE,
					0,
					GFP_ATOMIC,
					15000);
}

/*
 * Get the value of a smart attribute
 *
 * @port   pointer to the port structure
 * @id     attribute number
 * @attrib pointer to return attrib information corresponding to @id
 *
 * return value
 *	-EINVAL	NULL buffer passed or unsupported attribute @id.
 *	-EPERM	Identify data not valid, SMART not supported or not enabled
 */
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib)
{
	int rv, i;
	struct smart_attr *pattr;

	if (!attrib)
		return -EINVAL;

	if (!port->identify_valid) {
		dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
		return -EPERM;
	}
	if (!(port->identify[82] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
		return -EPERM;
	}
	if (!(port->identify[85] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
		return -EPERM;
	}

	memset(port->smart_buf, 0, ATA_SECT_SIZE);
	rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
	if (rv) {
		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
		return rv;
	}

	pattr = (struct smart_attr *)(port->smart_buf + 2);
	for (i = 0; i < 29; i++, pattr++)
		if (pattr->attr_id == id) {
			memcpy(attrib, pattr, sizeof(struct smart_attr));
			break;
		}

	if (i == 29) {
		dev_warn(&port->dd->pdev->dev,
			"Query for invalid SMART attribute ID\n");
		rv = -EINVAL;
	}

	return rv;
}

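/*
 * SMART command anatomy, for reference: features 0xD0 selects SMART
 * READ DATA, and the 0x4F/0xC2 values written to the LBA mid/high
 * registers are the fixed signature the ATA standard requires for all
 * SMART commands. The returned 512-byte sector holds the attribute
 * table that mtip_get_smart_attr() above scans: 29 struct smart_attr
 * entries starting at byte offset 2 (nominally 12 bytes each).
 */
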
/*
 * Trim unused sectors
 *
 * @dd  pointer to driver_data structure
 * @lba starting lba
 * @len # of 512b sectors to trim
 *
 * return value
 *	-ENOMEM	Out of dma memory
 *	-EINVAL	Invalid parameters passed in, trim not supported
 *	-EIO	Error submitting trim request to hw
 */
static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
				unsigned int len)
{
	int i, rv = 0;
	u64 tlba, tlen, sect_left;
	struct mtip_trim_entry *buf;
	dma_addr_t dma_addr;
	struct host_to_dev_fis fis;

	if (!len || dd->trim_supp == false)
		return -EINVAL;

	/* Trim request too big */
	WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));

	/* Trim request not aligned on 4k boundary */
	WARN_ON(len % 8 != 0);

	/* Warn if vu_trim structure is too big */
	WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);

	/* Allocate a DMA buffer for the trim structure */
	buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
								GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, ATA_SECT_SIZE);

	for (i = 0, sect_left = len, tlba = lba;
			i < MTIP_MAX_TRIM_ENTRIES && sect_left;
			i++) {
		tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
					MTIP_MAX_TRIM_ENTRY_LEN :
					sect_left);
		buf[i].lba = __force_bit2int cpu_to_le32(tlba);
		buf[i].range = __force_bit2int cpu_to_le16(tlen);
		tlba += tlen;
		sect_left -= tlen;
	}
	WARN_ON(sect_left != 0);

	/* Build the fis */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= 0xfb;
	fis.features	= 0x60;
	fis.sect_count	= 1;
	fis.device	= ATA_DEVICE_OBS;

	if (mtip_exec_internal_command(dd->port,
					&fis,
					5,
					dma_addr,
					ATA_SECT_SIZE,
					0,
					GFP_KERNEL,
					MTIP_TRIM_TIMEOUT_MS) < 0)
		rv = -EIO;

	dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
	return rv;
}

/*
 * Get the drive capacity.
 *
 * @dd      Pointer to the device data structure.
 * @sectors Pointer to the variable that will receive the sector count.
 *
 * return value
 *	1 Capacity was returned successfully.
 *	0 The identify information is invalid.
 */
static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
{
	struct mtip_port *port = dd->port;
	u64 total, raw0, raw1, raw2, raw3;

	raw0 = port->identify[100];
	raw1 = port->identify[101];
	raw2 = port->identify[102];
	raw3 = port->identify[103];
	total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
	*sectors = total;
	return (bool) !!port->identify_valid;
}

/*
 * Display the identify command data.
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_dump_identify(struct mtip_port *port)
{
	sector_t sectors;
	unsigned short revid;
	char cbuf[42];

	if (!port->identify_valid)
		return;

	strlcpy(cbuf, (char *)(port->identify+10), 21);
	dev_info(&port->dd->pdev->dev,
		"Serial No.: %s\n", cbuf);

	strlcpy(cbuf, (char *)(port->identify+23), 9);
	dev_info(&port->dd->pdev->dev,
		"Firmware Ver.: %s\n", cbuf);

	strlcpy(cbuf, (char *)(port->identify+27), 41);
	dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);

	dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
		port->identify[128],
		port->identify[128] & 0x4 ? "(LOCKED)" : "");

	if (mtip_hw_get_capacity(port->dd, &sectors))
		dev_info(&port->dd->pdev->dev,
			"Capacity: %llu sectors (%llu MB)\n",
			(u64)sectors,
			((u64)sectors) * ATA_SECT_SIZE >> 20);

	pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
	switch (revid & 0xFF) {
	case 0x1:
		strlcpy(cbuf, "A0", 3);
		break;
	case 0x3:
		strlcpy(cbuf, "A2", 3);
		break;
	default:
		strlcpy(cbuf, "?", 2);
		break;
	}
	dev_info(&port->dd->pdev->dev,
		"Card Type: %s\n", cbuf);
}

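/*
 * Capacity math, worked through with illustrative (not real-device)
 * numbers: identify words 100-103 form the 48-bit LBA sector count,
 * assembled least-significant word first. A 512 GB-class drive
 * reporting 1,000,215,216 sectors would print
 * "Capacity: 1000215216 sectors (488386 MB)" above, since
 * 1000215216 * 512 >> 20 = 488386.
 */
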
/*
 * Map the command's scatter list into the command table.
 *
 * @command Pointer to the command.
 * @nents   Number of scatter list entries.
 *
 * return value
 *	None
 */
static inline void fill_command_sg(struct driver_data *dd,
				struct mtip_cmd *command,
				int nents)
{
	int n;
	unsigned int dma_len;
	struct mtip_cmd_sg *command_sg;
	struct scatterlist *sg = command->sg;

	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;

	for (n = 0; n < nents; n++) {
		dma_len = sg_dma_len(sg);
		if (dma_len > 0x400000)
			dev_err(&dd->pdev->dev,
				"DMA segment length truncated\n");
		command_sg->info = __force_bit2int
			cpu_to_le32((dma_len-1) & 0x3FFFFF);
		command_sg->dba	= __force_bit2int
			cpu_to_le32(sg_dma_address(sg));
		command_sg->dba_upper = __force_bit2int
			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		command_sg++;
		sg++;
	}
}

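/*
 * PRD entry limits in fill_command_sg() above: the info field stores
 * (byte_count - 1) in its low 22 bits, so one mtip_cmd_sg entry can
 * describe at most 0x400000 (4 MiB) of contiguous DMA; anything larger
 * would be silently truncated by the 0x3FFFFF mask, hence the
 * dev_err() guard.
 */
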
2052 */ 2053 static int exec_drive_command(struct mtip_port *port, u8 *command, 2054 void __user *user_buffer) 2055 { 2056 struct host_to_dev_fis fis; 2057 struct host_to_dev_fis *reply; 2058 u8 *buf = NULL; 2059 dma_addr_t dma_addr = 0; 2060 int rv = 0, xfer_sz = command[3]; 2061 2062 if (xfer_sz) { 2063 if (!user_buffer) 2064 return -EFAULT; 2065 2066 buf = dmam_alloc_coherent(&port->dd->pdev->dev, 2067 ATA_SECT_SIZE * xfer_sz, 2068 &dma_addr, 2069 GFP_KERNEL); 2070 if (!buf) { 2071 dev_err(&port->dd->pdev->dev, 2072 "Memory allocation failed (%d bytes)\n", 2073 ATA_SECT_SIZE * xfer_sz); 2074 return -ENOMEM; 2075 } 2076 memset(buf, 0, ATA_SECT_SIZE * xfer_sz); 2077 } 2078 2079 /* Build the FIS. */ 2080 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 2081 fis.type = 0x27; 2082 fis.opts = 1 << 7; 2083 fis.command = command[0]; 2084 fis.features = command[2]; 2085 fis.sect_count = command[3]; 2086 if (fis.command == ATA_CMD_SMART) { 2087 fis.sector = command[1]; 2088 fis.cyl_low = 0x4F; 2089 fis.cyl_hi = 0xC2; 2090 } 2091 2092 if (xfer_sz) 2093 reply = (port->rxfis + RX_FIS_PIO_SETUP); 2094 else 2095 reply = (port->rxfis + RX_FIS_D2H_REG); 2096 2097 dbg_printk(MTIP_DRV_NAME 2098 " %s: User Command: cmd %x, sect %x, " 2099 "feat %x, sectcnt %x\n", 2100 __func__, 2101 command[0], 2102 command[1], 2103 command[2], 2104 command[3]); 2105 2106 /* Execute the command. */ 2107 if (mtip_exec_internal_command(port, 2108 &fis, 2109 5, 2110 (xfer_sz ? dma_addr : 0), 2111 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0), 2112 0, 2113 GFP_KERNEL, 2114 MTIP_IOCTL_COMMAND_TIMEOUT_MS) 2115 < 0) { 2116 rv = -EFAULT; 2117 goto exit_drive_command; 2118 } 2119 2120 /* Collect the completion status. */ 2121 command[0] = reply->command; /* Status*/ 2122 command[1] = reply->features; /* Error*/ 2123 command[2] = reply->sect_count; 2124 2125 dbg_printk(MTIP_DRV_NAME 2126 " %s: Completion Status: stat %x, " 2127 "err %x, nsect %x\n", 2128 __func__, 2129 command[0], 2130 command[1], 2131 command[2]); 2132 2133 if (xfer_sz) { 2134 if (copy_to_user(user_buffer, 2135 buf, 2136 ATA_SECT_SIZE * command[3])) { 2137 rv = -EFAULT; 2138 goto exit_drive_command; 2139 } 2140 } 2141 exit_drive_command: 2142 if (buf) 2143 dmam_free_coherent(&port->dd->pdev->dev, 2144 ATA_SECT_SIZE * xfer_sz, buf, dma_addr); 2145 return rv; 2146 } 2147 2148 /* 2149 * Indicates whether a command has a single sector payload. 2150 * 2151 * @command passed to the device to perform the certain event. 2152 * @features passed to the device to perform the certain event. 2153 * 2154 * return value 2155 * 1 command is one that always has a single sector payload, 2156 * regardless of the value in the Sector Count field. 
2157 * 0 otherwise 2158 * 2159 */ 2160 static unsigned int implicit_sector(unsigned char command, 2161 unsigned char features) 2162 { 2163 unsigned int rv = 0; 2164 2165 /* list of commands that have an implicit sector count of 1 */ 2166 switch (command) { 2167 case ATA_CMD_SEC_SET_PASS: 2168 case ATA_CMD_SEC_UNLOCK: 2169 case ATA_CMD_SEC_ERASE_PREP: 2170 case ATA_CMD_SEC_ERASE_UNIT: 2171 case ATA_CMD_SEC_FREEZE_LOCK: 2172 case ATA_CMD_SEC_DISABLE_PASS: 2173 case ATA_CMD_PMP_READ: 2174 case ATA_CMD_PMP_WRITE: 2175 rv = 1; 2176 break; 2177 case ATA_CMD_SET_MAX: 2178 if (features == ATA_SET_MAX_UNLOCK) 2179 rv = 1; 2180 break; 2181 case ATA_CMD_SMART: 2182 if ((features == ATA_SMART_READ_VALUES) || 2183 (features == ATA_SMART_READ_THRESHOLDS)) 2184 rv = 1; 2185 break; 2186 case ATA_CMD_CONF_OVERLAY: 2187 if ((features == ATA_DCO_IDENTIFY) || 2188 (features == ATA_DCO_SET)) 2189 rv = 1; 2190 break; 2191 } 2192 return rv; 2193 } 2194 static void mtip_set_timeout(struct driver_data *dd, 2195 struct host_to_dev_fis *fis, 2196 unsigned int *timeout, u8 erasemode) 2197 { 2198 switch (fis->command) { 2199 case ATA_CMD_DOWNLOAD_MICRO: 2200 *timeout = 120000; /* 2 minutes */ 2201 break; 2202 case ATA_CMD_SEC_ERASE_UNIT: 2203 case 0xFC: 2204 if (erasemode) 2205 *timeout = ((*(dd->port->identify + 90) * 2) * 60000); 2206 else 2207 *timeout = ((*(dd->port->identify + 89) * 2) * 60000); 2208 break; 2209 case ATA_CMD_STANDBYNOW1: 2210 *timeout = 120000; /* 2 minutes */ 2211 break; 2212 case 0xF7: 2213 case 0xFA: 2214 *timeout = 60000; /* 60 seconds */ 2215 break; 2216 case ATA_CMD_SMART: 2217 *timeout = 15000; /* 15 seconds */ 2218 break; 2219 default: 2220 *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; 2221 break; 2222 } 2223 } 2224 2225 /* 2226 * Executes a taskfile 2227 * See ide_taskfile_ioctl() for derivation 2228 */ 2229 static int exec_drive_taskfile(struct driver_data *dd, 2230 void __user *buf, 2231 ide_task_request_t *req_task, 2232 int outtotal) 2233 { 2234 struct host_to_dev_fis fis; 2235 struct host_to_dev_fis *reply; 2236 u8 *outbuf = NULL; 2237 u8 *inbuf = NULL; 2238 dma_addr_t outbuf_dma = 0; 2239 dma_addr_t inbuf_dma = 0; 2240 dma_addr_t dma_buffer = 0; 2241 int err = 0; 2242 unsigned int taskin = 0; 2243 unsigned int taskout = 0; 2244 u8 nsect = 0; 2245 unsigned int timeout; 2246 unsigned int force_single_sector; 2247 unsigned int transfer_size; 2248 unsigned long task_file_data; 2249 int intotal = outtotal + req_task->out_size; 2250 int erasemode = 0; 2251 2252 taskout = req_task->out_size; 2253 taskin = req_task->in_size; 2254 /* 130560 = 512 * 0xFF*/ 2255 if (taskin > 130560 || taskout > 130560) { 2256 err = -EINVAL; 2257 goto abort; 2258 } 2259 2260 if (taskout) { 2261 outbuf = kzalloc(taskout, GFP_KERNEL); 2262 if (outbuf == NULL) { 2263 err = -ENOMEM; 2264 goto abort; 2265 } 2266 if (copy_from_user(outbuf, buf + outtotal, taskout)) { 2267 err = -EFAULT; 2268 goto abort; 2269 } 2270 outbuf_dma = pci_map_single(dd->pdev, 2271 outbuf, 2272 taskout, 2273 DMA_TO_DEVICE); 2274 if (outbuf_dma == 0) { 2275 err = -ENOMEM; 2276 goto abort; 2277 } 2278 dma_buffer = outbuf_dma; 2279 } 2280 2281 if (taskin) { 2282 inbuf = kzalloc(taskin, GFP_KERNEL); 2283 if (inbuf == NULL) { 2284 err = -ENOMEM; 2285 goto abort; 2286 } 2287 2288 if (copy_from_user(inbuf, buf + intotal, taskin)) { 2289 err = -EFAULT; 2290 goto abort; 2291 } 2292 inbuf_dma = pci_map_single(dd->pdev, 2293 inbuf, 2294 taskin, DMA_FROM_DEVICE); 2295 if (inbuf_dma == 0) { 2296 err = -ENOMEM; 2297 goto abort; 2298 } 2299 dma_buffer = 
inbuf_dma; 2300 } 2301 2302 /* only supports PIO and non-data commands from this ioctl. */ 2303 switch (req_task->data_phase) { 2304 case TASKFILE_OUT: 2305 nsect = taskout / ATA_SECT_SIZE; 2306 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP); 2307 break; 2308 case TASKFILE_IN: 2309 reply = (dd->port->rxfis + RX_FIS_PIO_SETUP); 2310 break; 2311 case TASKFILE_NO_DATA: 2312 reply = (dd->port->rxfis + RX_FIS_D2H_REG); 2313 break; 2314 default: 2315 err = -EINVAL; 2316 goto abort; 2317 } 2318 2319 /* Build the FIS. */ 2320 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 2321 2322 fis.type = 0x27; 2323 fis.opts = 1 << 7; 2324 fis.command = req_task->io_ports[7]; 2325 fis.features = req_task->io_ports[1]; 2326 fis.sect_count = req_task->io_ports[2]; 2327 fis.lba_low = req_task->io_ports[3]; 2328 fis.lba_mid = req_task->io_ports[4]; 2329 fis.lba_hi = req_task->io_ports[5]; 2330 /* Clear the dev bit*/ 2331 fis.device = req_task->io_ports[6] & ~0x10; 2332 2333 if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) { 2334 req_task->in_flags.all = 2335 IDE_TASKFILE_STD_IN_FLAGS | 2336 (IDE_HOB_STD_IN_FLAGS << 8); 2337 fis.lba_low_ex = req_task->hob_ports[3]; 2338 fis.lba_mid_ex = req_task->hob_ports[4]; 2339 fis.lba_hi_ex = req_task->hob_ports[5]; 2340 fis.features_ex = req_task->hob_ports[1]; 2341 fis.sect_cnt_ex = req_task->hob_ports[2]; 2342 2343 } else { 2344 req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS; 2345 } 2346 2347 force_single_sector = implicit_sector(fis.command, fis.features); 2348 2349 if ((taskin || taskout) && (!fis.sect_count)) { 2350 if (nsect) 2351 fis.sect_count = nsect; 2352 else { 2353 if (!force_single_sector) { 2354 dev_warn(&dd->pdev->dev, 2355 "data movement but " 2356 "sect_count is 0\n"); 2357 err = -EINVAL; 2358 goto abort; 2359 } 2360 } 2361 } 2362 2363 dbg_printk(MTIP_DRV_NAME 2364 " %s: cmd %x, feat %x, nsect %x," 2365 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x," 2366 " head/dev %x\n", 2367 __func__, 2368 fis.command, 2369 fis.features, 2370 fis.sect_count, 2371 fis.lba_low, 2372 fis.lba_mid, 2373 fis.lba_hi, 2374 fis.device); 2375 2376 /* check for erase mode support during secure erase.*/ 2377 if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf && 2378 (outbuf[0] & MTIP_SEC_ERASE_MODE)) { 2379 erasemode = 1; 2380 } 2381 2382 mtip_set_timeout(dd, &fis, &timeout, erasemode); 2383 2384 /* Determine the correct transfer size.*/ 2385 if (force_single_sector) 2386 transfer_size = ATA_SECT_SIZE; 2387 else 2388 transfer_size = ATA_SECT_SIZE * fis.sect_count; 2389 2390 /* Execute the command.*/ 2391 if (mtip_exec_internal_command(dd->port, 2392 &fis, 2393 5, 2394 dma_buffer, 2395 transfer_size, 2396 0, 2397 GFP_KERNEL, 2398 timeout) < 0) { 2399 err = -EIO; 2400 goto abort; 2401 } 2402 2403 task_file_data = readl(dd->port->mmio+PORT_TFDATA); 2404 2405 if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) { 2406 reply = dd->port->rxfis + RX_FIS_PIO_SETUP; 2407 req_task->io_ports[7] = reply->control; 2408 } else { 2409 reply = dd->port->rxfis + RX_FIS_D2H_REG; 2410 req_task->io_ports[7] = reply->command; 2411 } 2412 2413 /* reclaim the DMA buffers.*/ 2414 if (inbuf_dma) 2415 pci_unmap_single(dd->pdev, inbuf_dma, 2416 taskin, DMA_FROM_DEVICE); 2417 if (outbuf_dma) 2418 pci_unmap_single(dd->pdev, outbuf_dma, 2419 taskout, DMA_TO_DEVICE); 2420 inbuf_dma = 0; 2421 outbuf_dma = 0; 2422 2423 /* return the ATA registers to the caller.*/ 2424 req_task->io_ports[1] = reply->features; 2425 req_task->io_ports[2] = reply->sect_count; 2426 
req_task->io_ports[3] = reply->lba_low; 2427 req_task->io_ports[4] = reply->lba_mid; 2428 req_task->io_ports[5] = reply->lba_hi; 2429 req_task->io_ports[6] = reply->device; 2430 2431 if (req_task->out_flags.all & 1) { 2432 2433 req_task->hob_ports[3] = reply->lba_low_ex; 2434 req_task->hob_ports[4] = reply->lba_mid_ex; 2435 req_task->hob_ports[5] = reply->lba_hi_ex; 2436 req_task->hob_ports[1] = reply->features_ex; 2437 req_task->hob_ports[2] = reply->sect_cnt_ex; 2438 } 2439 dbg_printk(MTIP_DRV_NAME 2440 " %s: Completion: stat %x," 2441 "err %x, sect_cnt %x, lbalo %x," 2442 "lbamid %x, lbahi %x, dev %x\n", 2443 __func__, 2444 req_task->io_ports[7], 2445 req_task->io_ports[1], 2446 req_task->io_ports[2], 2447 req_task->io_ports[3], 2448 req_task->io_ports[4], 2449 req_task->io_ports[5], 2450 req_task->io_ports[6]); 2451 2452 if (taskout) { 2453 if (copy_to_user(buf + outtotal, outbuf, taskout)) { 2454 err = -EFAULT; 2455 goto abort; 2456 } 2457 } 2458 if (taskin) { 2459 if (copy_to_user(buf + intotal, inbuf, taskin)) { 2460 err = -EFAULT; 2461 goto abort; 2462 } 2463 } 2464 abort: 2465 if (inbuf_dma) 2466 pci_unmap_single(dd->pdev, inbuf_dma, 2467 taskin, DMA_FROM_DEVICE); 2468 if (outbuf_dma) 2469 pci_unmap_single(dd->pdev, outbuf_dma, 2470 taskout, DMA_TO_DEVICE); 2471 kfree(outbuf); 2472 kfree(inbuf); 2473 2474 return err; 2475 } 2476 2477 /* 2478 * Handle IOCTL calls from the Block Layer. 2479 * 2480 * This function is called by the Block Layer when it receives an IOCTL 2481 * command that it does not understand. If the IOCTL command is not supported 2482 * this function returns -ENOTTY. 2483 * 2484 * @dd Pointer to the driver data structure. 2485 * @cmd IOCTL command passed from the Block Layer. 2486 * @arg IOCTL argument passed from the Block Layer. 2487 * 2488 * return value 2489 * 0 The IOCTL completed successfully. 2490 * -ENOTTY The specified command is not supported. 2491 * -EFAULT An error occurred copying data to a user space buffer. 2492 * -EIO An error occurred while executing the command. 2493 */ 2494 static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, 2495 unsigned long arg) 2496 { 2497 switch (cmd) { 2498 case HDIO_GET_IDENTITY: 2499 { 2500 if (copy_to_user((void __user *)arg, dd->port->identify, 2501 sizeof(u16) * ATA_ID_WORDS)) 2502 return -EFAULT; 2503 break; 2504 } 2505 case HDIO_DRIVE_CMD: 2506 { 2507 u8 drive_command[4]; 2508 2509 /* Copy the user command info to our buffer. */ 2510 if (copy_from_user(drive_command, 2511 (void __user *) arg, 2512 sizeof(drive_command))) 2513 return -EFAULT; 2514 2515 /* Execute the drive command. */ 2516 if (exec_drive_command(dd->port, 2517 drive_command, 2518 (void __user *) (arg+4))) 2519 return -EIO; 2520 2521 /* Copy the status back to the users buffer. */ 2522 if (copy_to_user((void __user *) arg, 2523 drive_command, 2524 sizeof(drive_command))) 2525 return -EFAULT; 2526 2527 break; 2528 } 2529 case HDIO_DRIVE_TASK: 2530 { 2531 u8 drive_command[7]; 2532 2533 /* Copy the user command info to our buffer. */ 2534 if (copy_from_user(drive_command, 2535 (void __user *) arg, 2536 sizeof(drive_command))) 2537 return -EFAULT; 2538 2539 /* Execute the drive command. */ 2540 if (exec_drive_task(dd->port, drive_command)) 2541 return -EIO; 2542 2543 /* Copy the status back to the users buffer. 
*/ 2544 if (copy_to_user((void __user *) arg, 2545 drive_command, 2546 sizeof(drive_command))) 2547 return -EFAULT; 2548 2549 break; 2550 } 2551 case HDIO_DRIVE_TASKFILE: { 2552 ide_task_request_t req_task; 2553 int ret, outtotal; 2554 2555 if (copy_from_user(&req_task, (void __user *) arg, 2556 sizeof(req_task))) 2557 return -EFAULT; 2558 2559 outtotal = sizeof(req_task); 2560 2561 ret = exec_drive_taskfile(dd, (void __user *) arg, 2562 &req_task, outtotal); 2563 2564 if (copy_to_user((void __user *) arg, &req_task, 2565 sizeof(req_task))) 2566 return -EFAULT; 2567 2568 return ret; 2569 } 2570 2571 default: 2572 return -EINVAL; 2573 } 2574 return 0; 2575 } 2576 2577 /* 2578 * Submit an IO to the hw 2579 * 2580 * This function is called by the block layer to issue an io 2581 * to the device. Upon completion, the callback function will 2582 * be called with the data parameter passed as the callback data. 2583 * 2584 * @dd Pointer to the driver data structure. 2585 * @start First sector to read. 2586 * @nsect Number of sectors to read. 2587 * @nents Number of entries in scatter list for the read command. 2588 * @tag The tag of this read command. 2589 * @callback Pointer to the function that should be called 2590 * when the read completes. 2591 * @data Callback data passed to the callback function 2592 * when the read completes. 2593 * @dir Direction (read or write) 2594 * 2595 * return value 2596 * None 2597 */ 2598 static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, 2599 int nsect, int nents, int tag, void *callback, 2600 void *data, int dir, int unaligned) 2601 { 2602 struct host_to_dev_fis *fis; 2603 struct mtip_port *port = dd->port; 2604 struct mtip_cmd *command = &port->commands[tag]; 2605 int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 2606 u64 start = sector; 2607 2608 /* Map the scatter list for DMA access */ 2609 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); 2610 2611 command->scatter_ents = nents; 2612 2613 command->unaligned = unaligned; 2614 /* 2615 * The number of retries for this command before it is 2616 * reported as a failure to the upper layers. 2617 */ 2618 command->retries = MTIP_MAX_RETRIES; 2619 2620 /* Fill out fis */ 2621 fis = command->command; 2622 fis->type = 0x27; 2623 fis->opts = 1 << 7; 2624 fis->command = 2625 (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); 2626 fis->lba_low = start & 0xFF; 2627 fis->lba_mid = (start >> 8) & 0xFF; 2628 fis->lba_hi = (start >> 16) & 0xFF; 2629 fis->lba_low_ex = (start >> 24) & 0xFF; 2630 fis->lba_mid_ex = (start >> 32) & 0xFF; 2631 fis->lba_hi_ex = (start >> 40) & 0xFF; 2632 fis->device = 1 << 6; 2633 fis->features = nsect & 0xFF; 2634 fis->features_ex = (nsect >> 8) & 0xFF; 2635 fis->sect_count = ((tag << 3) | (tag >> 5)); 2636 fis->sect_cnt_ex = 0; 2637 fis->control = 0; 2638 fis->res2 = 0; 2639 fis->res3 = 0; 2640 fill_command_sg(dd, command, nents); 2641 2642 if (unaligned) 2643 fis->device |= 1 << 7; 2644 2645 /* Populate the command header */ 2646 command->command_header->opts = 2647 __force_bit2int cpu_to_le32( 2648 (nents << 16) | 5 | AHCI_CMD_PREFETCH); 2649 command->command_header->byte_count = 0; 2650 2651 /* 2652 * Set the completion function and data for the command 2653 * within this layer. 2654 */ 2655 command->comp_data = dd; 2656 command->comp_func = mtip_async_complete; 2657 command->direction = dma_dir; 2658 2659 /* 2660 * Set the completion function and data for the command passed 2661 * from the upper layer. 
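 * For requests submitted via mtip_make_request() the callback is
 * bio_endio() and the callback data is the bio itself, so completion
 * flows straight back to the block layer.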
*/ 2663 command->async_data = data; 2664 command->async_callback = callback; 2665 2666 /* 2667 * Prevent this command from being issued 2668 * if an internal command is in progress or error handling is active. 2669 */ 2670 if (port->flags & MTIP_PF_PAUSE_IO) { 2671 set_bit(tag, port->cmds_to_issue); 2672 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); 2673 return; 2674 } 2675 2676 /* Issue the command to the hardware */ 2677 mtip_issue_ncq_command(port, tag); 2678 2679 return; 2680 } 2681 2682 /* 2683 * Release a command slot. 2684 * 2685 * @dd Pointer to the driver data structure. 2686 * @tag Slot tag 2687 * 2688 * return value 2689 * None 2690 */ 2691 static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag, 2692 int unaligned) 2693 { 2694 struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal : 2695 &dd->port->cmd_slot; 2696 release_slot(dd->port, tag); 2697 up(sem); 2698 } 2699 2700 /* 2701 * Obtain a command slot and return its associated scatter list. 2702 * 2703 * @dd Pointer to the driver data structure. 2704 * @tag Pointer to an int that will receive the allocated command 2705 * slot tag. 2706 * 2707 * return value 2708 * Pointer to the scatter list for the allocated command slot 2709 * or NULL if no command slots are available. 2710 */ 2711 static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd, 2712 int *tag, int unaligned) 2713 { 2714 struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal : 2715 &dd->port->cmd_slot; 2716 2717 /* 2718 * It is possible that, even with this semaphore, a thread 2719 * may think that no command slots are available. Therefore, we 2720 * need to make an attempt to get_slot(). 2721 */ 2722 down(sem); 2723 *tag = get_slot(dd->port); 2724 2725 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { 2726 up(sem); 2727 return NULL; 2728 } 2729 if (unlikely(*tag < 0)) { 2730 up(sem); 2731 return NULL; 2732 } 2733 2734 return dd->port->commands[*tag].sg; 2735 } 2736 2737 /* 2738 * Sysfs status dump. 2739 * 2740 * @dev Pointer to the device structure, passed by the kernel. 2741 * @attr Pointer to the device_attribute structure passed by the kernel. 2742 * @buf Pointer to the char buffer that will receive the stats info. 2743 * 2744 * return value 2745 * The size, in bytes, of the data copied into buf.
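 *
 * The attribute is created on the disk's kobject by mtip_hw_sysfs_init(),
 * so user space can read it as, e.g., /sys/block/rssda/status (path
 * assuming the first device); it reports one of "online",
 * "write_protect" or "thermal_shutdown".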
*/ 2747 static ssize_t mtip_hw_show_status(struct device *dev, 2748 struct device_attribute *attr, 2749 char *buf) 2750 { 2751 struct driver_data *dd = dev_to_disk(dev)->private_data; 2752 int size = 0; 2753 2754 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) 2755 size += sprintf(buf, "%s", "thermal_shutdown\n"); 2756 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag)) 2757 size += sprintf(buf, "%s", "write_protect\n"); 2758 else 2759 size += sprintf(buf, "%s", "online\n"); 2760 2761 return size; 2762 } 2763 2764 static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); 2765 2766 /* debugfs entries */ 2767 2768 static ssize_t show_device_status(struct device_driver *drv, char *buf) 2769 { 2770 int size = 0; 2771 struct driver_data *dd, *tmp; 2772 unsigned long flags; 2773 char id_buf[42]; 2774 u16 status = 0; 2775 2776 spin_lock_irqsave(&dev_lock, flags); 2777 size += sprintf(&buf[size], "Devices Present:\n"); 2778 list_for_each_entry_safe(dd, tmp, &online_list, online_list) { 2779 if (dd->pdev) { 2780 if (dd->port && 2781 dd->port->identify && 2782 dd->port->identify_valid) { 2783 strlcpy(id_buf, 2784 (char *) (dd->port->identify + 10), 21); 2785 status = *(dd->port->identify + 141); 2786 } else { 2787 memset(id_buf, 0, 42); 2788 status = 0; 2789 } 2790 2791 if (dd->port && 2792 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { 2793 size += sprintf(&buf[size], 2794 " device %s %s (ftl rebuild %d %%)\n", 2795 dev_name(&dd->pdev->dev), 2796 id_buf, 2797 status); 2798 } else { 2799 size += sprintf(&buf[size], 2800 " device %s %s\n", 2801 dev_name(&dd->pdev->dev), 2802 id_buf); 2803 } 2804 } 2805 } 2806 2807 size += sprintf(&buf[size], "Devices Being Removed:\n"); 2808 list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { 2809 if (dd->pdev) { 2810 if (dd->port && 2811 dd->port->identify && 2812 dd->port->identify_valid) { 2813 strlcpy(id_buf, 2814 (char *) (dd->port->identify+10), 21); 2815 status = *(dd->port->identify + 141); 2816 } else { 2817 memset(id_buf, 0, 42); 2818 status = 0; 2819 } 2820 2821 if (dd->port && 2822 test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { 2823 size += sprintf(&buf[size], 2824 " device %s %s (ftl rebuild %d %%)\n", 2825 dev_name(&dd->pdev->dev), 2826 id_buf, 2827 status); 2828 } else { 2829 size += sprintf(&buf[size], 2830 " device %s %s\n", 2831 dev_name(&dd->pdev->dev), 2832 id_buf); 2833 } 2834 } 2835 } 2836 spin_unlock_irqrestore(&dev_lock, flags); 2837 2838 return size; 2839 } 2840 2841 static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, 2842 size_t len, loff_t *offset) 2843 { 2844 struct driver_data *dd = (struct driver_data *)f->private_data; 2845 int size = *offset; 2846 char *buf; 2847 int rv = 0; 2848 2849 if (!len || *offset) 2850 return 0; 2851 2852 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2853 if (!buf) { 2854 dev_err(&dd->pdev->dev, 2855 "Memory allocation: status buffer\n"); 2856 return -ENOMEM; 2857 } 2858 2859 size += show_device_status(NULL, buf); 2860 2861 *offset = size <= len ? size : len; 2862 size = copy_to_user(ubuf, buf, *offset); 2863 if (size) 2864 rv = -EFAULT; 2865 2866 kfree(buf); 2867 return rv ?
rv : *offset; 2868 } 2869 2870 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, 2871 size_t len, loff_t *offset) 2872 { 2873 struct driver_data *dd = (struct driver_data *)f->private_data; 2874 char *buf; 2875 u32 group_allocated; 2876 int size = *offset; 2877 int n, rv = 0; 2878 2879 if (!len || size) 2880 return 0; 2881 2882 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2883 if (!buf) { 2884 dev_err(&dd->pdev->dev, 2885 "Memory allocation: register buffer\n"); 2886 return -ENOMEM; 2887 } 2888 2889 size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); 2890 2891 for (n = dd->slot_groups-1; n >= 0; n--) 2892 size += sprintf(&buf[size], "%08X ", 2893 readl(dd->port->s_active[n])); 2894 2895 size += sprintf(&buf[size], "]\n"); 2896 size += sprintf(&buf[size], "H/ Command Issue : [ 0x"); 2897 2898 for (n = dd->slot_groups-1; n >= 0; n--) 2899 size += sprintf(&buf[size], "%08X ", 2900 readl(dd->port->cmd_issue[n])); 2901 2902 size += sprintf(&buf[size], "]\n"); 2903 size += sprintf(&buf[size], "H/ Completed : [ 0x"); 2904 2905 for (n = dd->slot_groups-1; n >= 0; n--) 2906 size += sprintf(&buf[size], "%08X ", 2907 readl(dd->port->completed[n])); 2908 2909 size += sprintf(&buf[size], "]\n"); 2910 size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n", 2911 readl(dd->port->mmio + PORT_IRQ_STAT)); 2912 size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n", 2913 readl(dd->mmio + HOST_IRQ_STAT)); 2914 size += sprintf(&buf[size], "\n"); 2915 2916 size += sprintf(&buf[size], "L/ Allocated : [ 0x"); 2917 2918 for (n = dd->slot_groups-1; n >= 0; n--) { 2919 if (sizeof(long) > sizeof(u32)) 2920 group_allocated = 2921 dd->port->allocated[n/2] >> (32*(n&1)); 2922 else 2923 group_allocated = dd->port->allocated[n]; 2924 size += sprintf(&buf[size], "%08X ", group_allocated); 2925 } 2926 size += sprintf(&buf[size], "]\n"); 2927 2928 size += sprintf(&buf[size], "L/ Commands in Q : [ 0x"); 2929 2930 for (n = dd->slot_groups-1; n >= 0; n--) { 2931 if (sizeof(long) > sizeof(u32)) 2932 group_allocated = 2933 dd->port->cmds_to_issue[n/2] >> (32*(n&1)); 2934 else 2935 group_allocated = dd->port->cmds_to_issue[n]; 2936 size += sprintf(&buf[size], "%08X ", group_allocated); 2937 } 2938 size += sprintf(&buf[size], "]\n"); 2939 2940 *offset = size <= len ? size : len; 2941 size = copy_to_user(ubuf, buf, *offset); 2942 if (size) 2943 rv = -EFAULT; 2944 2945 kfree(buf); 2946 return rv ? rv : *offset; 2947 } 2948 2949 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, 2950 size_t len, loff_t *offset) 2951 { 2952 struct driver_data *dd = (struct driver_data *)f->private_data; 2953 char *buf; 2954 int size = *offset; 2955 int rv = 0; 2956 2957 if (!len || size) 2958 return 0; 2959 2960 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2961 if (!buf) { 2962 dev_err(&dd->pdev->dev, 2963 "Memory allocation: flag buffer\n"); 2964 return -ENOMEM; 2965 } 2966 2967 size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", 2968 dd->port->flags); 2969 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", 2970 dd->dd_flag); 2971 2972 *offset = size <= len ? size : len; 2973 size = copy_to_user(ubuf, buf, *offset); 2974 if (size) 2975 rv = -EFAULT; 2976 2977 kfree(buf); 2978 return rv ? 
rv : *offset; 2979 } 2980 2981 static const struct file_operations mtip_device_status_fops = { 2982 .owner = THIS_MODULE, 2983 .open = simple_open, 2984 .read = mtip_hw_read_device_status, 2985 .llseek = no_llseek, 2986 }; 2987 2988 static const struct file_operations mtip_regs_fops = { 2989 .owner = THIS_MODULE, 2990 .open = simple_open, 2991 .read = mtip_hw_read_registers, 2992 .llseek = no_llseek, 2993 }; 2994 2995 static const struct file_operations mtip_flags_fops = { 2996 .owner = THIS_MODULE, 2997 .open = simple_open, 2998 .read = mtip_hw_read_flags, 2999 .llseek = no_llseek, 3000 }; 3001 3002 /* 3003 * Create the sysfs related attributes. 3004 * 3005 * @dd Pointer to the driver data structure. 3006 * @kobj Pointer to the kobj for the block device. 3007 * 3008 * return value 3009 * 0 Operation completed successfully. 3010 * -EINVAL Invalid parameter. 3011 */ 3012 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj) 3013 { 3014 if (!kobj || !dd) 3015 return -EINVAL; 3016 3017 if (sysfs_create_file(kobj, &dev_attr_status.attr)) 3018 dev_warn(&dd->pdev->dev, 3019 "Error creating 'status' sysfs entry\n"); 3020 return 0; 3021 } 3022 3023 /* 3024 * Remove the sysfs related attributes. 3025 * 3026 * @dd Pointer to the driver data structure. 3027 * @kobj Pointer to the kobj for the block device. 3028 * 3029 * return value 3030 * 0 Operation completed successfully. 3031 * -EINVAL Invalid parameter. 3032 */ 3033 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj) 3034 { 3035 if (!kobj || !dd) 3036 return -EINVAL; 3037 3038 sysfs_remove_file(kobj, &dev_attr_status.attr); 3039 3040 return 0; 3041 } 3042 3043 static int mtip_hw_debugfs_init(struct driver_data *dd) 3044 { 3045 if (!dfs_parent) 3046 return -1; 3047 3048 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent); 3049 if (IS_ERR_OR_NULL(dd->dfs_node)) { 3050 dev_warn(&dd->pdev->dev, 3051 "Error creating node %s under debugfs\n", 3052 dd->disk->disk_name); 3053 dd->dfs_node = NULL; 3054 return -1; 3055 } 3056 3057 debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd, 3058 &mtip_flags_fops); 3059 debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd, 3060 &mtip_regs_fops); 3061 3062 return 0; 3063 } 3064 3065 static void mtip_hw_debugfs_exit(struct driver_data *dd) 3066 { 3067 if (dd->dfs_node) 3068 debugfs_remove_recursive(dd->dfs_node); 3069 } 3070 3071 static int mtip_free_orphan(struct driver_data *dd) 3072 { 3073 struct kobject *kobj; 3074 3075 if (dd->bdev) { 3076 if (dd->bdev->bd_holders >= 1) 3077 return -2; 3078 3079 bdput(dd->bdev); 3080 dd->bdev = NULL; 3081 } 3082 3083 mtip_hw_debugfs_exit(dd); 3084 3085 spin_lock(&rssd_index_lock); 3086 ida_remove(&rssd_index_ida, dd->index); 3087 spin_unlock(&rssd_index_lock); 3088 3089 if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) && 3090 test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { 3091 put_disk(dd->disk); 3092 } else { 3093 if (dd->disk) { 3094 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); 3095 if (kobj) { 3096 mtip_hw_sysfs_exit(dd, kobj); 3097 kobject_put(kobj); 3098 } 3099 del_gendisk(dd->disk); 3100 dd->disk = NULL; 3101 } 3102 if (dd->queue) { 3103 dd->queue->queuedata = NULL; 3104 blk_cleanup_queue(dd->queue); 3105 dd->queue = NULL; 3106 } 3107 } 3108 kfree(dd); 3109 return 0; 3110 } 3111 3112 /* 3113 * Perform any init/resume time hardware setup 3114 * 3115 * @dd Pointer to the driver data structure. 
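 *
 * Currently the only setup performed is the slot-group interrupt
 * workaround: HSORG_DISABLE_SLOTGRP_INTR and HSORG_DISABLE_SLOTGRP_PXIS
 * are set in the vendor HSORG register so that a single IS bit is used.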
3116 * 3117 * return value 3118 * None 3119 */ 3120 static inline void hba_setup(struct driver_data *dd) 3121 { 3122 u32 hwdata; 3123 hwdata = readl(dd->mmio + HOST_HSORG); 3124 3125 /* interrupt bug workaround: use only 1 IS bit.*/ 3126 writel(hwdata | 3127 HSORG_DISABLE_SLOTGRP_INTR | 3128 HSORG_DISABLE_SLOTGRP_PXIS, 3129 dd->mmio + HOST_HSORG); 3130 } 3131 3132 static int mtip_device_unaligned_constrained(struct driver_data *dd) 3133 { 3134 return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0); 3135 } 3136 3137 /* 3138 * Detect the details of the product, and store anything needed 3139 * into the driver data structure. This includes product type and 3140 * version and number of slot groups. 3141 * 3142 * @dd Pointer to the driver data structure. 3143 * 3144 * return value 3145 * None 3146 */ 3147 static void mtip_detect_product(struct driver_data *dd) 3148 { 3149 u32 hwdata; 3150 unsigned int rev, slotgroups; 3151 3152 /* 3153 * HBA base + 0xFC [15:0] - vendor-specific hardware interface 3154 * info register: 3155 * [15:8] hardware/software interface rev# 3156 * [ 3] asic-style interface 3157 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style). 3158 */ 3159 hwdata = readl(dd->mmio + HOST_HSORG); 3160 3161 dd->product_type = MTIP_PRODUCT_UNKNOWN; 3162 dd->slot_groups = 1; 3163 3164 if (hwdata & 0x8) { 3165 dd->product_type = MTIP_PRODUCT_ASICFPGA; 3166 rev = (hwdata & HSORG_HWREV) >> 8; 3167 slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1; 3168 dev_info(&dd->pdev->dev, 3169 "ASIC-FPGA design, HS rev 0x%x, " 3170 "%i slot groups [%i slots]\n", 3171 rev, 3172 slotgroups, 3173 slotgroups * 32); 3174 3175 if (slotgroups > MTIP_MAX_SLOT_GROUPS) { 3176 dev_warn(&dd->pdev->dev, 3177 "Warning: driver only supports " 3178 "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS); 3179 slotgroups = MTIP_MAX_SLOT_GROUPS; 3180 } 3181 dd->slot_groups = slotgroups; 3182 return; 3183 } 3184 3185 dev_warn(&dd->pdev->dev, "Unrecognized product id\n"); 3186 } 3187 3188 /* 3189 * Blocking wait for FTL rebuild to complete 3190 * 3191 * @dd Pointer to the DRIVER_DATA structure. 3192 * 3193 * return value 3194 * 0 FTL rebuild completed successfully 3195 * -EFAULT FTL rebuild error/timeout/interruption 3196 */ 3197 static int mtip_ftl_rebuild_poll(struct driver_data *dd) 3198 { 3199 unsigned long timeout, cnt = 0, start; 3200 3201 dev_warn(&dd->pdev->dev, 3202 "FTL rebuild in progress. 
Polling for completion.\n"); 3203 3204 start = jiffies; 3205 timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS); 3206 3207 do { 3208 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 3209 &dd->dd_flag))) 3210 return -EFAULT; 3211 if (mtip_check_surprise_removal(dd->pdev)) 3212 return -EFAULT; 3213 3214 if (mtip_get_identify(dd->port, NULL) < 0) 3215 return -EFAULT; 3216 3217 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == 3218 MTIP_FTL_REBUILD_MAGIC) { 3219 ssleep(1); 3220 /* Print message every 3 minutes */ 3221 if (cnt++ >= 180) { 3222 dev_warn(&dd->pdev->dev, 3223 "FTL rebuild in progress (%d secs).\n", 3224 jiffies_to_msecs(jiffies - start) / 1000); 3225 cnt = 0; 3226 } 3227 } else { 3228 dev_warn(&dd->pdev->dev, 3229 "FTL rebuild complete (%d secs).\n", 3230 jiffies_to_msecs(jiffies - start) / 1000); 3231 mtip_block_initialize(dd); 3232 return 0; 3233 } 3234 ssleep(10); 3235 } while (time_before(jiffies, timeout)); 3236 3237 /* Check for timeout */ 3238 dev_err(&dd->pdev->dev, 3239 "Timed out waiting for FTL rebuild to complete (%d secs).\n", 3240 jiffies_to_msecs(jiffies - start) / 1000); 3241 return -EFAULT; 3242 } 3243 3244 /* 3245 * service thread to issue queued commands 3246 * 3247 * @data Pointer to the driver data structure. 3248 * 3249 * return value 3250 * 0 3251 */ 3252 3253 static int mtip_service_thread(void *data) 3254 { 3255 struct driver_data *dd = (struct driver_data *)data; 3256 unsigned long slot, slot_start, slot_wrap; 3257 unsigned int num_cmd_slots = dd->slot_groups * 32; 3258 struct mtip_port *port = dd->port; 3259 int ret; 3260 3261 while (1) { 3262 /* 3263 * sleep until a service action is pending and neither an 3264 * internal command is in progress nor error handling is active 3265 */ 3266 wait_event_interruptible(port->svc_wait, (port->flags) && 3267 !(port->flags & MTIP_PF_PAUSE_IO)); 3268 3269 if (kthread_should_stop()) 3270 goto st_out; 3271 3272 set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); 3273 3274 /* If I am an orphan, start self cleanup */ 3275 if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags)) 3276 break; 3277 3278 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 3279 &dd->dd_flag))) 3280 goto st_out; 3281 3282 if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { 3283 slot = 1; 3284 /* used to restrict the loop to one iteration */ 3285 slot_start = num_cmd_slots; 3286 slot_wrap = 0; 3287 while (1) { 3288 slot = find_next_bit(port->cmds_to_issue, 3289 num_cmd_slots, slot); 3290 if (slot_wrap == 1) { 3291 if ((slot_start >= slot) || 3292 (slot >= num_cmd_slots)) 3293 break; 3294 } 3295 if (unlikely(slot_start == num_cmd_slots)) 3296 slot_start = slot; 3297 3298 if (unlikely(slot == num_cmd_slots)) { 3299 slot = 1; 3300 slot_wrap = 1; 3301 continue; 3302 } 3303 3304 /* Issue the command to the hardware */ 3305 mtip_issue_ncq_command(port, slot); 3306 3307 clear_bit(slot, port->cmds_to_issue); 3308 } 3309 3310 clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); 3311 } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { 3312 if (mtip_ftl_rebuild_poll(dd) < 0) 3313 set_bit(MTIP_DDF_REBUILD_FAILED_BIT, 3314 &dd->dd_flag); 3315 clear_bit(MTIP_PF_REBUILD_BIT, &port->flags); 3316 } 3317 clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); 3318 3319 if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) 3320 goto st_out; 3321 } 3322 3323 /* wait for pci remove to exit */ 3324 while (1) { 3325 if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag)) 3326 break; 3327 msleep_interruptible(1000); 3328 if (kthread_should_stop()) 3329 goto st_out;
3330 } 3331 3332 while (1) { 3333 ret = mtip_free_orphan(dd); 3334 if (!ret) { 3335 /* NOTE: All data structures are invalid, do not 3336 * access any here */ 3337 return 0; 3338 } 3339 msleep_interruptible(1000); 3340 if (kthread_should_stop()) 3341 goto st_out; 3342 } 3343 st_out: 3344 return 0; 3345 } 3346 3347 /* 3348 * DMA region teardown 3349 * 3350 * @dd Pointer to driver_data structure 3351 * 3352 * return value 3353 * None 3354 */ 3355 static void mtip_dma_free(struct driver_data *dd) 3356 { 3357 int i; 3358 struct mtip_port *port = dd->port; 3359 3360 if (port->block1) 3361 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, 3362 port->block1, port->block1_dma); 3363 3364 if (port->command_list) { 3365 dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, 3366 port->command_list, port->command_list_dma); 3367 } 3368 3369 for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) { 3370 if (port->commands[i].command) 3371 dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, 3372 port->commands[i].command, 3373 port->commands[i].command_dma); 3374 } 3375 } 3376 3377 /* 3378 * DMA region setup 3379 * 3380 * @dd Pointer to driver_data structure 3381 * 3382 * return value 3383 * -ENOMEM Not enough free DMA region space to initialize driver 3384 */ 3385 static int mtip_dma_alloc(struct driver_data *dd) 3386 { 3387 struct mtip_port *port = dd->port; 3388 int i, rv = 0; 3389 u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; 3390 3391 /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */ 3392 port->block1 = 3393 dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, 3394 &port->block1_dma, GFP_KERNEL); 3395 if (!port->block1) 3396 return -ENOMEM; 3397 memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ); 3398 3399 /* Allocate dma memory for command list */ 3400 port->command_list = 3401 dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, 3402 &port->command_list_dma, GFP_KERNEL); 3403 if (!port->command_list) { 3404 dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, 3405 port->block1, port->block1_dma); 3406 port->block1 = NULL; 3407 port->block1_dma = 0; 3408 return -ENOMEM; 3409 } 3410 memset(port->command_list, 0, AHCI_CMD_TBL_SZ); 3411 3412 /* Setup all pointers into first DMA region */ 3413 port->rxfis = port->block1 + AHCI_RX_FIS_OFFSET; 3414 port->rxfis_dma = port->block1_dma + AHCI_RX_FIS_OFFSET; 3415 port->identify = port->block1 + AHCI_IDFY_OFFSET; 3416 port->identify_dma = port->block1_dma + AHCI_IDFY_OFFSET; 3417 port->log_buf = port->block1 + AHCI_SECTBUF_OFFSET; 3418 port->log_buf_dma = port->block1_dma + AHCI_SECTBUF_OFFSET; 3419 port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET; 3420 port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET; 3421 3422 /* Setup per command SGL DMA region */ 3423 3424 /* Point the command headers at the command tables */ 3425 for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) { 3426 port->commands[i].command = 3427 dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, 3428 &port->commands[i].command_dma, GFP_KERNEL); 3429 if (!port->commands[i].command) { 3430 rv = -ENOMEM; 3431 mtip_dma_free(dd); 3432 return rv; 3433 } 3434 memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ); 3435 3436 port->commands[i].command_header = port->command_list + 3437 (sizeof(struct mtip_cmd_hdr) * i); 3438 port->commands[i].command_header_dma = 3439 dd->port->command_list_dma + 3440 (sizeof(struct mtip_cmd_hdr) * i); 3441 3442 if (host_cap_64) 3443 port->commands[i].command_header->ctbau = 3444 __force_bit2int cpu_to_le32( 3445
(port->commands[i].command_dma >> 16) >> 16); 3446 3447 port->commands[i].command_header->ctba = 3448 __force_bit2int cpu_to_le32( 3449 port->commands[i].command_dma & 0xFFFFFFFF); 3450 3451 sg_init_table(port->commands[i].sg, MTIP_MAX_SG); 3452 3453 /* Mark command as currently inactive */ 3454 atomic_set(&dd->port->commands[i].active, 0); 3455 } 3456 return 0; 3457 } 3458 3459 /* 3460 * Called once for each card. 3461 * 3462 * @dd Pointer to the driver data structure. 3463 * 3464 * return value 3465 * 0 on success, else an error code. 3466 */ 3467 static int mtip_hw_init(struct driver_data *dd) 3468 { 3469 int i; 3470 int rv; 3471 unsigned int num_command_slots; 3472 unsigned long timeout, timetaken; 3473 unsigned char *buf; 3474 struct smart_attr attr242; 3475 3476 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR]; 3477 3478 mtip_detect_product(dd); 3479 if (dd->product_type == MTIP_PRODUCT_UNKNOWN) { 3480 rv = -EIO; 3481 goto out1; 3482 } 3483 num_command_slots = dd->slot_groups * 32; 3484 3485 hba_setup(dd); 3486 3487 dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL, 3488 dd->numa_node); 3489 if (!dd->port) { 3490 dev_err(&dd->pdev->dev, 3491 "Memory allocation: port structure\n"); 3492 return -ENOMEM; 3493 } 3494 3495 /* Continue workqueue setup */ 3496 for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++) 3497 dd->work[i].port = dd->port; 3498 3499 /* Enable unaligned IO constraints for some devices */ 3500 if (mtip_device_unaligned_constrained(dd)) 3501 dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS; 3502 else 3503 dd->unal_qdepth = 0; 3504 3505 /* Counting semaphore to track command slot usage */ 3506 sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth); 3507 sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth); 3508 3509 /* Spinlock to prevent concurrent issue */ 3510 for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++) 3511 spin_lock_init(&dd->port->cmd_issue_lock[i]); 3512 3513 /* Set the port mmio base address. */ 3514 dd->port->mmio = dd->mmio + PORT_OFFSET; 3515 dd->port->dd = dd; 3516 3517 /* DMA allocations */ 3518 rv = mtip_dma_alloc(dd); 3519 if (rv < 0) 3520 goto out1; 3521 3522 /* Setup the pointers to the extended s_active and CI registers. */ 3523 for (i = 0; i < dd->slot_groups; i++) { 3524 dd->port->s_active[i] = 3525 dd->port->mmio + i*0x80 + PORT_SCR_ACT; 3526 dd->port->cmd_issue[i] = 3527 dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE; 3528 dd->port->completed[i] = 3529 dd->port->mmio + i*0x80 + PORT_SDBV; 3530 } 3531 3532 timetaken = jiffies; 3533 timeout = jiffies + msecs_to_jiffies(30000); 3534 while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) && 3535 time_before(jiffies, timeout)) { 3536 mdelay(100); 3537 } 3538 if (unlikely(mtip_check_surprise_removal(dd->pdev))) { 3539 timetaken = jiffies - timetaken; 3540 dev_warn(&dd->pdev->dev, 3541 "Surprise removal detected at %u ms\n", 3542 jiffies_to_msecs(timetaken)); 3543 rv = -ENODEV; 3544 goto out2 ; 3545 } 3546 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { 3547 timetaken = jiffies - timetaken; 3548 dev_warn(&dd->pdev->dev, 3549 "Removal detected at %u ms\n", 3550 jiffies_to_msecs(timetaken)); 3551 rv = -EFAULT; 3552 goto out2; 3553 } 3554 3555 /* Conditionally reset the HBA. 
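 * The reset is performed only when HOST_CAP_NZDMA (bit 19 of HOST_CAP)
 * is clear; otherwise the HBA is left running and any stale interrupt
 * status is simply acknowledged below.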
*/ 3556 if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) { 3557 if (mtip_hba_reset(dd) < 0) { 3558 dev_err(&dd->pdev->dev, 3559 "Card did not reset within timeout\n"); 3560 rv = -EIO; 3561 goto out2; 3562 } 3563 } else { 3564 /* Clear any pending interrupts on the HBA */ 3565 writel(readl(dd->mmio + HOST_IRQ_STAT), 3566 dd->mmio + HOST_IRQ_STAT); 3567 } 3568 3569 mtip_init_port(dd->port); 3570 mtip_start_port(dd->port); 3571 3572 /* Setup the ISR and enable interrupts. */ 3573 rv = devm_request_irq(&dd->pdev->dev, 3574 dd->pdev->irq, 3575 mtip_irq_handler, 3576 IRQF_SHARED, 3577 dev_driver_string(&dd->pdev->dev), 3578 dd); 3579 3580 if (rv) { 3581 dev_err(&dd->pdev->dev, 3582 "Unable to allocate IRQ %d\n", dd->pdev->irq); 3583 goto out2; 3584 } 3585 irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding)); 3586 3587 /* Enable interrupts on the HBA. */ 3588 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, 3589 dd->mmio + HOST_CTL); 3590 3591 init_timer(&dd->port->cmd_timer); 3592 init_waitqueue_head(&dd->port->svc_wait); 3593 3594 dd->port->cmd_timer.data = (unsigned long int) dd->port; 3595 dd->port->cmd_timer.function = mtip_timeout_function; 3596 mod_timer(&dd->port->cmd_timer, 3597 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); 3598 3599 3600 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { 3601 rv = -EFAULT; 3602 goto out3; 3603 } 3604 3605 if (mtip_get_identify(dd->port, NULL) < 0) { 3606 rv = -EFAULT; 3607 goto out3; 3608 } 3609 mtip_dump_identify(dd->port); 3610 3611 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == 3612 MTIP_FTL_REBUILD_MAGIC) { 3613 set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); 3614 return MTIP_FTL_REBUILD_MAGIC; 3615 } 3616 3617 /* check write protect, over temp and rebuild statuses */ 3618 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, 3619 dd->port->log_buf, 3620 dd->port->log_buf_dma, 1); 3621 if (rv) { 3622 dev_warn(&dd->pdev->dev, 3623 "Error in READ LOG EXT (10h) command\n"); 3624 /* non-critical error, don't fail the load */ 3625 } else { 3626 buf = (unsigned char *)dd->port->log_buf; 3627 if (buf[259] & 0x1) { 3628 dev_info(&dd->pdev->dev, 3629 "Write protect bit is set.\n"); 3630 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); 3631 } 3632 if (buf[288] == 0xF7) { 3633 dev_info(&dd->pdev->dev, 3634 "Exceeded Tmax, drive in thermal shutdown.\n"); 3635 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); 3636 } 3637 if (buf[288] == 0xBF) { 3638 dev_info(&dd->pdev->dev, 3639 "Drive is in security locked state.\n"); 3640 set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); 3641 } 3642 } 3643 3644 /* get write protect progress */ 3645 memset(&attr242, 0, sizeof(struct smart_attr)); 3646 if (mtip_get_smart_attr(dd->port, 242, &attr242)) 3647 dev_warn(&dd->pdev->dev, 3648 "Unable to check write protect progress\n"); 3649 else 3650 dev_info(&dd->pdev->dev, 3651 "Write protect progress: %u%% (%u blocks)\n", 3652 attr242.cur, le32_to_cpu(attr242.data)); 3653 return rv; 3654 3655 out3: 3656 del_timer_sync(&dd->port->cmd_timer); 3657 3658 /* Disable interrupts on the HBA. */ 3659 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, 3660 dd->mmio + HOST_CTL); 3661 3662 /* Release the IRQ. */ 3663 irq_set_affinity_hint(dd->pdev->irq, NULL); 3664 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); 3665 3666 out2: 3667 mtip_deinit_port(dd->port); 3668 mtip_dma_free(dd); 3669 3670 out1: 3671 /* Free the memory allocated for the port structure.
*/ 3672 kfree(dd->port); 3673 3674 return rv; 3675 } 3676 3677 /* 3678 * Called to deinitialize an interface. 3679 * 3680 * @dd Pointer to the driver data structure. 3681 * 3682 * return value 3683 * 0 3684 */ 3685 static int mtip_hw_exit(struct driver_data *dd) 3686 { 3687 /* 3688 * Send standby immediate (E0h) to the drive so that it 3689 * saves its state. 3690 */ 3691 if (!dd->sr) { 3692 if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) && 3693 !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) 3694 if (mtip_standby_immediate(dd->port)) 3695 dev_warn(&dd->pdev->dev, 3696 "STANDBY IMMEDIATE failed\n"); 3697 3698 /* de-initialize the port. */ 3699 mtip_deinit_port(dd->port); 3700 3701 /* Disable interrupts on the HBA. */ 3702 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, 3703 dd->mmio + HOST_CTL); 3704 } 3705 3706 del_timer_sync(&dd->port->cmd_timer); 3707 3708 /* Release the IRQ. */ 3709 irq_set_affinity_hint(dd->pdev->irq, NULL); 3710 devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); 3711 3712 /* Free dma regions */ 3713 mtip_dma_free(dd); 3714 3715 /* Free the memory allocated for the port structure. */ 3716 kfree(dd->port); 3717 dd->port = NULL; 3718 3719 return 0; 3720 } 3721 3722 /* 3723 * Issue a Standby Immediate command to the device. 3724 * 3725 * This function is called by the Block Layer just before the 3726 * system powers off during a shutdown. 3727 * 3728 * @dd Pointer to the driver data structure. 3729 * 3730 * return value 3731 * 0 3732 */ 3733 static int mtip_hw_shutdown(struct driver_data *dd) 3734 { 3735 /* 3736 * Send standby immediate (E0h) to the drive so that it 3737 * saves its state. 3738 */ 3739 if (!dd->sr && dd->port) 3740 mtip_standby_immediate(dd->port); 3741 3742 return 0; 3743 } 3744 3745 /* 3746 * Suspend function 3747 * 3748 * This function is called by the Block Layer just before the 3749 * system hibernates. 3750 * 3751 * @dd Pointer to the driver data structure. 3752 * 3753 * return value 3754 * 0 Suspend was successful 3755 * -EFAULT Suspend was not successful 3756 */ 3757 static int mtip_hw_suspend(struct driver_data *dd) 3758 { 3759 /* 3760 * Send standby immediate (E0h) to the drive 3761 * so that it saves its state. 3762 */ 3763 if (mtip_standby_immediate(dd->port) != 0) { 3764 dev_err(&dd->pdev->dev, 3765 "Failed standby-immediate command\n"); 3766 return -EFAULT; 3767 } 3768 3769 /* Disable interrupts on the HBA.*/ 3770 writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, 3771 dd->mmio + HOST_CTL); 3772 mtip_deinit_port(dd->port); 3773 3774 return 0; 3775 } 3776 3777 /* 3778 * Resume function 3779 * 3780 * This function is called by the Block Layer as the 3781 * system resumes. 3782 * 3783 * @dd Pointer to the driver data structure. 3784 * 3785 * return value 3786 * 0 Resume was successful 3787 * -EFAULT Resume was not successful 3788 */ 3789 static int mtip_hw_resume(struct driver_data *dd) 3790 { 3791 /* Perform any needed hardware setup steps */ 3792 hba_setup(dd); 3793 3794 /* Reset the HBA */ 3795 if (mtip_hba_reset(dd) != 0) { 3796 dev_err(&dd->pdev->dev, 3797 "Unable to reset the HBA\n"); 3798 return -EFAULT; 3799 } 3800 3801 /* 3802 * Enable the port, DMA engine, and FIS reception specific 3803 * h/w in controller. 3804 */ 3805 mtip_init_port(dd->port); 3806 mtip_start_port(dd->port); 3807 3808 /* Enable interrupts on the HBA.*/ 3809 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, 3810 dd->mmio + HOST_CTL); 3811 3812 return 0; 3813 } 3814 3815 /* 3816 * Helper function for reusing disk name 3817 * upon hot insertion.
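 *
 * The index is folded into a base-26 suffix the same way sd.c names
 * its disks, so with the "rssd" prefix passed in below:
 *
 *	index 0  -> "rssda"
 *	index 25 -> "rssdz"
 *	index 26 -> "rssdaa"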
3818 */ 3819 static int rssd_disk_name_format(char *prefix, 3820 int index, 3821 char *buf, 3822 int buflen) 3823 { 3824 const int base = 'z' - 'a' + 1; 3825 char *begin = buf + strlen(prefix); 3826 char *end = buf + buflen; 3827 char *p; 3828 int unit; 3829 3830 p = end - 1; 3831 *p = '\0'; 3832 unit = base; 3833 do { 3834 if (p == begin) 3835 return -EINVAL; 3836 *--p = 'a' + (index % unit); 3837 index = (index / unit) - 1; 3838 } while (index >= 0); 3839 3840 memmove(begin, p, end - p); 3841 memcpy(buf, prefix, strlen(prefix)); 3842 3843 return 0; 3844 } 3845 3846 /* 3847 * Block layer IOCTL handler. 3848 * 3849 * @dev Pointer to the block_device structure. 3850 * @mode ignored 3851 * @cmd IOCTL command passed from the user application. 3852 * @arg Argument passed from the user application. 3853 * 3854 * return value 3855 * 0 IOCTL completed successfully. 3856 * -ENOTTY IOCTL not supported or invalid driver data 3857 * structure pointer. 3858 */ 3859 static int mtip_block_ioctl(struct block_device *dev, 3860 fmode_t mode, 3861 unsigned cmd, 3862 unsigned long arg) 3863 { 3864 struct driver_data *dd = dev->bd_disk->private_data; 3865 3866 if (!capable(CAP_SYS_ADMIN)) 3867 return -EACCES; 3868 3869 if (!dd) 3870 return -ENOTTY; 3871 3872 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) 3873 return -ENOTTY; 3874 3875 switch (cmd) { 3876 case BLKFLSBUF: 3877 return -ENOTTY; 3878 default: 3879 return mtip_hw_ioctl(dd, cmd, arg); 3880 } 3881 } 3882 3883 #ifdef CONFIG_COMPAT 3884 /* 3885 * Block layer compat IOCTL handler. 3886 * 3887 * @dev Pointer to the block_device structure. 3888 * @mode ignored 3889 * @cmd IOCTL command passed from the user application. 3890 * @arg Argument passed from the user application. 3891 * 3892 * return value 3893 * 0 IOCTL completed successfully. 3894 * -ENOTTY IOCTL not supported or invalid driver data 3895 * structure pointer. 
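 *
 * The only command needing translation is HDIO_DRIVE_TASKFILE: the
 * native ide_task_request_t ends in two longs (out_size, in_size), so
 * the structure copies below stop 2 * sizeof(compat_long_t) short and
 * those two fields are transferred separately with get_user()/put_user().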
*/ 3897 static int mtip_block_compat_ioctl(struct block_device *dev, 3898 fmode_t mode, 3899 unsigned cmd, 3900 unsigned long arg) 3901 { 3902 struct driver_data *dd = dev->bd_disk->private_data; 3903 3904 if (!capable(CAP_SYS_ADMIN)) 3905 return -EACCES; 3906 3907 if (!dd) 3908 return -ENOTTY; 3909 3910 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) 3911 return -ENOTTY; 3912 3913 switch (cmd) { 3914 case BLKFLSBUF: 3915 return -ENOTTY; 3916 case HDIO_DRIVE_TASKFILE: { 3917 struct mtip_compat_ide_task_request_s __user *compat_req_task; 3918 ide_task_request_t req_task; 3919 int compat_tasksize, outtotal, ret; 3920 3921 compat_tasksize = 3922 sizeof(struct mtip_compat_ide_task_request_s); 3923 3924 compat_req_task = 3925 (struct mtip_compat_ide_task_request_s __user *) arg; 3926 3927 if (copy_from_user(&req_task, (void __user *) arg, 3928 compat_tasksize - (2 * sizeof(compat_long_t)))) 3929 return -EFAULT; 3930 3931 if (get_user(req_task.out_size, &compat_req_task->out_size)) 3932 return -EFAULT; 3933 3934 if (get_user(req_task.in_size, &compat_req_task->in_size)) 3935 return -EFAULT; 3936 3937 outtotal = sizeof(struct mtip_compat_ide_task_request_s); 3938 3939 ret = exec_drive_taskfile(dd, (void __user *) arg, 3940 &req_task, outtotal); 3941 3942 if (copy_to_user((void __user *) arg, &req_task, 3943 compat_tasksize - 3944 (2 * sizeof(compat_long_t)))) 3945 return -EFAULT; 3946 3947 if (put_user(req_task.out_size, &compat_req_task->out_size)) 3948 return -EFAULT; 3949 3950 if (put_user(req_task.in_size, &compat_req_task->in_size)) 3951 return -EFAULT; 3952 3953 return ret; 3954 } 3955 default: 3956 return mtip_hw_ioctl(dd, cmd, arg); 3957 } 3958 } 3959 #endif 3960 3961 /* 3962 * Obtain the geometry of the device. 3963 * 3964 * You may think that this function is obsolete, but some applications, 3965 * fdisk for example, still use CHS values. This function describes the 3966 * device as having 224 heads and 56 sectors per cylinder (224 * 56 = 3967 * 12544 sectors, a multiple of the eight 512-byte sectors in 4KB), so 3968 * each cylinder is aligned on a 4KB boundary. Since a partition is 3969 * described in terms of a start and end cylinder, each partition is 3970 * also 4KB aligned. Non-aligned partitions adversely affect performance. 3971 * 3972 * @dev Pointer to the block_device structure. 3973 * @geo Pointer to a hd_geometry structure. 3974 * 3975 * return value 3976 * 0 Operation completed successfully. 3977 * -ENOTTY An error occurred while reading the drive capacity. 3978 */ 3979 static int mtip_block_getgeo(struct block_device *dev, 3980 struct hd_geometry *geo) 3981 { 3982 struct driver_data *dd = dev->bd_disk->private_data; 3983 sector_t capacity; 3984 3985 if (!dd) 3986 return -ENOTTY; 3987 3988 if (!(mtip_hw_get_capacity(dd, &capacity))) { 3989 dev_warn(&dd->pdev->dev, 3990 "Could not get drive capacity.\n"); 3991 return -ENOTTY; 3992 } 3993 3994 geo->heads = 224; 3995 geo->sectors = 56; 3996 sector_div(capacity, (geo->heads * geo->sectors)); 3997 geo->cylinders = capacity; 3998 return 0; 3999 } 4000 4001 /* 4002 * Block device operations structure. 4003 * 4004 * This structure contains pointers to the functions required by the block 4005 * layer. 4006 */ 4007 static const struct block_device_operations mtip_block_ops = { 4008 .ioctl = mtip_block_ioctl, 4009 #ifdef CONFIG_COMPAT 4010 .compat_ioctl = mtip_block_compat_ioctl, 4011 #endif 4012 .getgeo = mtip_block_getgeo, 4013 .owner = THIS_MODULE 4014 }; 4015 4016 /* 4017 * Block layer make request function.
4018 * 4019 * This function is called by the kernel to process a BIO for 4020 * the P320 device. 4021 * 4022 * @queue Pointer to the request queue. Unused other than to obtain 4023 * the driver data structure. 4024 * @bio Pointer to the BIO. 4025 * 4026 */ 4027 static void mtip_make_request(struct request_queue *queue, struct bio *bio) 4028 { 4029 struct driver_data *dd = queue->queuedata; 4030 struct scatterlist *sg; 4031 struct bio_vec bvec; 4032 struct bvec_iter iter; 4033 int nents = 0; 4034 int tag = 0, unaligned = 0; 4035 4036 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { 4037 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, 4038 &dd->dd_flag))) { 4039 bio_endio(bio, -ENXIO); 4040 return; 4041 } 4042 if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) { 4043 bio_endio(bio, -ENODATA); 4044 return; 4045 } 4046 if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT, 4047 &dd->dd_flag) && 4048 bio_data_dir(bio))) { 4049 bio_endio(bio, -ENODATA); 4050 return; 4051 } 4052 if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) { 4053 bio_endio(bio, -ENODATA); 4054 return; 4055 } 4056 if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { 4057 bio_endio(bio, -ENXIO); 4058 return; 4059 } 4060 } 4061 4062 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 4063 bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector, 4064 bio_sectors(bio))); 4065 return; 4066 } 4067 4068 if (unlikely(!bio_has_data(bio))) { 4069 blk_queue_flush(queue, 0); 4070 bio_endio(bio, 0); 4071 return; 4072 } 4073 4074 if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && 4075 dd->unal_qdepth) { 4076 if (bio->bi_iter.bi_sector % 8 != 0) 4077 /* Unaligned on 4k boundaries */ 4078 unaligned = 1; 4079 else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ 4080 unaligned = 1; 4081 } 4082 4083 sg = mtip_hw_get_scatterlist(dd, &tag, unaligned); 4084 if (likely(sg != NULL)) { 4085 blk_queue_bounce(queue, &bio); 4086 4087 if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) { 4088 dev_warn(&dd->pdev->dev, 4089 "Maximum number of SGL entries exceeded\n"); 4090 bio_io_error(bio); 4091 mtip_hw_release_scatterlist(dd, tag, unaligned); 4092 return; 4093 } 4094 4095 /* Create the scatter list for this bio. */ 4096 bio_for_each_segment(bvec, bio, iter) { 4097 sg_set_page(&sg[nents], 4098 bvec.bv_page, 4099 bvec.bv_len, 4100 bvec.bv_offset); 4101 nents++; 4102 } 4103 4104 /* Issue the read/write. */ 4105 mtip_hw_submit_io(dd, 4106 bio->bi_iter.bi_sector, 4107 bio_sectors(bio), 4108 nents, 4109 tag, 4110 bio_endio, 4111 bio, 4112 bio_data_dir(bio), 4113 unaligned); 4114 } else 4115 bio_io_error(bio); 4116 } 4117 4118 /* 4119 * Block layer initialization function. 4120 * 4121 * This function is called once by the PCI layer for each P320 4122 * device that is connected to the system. 4123 * 4124 * @dd Pointer to the driver data structure. 4125 * 4126 * return value 4127 * 0 on success else an error code. 4128 */ 4129 static int mtip_block_initialize(struct driver_data *dd) 4130 { 4131 int rv = 0, wait_for_rebuild = 0; 4132 sector_t capacity; 4133 unsigned int index = 0; 4134 struct kobject *kobj; 4135 unsigned char thd_name[16]; 4136 4137 if (dd->disk) 4138 goto skip_create_disk; /* hw init done, before rebuild */ 4139 4140 /* Initialize the protocol layer. 
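 * mtip_hw_init() returns MTIP_FTL_REBUILD_MAGIC when the drive is still
 * rebuilding its FTL; in that case only the service thread is started
 * and request queue creation plus add_disk() are deferred until the
 * rebuild completes (see mtip_ftl_rebuild_poll()).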
/*
 * Block layer initialization function.
 *
 * This function is called once by the PCI layer for each P320
 * device that is connected to the system.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_block_initialize(struct driver_data *dd)
{
	int rv = 0, wait_for_rebuild = 0;
	sector_t capacity;
	unsigned int index = 0;
	struct kobject *kobj;
	unsigned char thd_name[16];

	if (dd->disk)
		goto skip_create_disk; /* hw init done, before rebuild */

	/* Initialize the protocol layer. */
	wait_for_rebuild = mtip_hw_init(dd);
	if (wait_for_rebuild < 0) {
		dev_err(&dd->pdev->dev,
			"Protocol layer initialization failed\n");
		rv = -EINVAL;
		goto protocol_init_error;
	}

	dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node);
	if (dd->disk == NULL) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate gendisk structure\n");
		rv = -EINVAL;
		goto alloc_disk_error;
	}

	/* Generate the disk name, implemented the same way as in sd.c */
	do {
		if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
			goto ida_get_error;

		spin_lock(&rssd_index_lock);
		rv = ida_get_new(&rssd_index_ida, &index);
		spin_unlock(&rssd_index_lock);
	} while (rv == -EAGAIN);

	if (rv)
		goto ida_get_error;

	rv = rssd_disk_name_format("rssd",
				index,
				dd->disk->disk_name,
				DISK_NAME_LEN);
	if (rv)
		goto disk_index_error;

	dd->disk->driverfs_dev = &dd->pdev->dev;
	dd->disk->major = dd->major;
	dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS;
	dd->disk->fops = &mtip_block_ops;
	dd->disk->private_data = dd;
	dd->index = index;

	mtip_hw_debugfs_init(dd);

	/*
	 * If a rebuild is pending, start the service thread and delay the
	 * block queue creation and add_disk().
	 */
	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
		goto start_service_thread;

skip_create_disk:
	/* Allocate the request queue. */
	dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node);
	if (dd->queue == NULL) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate request queue\n");
		rv = -ENOMEM;
		goto block_queue_alloc_init_error;
	}

	/* Attach our request function to the request queue. */
	blk_queue_make_request(dd->queue, mtip_make_request);

	dd->disk->queue = dd->queue;
	dd->queue->queuedata = dd;

	/* Set device limits. */
	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
	blk_queue_physical_block_size(dd->queue, 4096);
	blk_queue_max_hw_sectors(dd->queue, 0xffff);
	blk_queue_max_segment_size(dd->queue, 0x400000);
	blk_queue_io_min(dd->queue, 4096);
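	/*
	 * For reference (approximate, derived from the values above rather
	 * than a device datasheet): with 512-byte sectors, the 0xffff cap
	 * allows just under 32MB per command; each of the MTIP_MAX_SG
	 * segments may span at most 4MB; and the 4096-byte physical-block
	 * and io-min hints advertise the drive's 4KB internal granularity
	 * to the partitioning and filesystem layers.
	 */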
4248 */ 4249 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); 4250 if (kobj) { 4251 mtip_hw_sysfs_init(dd, kobj); 4252 kobject_put(kobj); 4253 } 4254 4255 if (dd->mtip_svc_handler) { 4256 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); 4257 return rv; /* service thread created for handling rebuild */ 4258 } 4259 4260 start_service_thread: 4261 sprintf(thd_name, "mtip_svc_thd_%02d", index); 4262 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, 4263 dd, dd->numa_node, "%s", 4264 thd_name); 4265 4266 if (IS_ERR(dd->mtip_svc_handler)) { 4267 dev_err(&dd->pdev->dev, "service thread failed to start\n"); 4268 dd->mtip_svc_handler = NULL; 4269 rv = -EFAULT; 4270 goto kthread_run_error; 4271 } 4272 wake_up_process(dd->mtip_svc_handler); 4273 if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) 4274 rv = wait_for_rebuild; 4275 4276 return rv; 4277 4278 kthread_run_error: 4279 bdput(dd->bdev); 4280 dd->bdev = NULL; 4281 4282 /* Delete our gendisk. This also removes the device from /dev */ 4283 del_gendisk(dd->disk); 4284 4285 read_capacity_error: 4286 blk_cleanup_queue(dd->queue); 4287 4288 block_queue_alloc_init_error: 4289 mtip_hw_debugfs_exit(dd); 4290 disk_index_error: 4291 spin_lock(&rssd_index_lock); 4292 ida_remove(&rssd_index_ida, index); 4293 spin_unlock(&rssd_index_lock); 4294 4295 ida_get_error: 4296 put_disk(dd->disk); 4297 4298 alloc_disk_error: 4299 mtip_hw_exit(dd); /* De-initialize the protocol layer. */ 4300 4301 protocol_init_error: 4302 return rv; 4303 } 4304 4305 /* 4306 * Block layer deinitialization function. 4307 * 4308 * Called by the PCI layer as each P320 device is removed. 4309 * 4310 * @dd Pointer to the driver data structure. 4311 * 4312 * return value 4313 * 0 4314 */ 4315 static int mtip_block_remove(struct driver_data *dd) 4316 { 4317 struct kobject *kobj; 4318 4319 if (!dd->sr) { 4320 mtip_hw_debugfs_exit(dd); 4321 4322 if (dd->mtip_svc_handler) { 4323 set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); 4324 wake_up_interruptible(&dd->port->svc_wait); 4325 kthread_stop(dd->mtip_svc_handler); 4326 } 4327 4328 /* Clean up the sysfs attributes, if created */ 4329 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { 4330 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); 4331 if (kobj) { 4332 mtip_hw_sysfs_exit(dd, kobj); 4333 kobject_put(kobj); 4334 } 4335 } 4336 /* 4337 * Delete our gendisk structure. This also removes the device 4338 * from /dev 4339 */ 4340 if (dd->bdev) { 4341 bdput(dd->bdev); 4342 dd->bdev = NULL; 4343 } 4344 if (dd->disk) { 4345 if (dd->disk->queue) { 4346 del_gendisk(dd->disk); 4347 blk_cleanup_queue(dd->queue); 4348 dd->queue = NULL; 4349 } else 4350 put_disk(dd->disk); 4351 } 4352 dd->disk = NULL; 4353 4354 spin_lock(&rssd_index_lock); 4355 ida_remove(&rssd_index_ida, dd->index); 4356 spin_unlock(&rssd_index_lock); 4357 } else { 4358 dev_info(&dd->pdev->dev, "device %s surprise removal\n", 4359 dd->disk->disk_name); 4360 } 4361 4362 /* De-initialize the protocol layer. */ 4363 mtip_hw_exit(dd); 4364 4365 return 0; 4366 } 4367 4368 /* 4369 * Function called by the PCI layer when just before the 4370 * machine shuts down. 4371 * 4372 * If a protocol layer shutdown function is present it will be called 4373 * by this function. 4374 * 4375 * @dd Pointer to the driver data structure. 4376 * 4377 * return value 4378 * 0 4379 */ 4380 static int mtip_block_shutdown(struct driver_data *dd) 4381 { 4382 /* Delete our gendisk structure, and cleanup the blk queue. 
/*
 * Function called by the PCI layer just before the
 * machine shuts down.
 *
 * If a protocol layer shutdown function is present it will be called
 * by this function.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_shutdown(struct driver_data *dd)
{
	/* Delete our gendisk structure, and cleanup the blk queue. */
	if (dd->disk) {
		dev_info(&dd->pdev->dev,
			"Shutting down %s ...\n", dd->disk->disk_name);

		if (dd->disk->queue) {
			del_gendisk(dd->disk);
			blk_cleanup_queue(dd->queue);
		} else
			put_disk(dd->disk);
		dd->disk = NULL;
		dd->queue = NULL;
	}

	spin_lock(&rssd_index_lock);
	ida_remove(&rssd_index_ida, dd->index);
	spin_unlock(&rssd_index_lock);

	mtip_hw_shutdown(dd);
	return 0;
}

static int mtip_block_suspend(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev,
		"Suspending %s ...\n", dd->disk->disk_name);
	mtip_hw_suspend(dd);
	return 0;
}

static int mtip_block_resume(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev, "Resuming %s ...\n",
		dd->disk->disk_name);
	mtip_hw_resume(dd);
	return 0;
}

static void drop_cpu(int cpu)
{
	cpu_use[cpu]--;
}

static int get_least_used_cpu_on_node(int node)
{
	int cpu, least_used_cpu, least_cnt;
	const struct cpumask *node_mask;

	node_mask = cpumask_of_node(node);
	least_used_cpu = cpumask_first(node_mask);
	least_cnt = cpu_use[least_used_cpu];
	cpu = least_used_cpu;

	for_each_cpu(cpu, node_mask) {
		if (cpu_use[cpu] < least_cnt) {
			least_used_cpu = cpu;
			least_cnt = cpu_use[cpu];
		}
	}
	cpu_use[least_used_cpu]++;
	return least_used_cpu;
}

/* Helper for selecting a node in round robin mode */
static inline int mtip_get_next_rr_node(void)
{
	static int next_node = -1;

	if (next_node == -1) {
		next_node = first_online_node;
		return next_node;
	}

	next_node = next_online_node(next_node);
	if (next_node == MAX_NUMNODES)
		next_node = first_online_node;
	return next_node;
}

static DEFINE_HANDLER(0);
static DEFINE_HANDLER(1);
static DEFINE_HANDLER(2);
static DEFINE_HANDLER(3);
static DEFINE_HANDLER(4);
static DEFINE_HANDLER(5);
static DEFINE_HANDLER(6);
static DEFINE_HANDLER(7);
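/*
 * For illustration, mtip_get_next_rr_node() walked through on a
 * hypothetical machine with nodes 0 and 1 online:
 *
 *	1st call: next_node == -1          -> returns first_online_node (0)
 *	2nd call: next_online_node(0) == 1 -> returns 1
 *	3rd call: next_online_node(1) == MAX_NUMNODES -> wraps, returns 0
 *
 * The static local state makes the helper non-reentrant; that appears to
 * be acceptable here since it is only called from probe context.
 */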
/*
 * Called for each supported PCI device detected.
 *
 * This function allocates the private data structure, enables the
 * PCI device and then calls the block layer initialization function.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	int rv = 0;
	struct driver_data *dd = NULL;
	char cpu_list[256];
	const struct cpumask *node_mask;
	int cpu, i = 0, j = 0;
	int my_node = NUMA_NO_NODE;
	unsigned long flags;

	/* Allocate memory for this device's private data. */
	my_node = pcibus_to_node(pdev->bus);
	if (my_node != NUMA_NO_NODE) {
		if (!node_online(my_node))
			my_node = mtip_get_next_rr_node();
	} else {
		dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
		my_node = mtip_get_next_rr_node();
	}
	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
		cpu_to_node(smp_processor_id()), smp_processor_id());

	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
	if (dd == NULL) {
		dev_err(&pdev->dev,
			"Unable to allocate memory for driver data\n");
		return -ENOMEM;
	}

	/* Attach the private data to this PCI device. */
	pci_set_drvdata(pdev, dd);

	rv = pcim_enable_device(pdev);
	if (rv < 0) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto iomap_err;
	}

	/* Map BAR5 to memory. */
	rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
	if (rv < 0) {
		dev_err(&pdev->dev, "Unable to map regions\n");
		goto iomap_err;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));

		if (rv) {
			rv = pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32));
			if (rv) {
				dev_warn(&pdev->dev,
					"64-bit DMA enable failed\n");
				goto setmask_err;
			}
		}
	}
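	/*
	 * Note: the net effect of the mask setup above appears to be
	 * "64-bit streaming DMA if available, coherent allocations at
	 * 64-bit with a 32-bit fallback". If the initial pci_set_dma_mask()
	 * call fails, the device silently stays on the default 32-bit mask.
	 * A more compact sketch of the same intent on later kernels
	 * (hypothetical, not this driver's code):
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
	 *		rv = dma_set_mask_and_coherent(&pdev->dev,
	 *					       DMA_BIT_MASK(32));
	 */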
4619 } 4620 4621 /* Initialize the block layer. */ 4622 rv = mtip_block_initialize(dd); 4623 if (rv < 0) { 4624 dev_err(&pdev->dev, 4625 "Unable to initialize block layer\n"); 4626 goto block_initialize_err; 4627 } 4628 4629 /* 4630 * Increment the instance count so that each device has a unique 4631 * instance number. 4632 */ 4633 instance++; 4634 if (rv != MTIP_FTL_REBUILD_MAGIC) 4635 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); 4636 else 4637 rv = 0; /* device in rebuild state, return 0 from probe */ 4638 4639 /* Add to online list even if in ftl rebuild */ 4640 spin_lock_irqsave(&dev_lock, flags); 4641 list_add(&dd->online_list, &online_list); 4642 spin_unlock_irqrestore(&dev_lock, flags); 4643 4644 goto done; 4645 4646 block_initialize_err: 4647 pci_disable_msi(pdev); 4648 if (dd->isr_workq) { 4649 flush_workqueue(dd->isr_workq); 4650 destroy_workqueue(dd->isr_workq); 4651 drop_cpu(dd->work[0].cpu_binding); 4652 drop_cpu(dd->work[1].cpu_binding); 4653 drop_cpu(dd->work[2].cpu_binding); 4654 } 4655 setmask_err: 4656 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); 4657 4658 iomap_err: 4659 kfree(dd); 4660 pci_set_drvdata(pdev, NULL); 4661 return rv; 4662 done: 4663 return rv; 4664 } 4665 4666 /* 4667 * Called for each probed device when the device is removed or the 4668 * driver is unloaded. 4669 * 4670 * return value 4671 * None 4672 */ 4673 static void mtip_pci_remove(struct pci_dev *pdev) 4674 { 4675 struct driver_data *dd = pci_get_drvdata(pdev); 4676 unsigned long flags, to; 4677 4678 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); 4679 4680 spin_lock_irqsave(&dev_lock, flags); 4681 list_del_init(&dd->online_list); 4682 list_add(&dd->remove_list, &removing_list); 4683 spin_unlock_irqrestore(&dev_lock, flags); 4684 4685 mtip_check_surprise_removal(pdev); 4686 synchronize_irq(dd->pdev->irq); 4687 4688 /* Spin until workers are done */ 4689 to = jiffies + msecs_to_jiffies(4000); 4690 do { 4691 msleep(20); 4692 } while (atomic_read(&dd->irq_workers_active) != 0 && 4693 time_before(jiffies, to)); 4694 4695 if (atomic_read(&dd->irq_workers_active) != 0) { 4696 dev_warn(&dd->pdev->dev, 4697 "Completion workers still active!\n"); 4698 } 4699 /* Cleanup the outstanding commands */ 4700 mtip_command_cleanup(dd); 4701 4702 /* Clean up the block layer. */ 4703 mtip_block_remove(dd); 4704 4705 if (dd->isr_workq) { 4706 flush_workqueue(dd->isr_workq); 4707 destroy_workqueue(dd->isr_workq); 4708 drop_cpu(dd->work[0].cpu_binding); 4709 drop_cpu(dd->work[1].cpu_binding); 4710 drop_cpu(dd->work[2].cpu_binding); 4711 } 4712 4713 pci_disable_msi(pdev); 4714 4715 spin_lock_irqsave(&dev_lock, flags); 4716 list_del_init(&dd->remove_list); 4717 spin_unlock_irqrestore(&dev_lock, flags); 4718 4719 if (!dd->sr) 4720 kfree(dd); 4721 else 4722 set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag); 4723 4724 pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); 4725 pci_set_drvdata(pdev, NULL); 4726 pci_dev_put(pdev); 4727 4728 } 4729 4730 /* 4731 * Called for each probed device when the device is suspended. 
4732 * 4733 * return value 4734 * 0 Success 4735 * <0 Error 4736 */ 4737 static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg) 4738 { 4739 int rv = 0; 4740 struct driver_data *dd = pci_get_drvdata(pdev); 4741 4742 if (!dd) { 4743 dev_err(&pdev->dev, 4744 "Driver private datastructure is NULL\n"); 4745 return -EFAULT; 4746 } 4747 4748 set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); 4749 4750 /* Disable ports & interrupts then send standby immediate */ 4751 rv = mtip_block_suspend(dd); 4752 if (rv < 0) { 4753 dev_err(&pdev->dev, 4754 "Failed to suspend controller\n"); 4755 return rv; 4756 } 4757 4758 /* 4759 * Save the pci config space to pdev structure & 4760 * disable the device 4761 */ 4762 pci_save_state(pdev); 4763 pci_disable_device(pdev); 4764 4765 /* Move to Low power state*/ 4766 pci_set_power_state(pdev, PCI_D3hot); 4767 4768 return rv; 4769 } 4770 4771 /* 4772 * Called for each probed device when the device is resumed. 4773 * 4774 * return value 4775 * 0 Success 4776 * <0 Error 4777 */ 4778 static int mtip_pci_resume(struct pci_dev *pdev) 4779 { 4780 int rv = 0; 4781 struct driver_data *dd; 4782 4783 dd = pci_get_drvdata(pdev); 4784 if (!dd) { 4785 dev_err(&pdev->dev, 4786 "Driver private datastructure is NULL\n"); 4787 return -EFAULT; 4788 } 4789 4790 /* Move the device to active State */ 4791 pci_set_power_state(pdev, PCI_D0); 4792 4793 /* Restore PCI configuration space */ 4794 pci_restore_state(pdev); 4795 4796 /* Enable the PCI device*/ 4797 rv = pcim_enable_device(pdev); 4798 if (rv < 0) { 4799 dev_err(&pdev->dev, 4800 "Failed to enable card during resume\n"); 4801 goto err; 4802 } 4803 pci_set_master(pdev); 4804 4805 /* 4806 * Calls hbaReset, initPort, & startPort function 4807 * then enables interrupts 4808 */ 4809 rv = mtip_block_resume(dd); 4810 if (rv < 0) 4811 dev_err(&pdev->dev, "Unable to resume\n"); 4812 4813 err: 4814 clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag); 4815 4816 return rv; 4817 } 4818 4819 /* 4820 * Shutdown routine 4821 * 4822 * return value 4823 * None 4824 */ 4825 static void mtip_pci_shutdown(struct pci_dev *pdev) 4826 { 4827 struct driver_data *dd = pci_get_drvdata(pdev); 4828 if (dd) 4829 mtip_block_shutdown(dd); 4830 } 4831 4832 /* Table of device ids supported by this driver. */ 4833 static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = { 4834 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) }, 4835 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) }, 4836 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) }, 4837 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) }, 4838 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) }, 4839 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) }, 4840 { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) }, 4841 { 0 } 4842 }; 4843 4844 /* Structure that describes the PCI driver functions. */ 4845 static struct pci_driver mtip_pci_driver = { 4846 .name = MTIP_DRV_NAME, 4847 .id_table = mtip_pci_tbl, 4848 .probe = mtip_pci_probe, 4849 .remove = mtip_pci_remove, 4850 .suspend = mtip_pci_suspend, 4851 .resume = mtip_pci_resume, 4852 .shutdown = mtip_pci_shutdown, 4853 }; 4854 4855 MODULE_DEVICE_TABLE(pci, mtip_pci_tbl); 4856 4857 /* 4858 * Module initialization function. 4859 * 4860 * Called once when the module is loaded. This function allocates a major 4861 * block device number to the Cyclone devices and registers the PCI layer 4862 * of the driver. 4863 * 4864 * Return value 4865 * 0 on success else error code. 
4866 */ 4867 static int __init mtip_init(void) 4868 { 4869 int error; 4870 4871 pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); 4872 4873 spin_lock_init(&dev_lock); 4874 4875 INIT_LIST_HEAD(&online_list); 4876 INIT_LIST_HEAD(&removing_list); 4877 4878 /* Allocate a major block device number to use with this driver. */ 4879 error = register_blkdev(0, MTIP_DRV_NAME); 4880 if (error <= 0) { 4881 pr_err("Unable to register block device (%d)\n", 4882 error); 4883 return -EBUSY; 4884 } 4885 mtip_major = error; 4886 4887 dfs_parent = debugfs_create_dir("rssd", NULL); 4888 if (IS_ERR_OR_NULL(dfs_parent)) { 4889 pr_warn("Error creating debugfs parent\n"); 4890 dfs_parent = NULL; 4891 } 4892 if (dfs_parent) { 4893 dfs_device_status = debugfs_create_file("device_status", 4894 S_IRUGO, dfs_parent, NULL, 4895 &mtip_device_status_fops); 4896 if (IS_ERR_OR_NULL(dfs_device_status)) { 4897 pr_err("Error creating device_status node\n"); 4898 dfs_device_status = NULL; 4899 } 4900 } 4901 4902 /* Register our PCI operations. */ 4903 error = pci_register_driver(&mtip_pci_driver); 4904 if (error) { 4905 debugfs_remove(dfs_parent); 4906 unregister_blkdev(mtip_major, MTIP_DRV_NAME); 4907 } 4908 4909 return error; 4910 } 4911 4912 /* 4913 * Module de-initialization function. 4914 * 4915 * Called once when the module is unloaded. This function deallocates 4916 * the major block device number allocated by mtip_init() and 4917 * unregisters the PCI layer of the driver. 4918 * 4919 * Return value 4920 * none 4921 */ 4922 static void __exit mtip_exit(void) 4923 { 4924 debugfs_remove_recursive(dfs_parent); 4925 4926 /* Release the allocated major block device number. */ 4927 unregister_blkdev(mtip_major, MTIP_DRV_NAME); 4928 4929 /* Unregister the PCI driver. */ 4930 pci_unregister_driver(&mtip_pci_driver); 4931 } 4932 4933 MODULE_AUTHOR("Micron Technology, Inc"); 4934 MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver"); 4935 MODULE_LICENSE("GPL"); 4936 MODULE_VERSION(MTIP_DRV_VERSION); 4937 4938 module_init(mtip_init); 4939 module_exit(mtip_exit); 4940