/*
 * Driver for the Micron P320 SSD
 * Copyright (C) 2011 Micron Technology, Inc.
 *
 * Portions of this code were derived from works subjected to the
 * following copyright:
 * Copyright (C) 2009 Integrated Device Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/prefetch.h>
#include "mtip32xx.h"

#define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)

/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
#define AHCI_RX_FIS_SZ		0x100
#define AHCI_RX_FIS_OFFSET	0x0
#define AHCI_IDFY_SZ		ATA_SECT_SIZE
#define AHCI_IDFY_OFFSET	0x400
#define AHCI_SECTBUF_SZ		ATA_SECT_SIZE
#define AHCI_SECTBUF_OFFSET	0x800
#define AHCI_SMARTBUF_SZ	ATA_SECT_SIZE
#define AHCI_SMARTBUF_OFFSET	0xC00
/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
#define BLOCK_DMA_ALLOC_SZ	4096
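/*
 * For reference, the offsets above lay the shared per-port DMA block
 * out as follows (everything between the listed ranges is padding):
 *
 *	0x000 - 0x0FF	RX FIS area	 (AHCI_RX_FIS_SZ   = 0x100)
 *	0x400 - 0x5FF	IDENTIFY buffer	 (AHCI_IDFY_SZ     = 512)
 *	0x800 - 0x9FF	RLE10 sector buf (AHCI_SECTBUF_SZ  = 512)
 *	0xC00 - 0xDFF	SMART buffer	 (AHCI_SMARTBUF_SZ = 512)
 *	      - 0xFFF	pad out to BLOCK_DMA_ALLOC_SZ (4096)
 */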
/* DMA region containing command table (should be 8192 bytes) */
#define AHCI_CMD_SLOT_SZ	sizeof(struct mtip_cmd_hdr)
#define AHCI_CMD_TBL_SZ		(MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
#define AHCI_CMD_TBL_OFFSET	0x0

/* DMA region per command (contains header and SGL) */
#define AHCI_CMD_TBL_HDR_SZ	0x80
#define AHCI_CMD_TBL_HDR_OFFSET	0x0
#define AHCI_CMD_TBL_SGL_SZ	(MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
#define AHCI_CMD_TBL_SGL_OFFSET	AHCI_CMD_TBL_HDR_SZ
#define CMD_DMA_ALLOC_SZ	(AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)

#define HOST_CAP_NZDMA		(1 << 19)
#define HOST_HSORG		0xFC
#define HSORG_DISABLE_SLOTGRP_INTR	(1 << 24)
#define HSORG_DISABLE_SLOTGRP_PXIS	(1 << 16)
#define HSORG_HWREV		0xFF00
#define HSORG_STYLE		0x8
#define HSORG_SLOTGROUPS	0x7

#define PORT_COMMAND_ISSUE	0x38
#define PORT_SDBV		0x7C

#define PORT_OFFSET		0x100
#define PORT_MEM_SIZE		0x80

#define PORT_IRQ_ERR \
	(PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
	 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
	 PORT_IRQ_OVERFLOW)
#define PORT_IRQ_LEGACY \
	(PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
#define PORT_IRQ_HANDLED \
	(PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
	 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
#define DEF_PORT_IRQ \
	(PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)

/* product numbers */
#define MTIP_PRODUCT_UNKNOWN	0x00
#define MTIP_PRODUCT_ASICFPGA	0x11

/* Device instance number, incremented each time a device is probed. */
static int instance;

struct list_head online_list;
struct list_head removing_list;
spinlock_t dev_lock;

/*
 * Global variable used to hold the major block device number
 * allocated in mtip_init().
 */
static int mtip_major;
static struct dentry *dfs_parent;
static struct dentry *dfs_device_status;

static u32 cpu_use[NR_CPUS];

static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);

static int mtip_block_initialize(struct driver_data *dd);

#ifdef CONFIG_COMPAT
struct mtip_compat_ide_task_request_s {
	__u8		io_ports[8];
	__u8		hob_ports[8];
	ide_reg_valid_t	out_flags;
	ide_reg_valid_t	in_flags;
	int		data_phase;
	int		req_cmd;
	compat_ulong_t	out_size;
	compat_ulong_t	in_size;
};
#endif

/*
 * Check for a surprise removal.
 *
 * This function is called when the card has been removed from the
 * system; it reads the vendor ID from PCI configuration space, which
 * reads back as all ones once the device is gone.
 *
 * @pdev Pointer to the pci_dev structure.
 *
 * return value
 *	true if device removed, else false
 */
static bool mtip_check_surprise_removal(struct pci_dev *pdev)
{
	u16 vendor_id = 0;
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (dd->sr)
		return true;

	/* Read the vendorID from the configuration space */
	pci_read_config_word(pdev, 0x00, &vendor_id);
	if (vendor_id == 0xFFFF) {
		dd->sr = true;
		if (dd->queue)
			set_bit(QUEUE_FLAG_DEAD, &dd->queue->queue_flags);
		else
			dev_warn(&dd->pdev->dev,
				"%s: dd->queue is NULL\n", __func__);
		if (dd->port) {
			set_bit(MTIP_PF_SR_CLEANUP_BIT, &dd->port->flags);
			wake_up_interruptible(&dd->port->svc_wait);
		} else
			dev_warn(&dd->pdev->dev,
				"%s: dd->port is NULL\n", __func__);
		return true; /* device removed */
	}

	return false; /* device present */
}

static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
	struct request *rq;

	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
	return blk_mq_rq_to_pdu(rq);
}

static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
{
	blk_put_request(blk_mq_rq_from_pdu(cmd));
}

/*
 * Once we add support for one hctx per mtip group, this will change a bit
 */
static struct request *mtip_rq_from_tag(struct driver_data *dd,
					unsigned int tag)
{
	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];

	return blk_mq_tag_to_rq(hctx->tags, tag);
}

static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
					unsigned int tag)
{
	struct request *rq = mtip_rq_from_tag(dd, tag);

	return blk_mq_rq_to_pdu(rq);
}
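/*
 * A brief note on tag numbering (an illustrative aside, not used by the
 * code): command slots are grouped 32 to a slot group, so tag 37 lives
 * in group 1 (37 >> 5), bit 5 (37 & 0x1F).  Because the driver currently
 * exposes a single blk-mq hardware queue, mtip_rq_from_tag() can resolve
 * any tag through queue_hw_ctx[0]'s tag map; the comment above it notes
 * that this will change if a queue per slot group is ever introduced.
 */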
/*
 * IO completion function.
 *
 * This completion function is called by the driver ISR when a
 * command that was issued by the kernel completes. It first calls the
 * asynchronous completion function which normally calls back into the block
 * layer passing the asynchronous callback data, then unmaps the
 * scatter list associated with the completed command, and finally
 * clears the allocated bit associated with the completed command.
 *
 * @port   Pointer to the port data structure.
 * @tag    Tag of the command.
 * @cmd    Pointer to the command structure.
 * @status Completion status.
 *
 * return value
 *	None
 */
static void mtip_async_complete(struct mtip_port *port,
				int tag, struct mtip_cmd *cmd, int status)
{
	struct driver_data *dd;
	struct request *rq;

	/* check the port before dereferencing it */
	if (unlikely(!port || !port->dd))
		return;
	dd = port->dd;

	if (unlikely(status == PORT_IRQ_TF_ERR)) {
		dev_warn(&dd->pdev->dev,
			"Command tag %d failed due to TFE\n", tag);
	}

	/* Unmap the DMA scatter list entries */
	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
			cmd->direction);

	rq = mtip_rq_from_tag(dd, tag);

	if (unlikely(cmd->unaligned))
		up(&port->cmd_slot_unal);

	blk_mq_end_request(rq, status ? -EIO : 0);
}

/*
 * Reset the HBA (without sleeping)
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int mtip_hba_reset(struct driver_data *dd)
{
	unsigned long timeout;

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/* Spin for up to 2 seconds, waiting for reset acknowledgement */
	timeout = jiffies + msecs_to_jiffies(2000);
	do {
		mdelay(10);
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
			return -1;

	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout));

	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}

/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port Pointer to the port structure.
 * @tag  The tag of the command to be issued.
 *
 * return value
 *	None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	int group = tag >> 5;

	/* guard SACT and CI registers */
	spin_lock(&port->cmd_issue_lock[group]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
	spin_unlock(&port->cmd_issue_lock[group]);
}
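/*
 * Worked example of the tag math above (assuming the usual definitions
 * of MTIP_TAG_INDEX() and MTIP_TAG_BIT() in mtip32xx.h, i.e. tag / 32
 * and tag % 32): issuing tag 70 takes the group 2 lock (70 >> 5), then
 * writes (1 << 6) to both s_active[2] and cmd_issue[2], since
 * 70 = 2 * 32 + 6.  SACT is written before CI so the HBA treats the
 * slot as a queued (NCQ) command.
 */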
/*
 * Enable/disable the reception of FIS
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled
 */
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);

	return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}

/*
 * Enable/disable the DMA engine
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled.
 */
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* start/stop the DMA engine */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);

	readl(port->mmio + PORT_CMD);
	return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}

/*
 * Enables the port DMA engine and FIS reception.
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}

/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
 *
 * @port Pointer to the port structure
 *
 * return value
 *	None
 */
static inline void mtip_deinit_port(struct mtip_port *port)
{
	/* Disable interrupts on this port */
	writel(0, port->mmio + PORT_IRQ_MASK);

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Disable FIS reception */
	mtip_enable_fis(port, 0);
}

/*
 * Initialize a port.
 *
 * This function deinitializes the port by calling mtip_deinit_port() and
 * then initializes it by setting the command header and RX FIS addresses,
 * clearing the SError register and any pending port interrupts before
 * re-enabling the default set of port interrupts.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	None
 */
static void mtip_init_port(struct mtip_port *port)
{
	int i;

	mtip_deinit_port(port);

	/* Program the command list base and FIS base addresses */
	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
		writel((port->command_list_dma >> 16) >> 16,
			port->mmio + PORT_LST_ADDR_HI);
		writel((port->rxfis_dma >> 16) >> 16,
			port->mmio + PORT_FIS_ADDR_HI);
	}

	writel(port->command_list_dma & 0xFFFFFFFF,
		port->mmio + PORT_LST_ADDR);
	writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);

	/* Clear SError */
	writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);

	/* reset the completed registers.*/
	for (i = 0; i < port->dd->slot_groups; i++)
		writel(0xFFFFFFFF, port->completed[i]);

	/* Clear any pending interrupts for this port */
	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);

	/* Clear any pending interrupts on the HBA. */
	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
		port->dd->mmio + HOST_IRQ_STAT);

	/* Enable port interrupts */
	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}
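/*
 * Aside on the "(dma >> 16) >> 16" idiom above: the upper half of a
 * 64-bit address is extracted with two 16-bit shifts rather than a
 * single ">> 32" so the expression stays well defined when dma_addr_t
 * is only 32 bits wide (shifting a 32-bit value by 32 is undefined
 * behavior in C); in that case the result is simply zero.
 */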
/*
 * Restart a port
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_restart_port(struct mtip_port *port)
{
	unsigned long timeout;

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
	timeout = jiffies + msecs_to_jiffies(500);
	while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
		&& time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/*
	 * Chip quirk: escalate to hba reset if
	 * PxCMD.CR not clear after 500 ms
	 */
	if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
		dev_warn(&port->dd->pdev->dev,
			"PxCMD.CR not clear, escalating reset\n");

		if (mtip_hba_reset(port->dd))
			dev_err(&port->dd->pdev->dev,
				"HBA reset escalation failed.\n");

		/* 30 ms delay before com reset to quiesce chip */
		mdelay(30);
	}

	dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");

	/* Set PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) | 1,
		port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 1 ms to quiesce chip function */
	timeout = jiffies + msecs_to_jiffies(1);
	while (time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/* Clear PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
		port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
	timeout = jiffies + msecs_to_jiffies(500);
	while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
		&& time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
		dev_warn(&port->dd->pdev->dev,
			"COM reset failed\n");

	mtip_init_port(port);
	mtip_start_port(port);
}
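/*
 * The sequence above follows the standard AHCI COMRESET recipe: stop
 * the engine and wait for PxCMD.CR to drop, write PxSCTL.DET = 1 to
 * force a communication reset, hold it for at least 1 ms, clear DET,
 * and then poll PxSSTS.DET (bit 0 of PORT_SCR_STAT here) until the
 * device re-establishes presence, giving up after 500 ms.
 */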
static int mtip_device_reset(struct driver_data *dd)
{
	int rv = 0;

	if (mtip_check_surprise_removal(dd->pdev))
		return 0;

	if (mtip_hba_reset(dd) < 0)
		rv = -EFAULT;

	mdelay(1);
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
		dd->mmio + HOST_CTL);
	return rv;
}

/*
 * Helper function for tag logging
 */
static void print_tags(struct driver_data *dd,
			char *msg,
			unsigned long *tagbits,
			int cnt)
{
	unsigned char tagmap[128];
	int group, tagmap_len = 0;

	memset(tagmap, 0, sizeof(tagmap));
	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
		tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
			tagbits[group-1]);
	dev_warn(&dd->pdev->dev,
		"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}

/*
 * Internal command completion callback function.
 *
 * This function is normally called by the driver ISR when an internal
 * command completed. This function signals the command completion by
 * calling complete().
 *
 * @port    Pointer to the port data structure.
 * @tag     Tag of the command that has completed.
 * @command Pointer to the command structure; its comp_data field holds
 *          the completion to signal.
 * @status  Completion status.
 *
 * return value
 *	None
 */
static void mtip_completion(struct mtip_port *port,
			int tag, struct mtip_cmd *command, int status)
{
	struct completion *waiting = command->comp_data;

	if (unlikely(status == PORT_IRQ_TF_ERR))
		dev_warn(&port->dd->pdev->dev,
			"Internal command %d completed with TFE\n", tag);

	complete(waiting);
}

static void mtip_null_completion(struct mtip_port *port,
			int tag, struct mtip_cmd *command, int status)
{
}

static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib);

/*
 * Handle a taskfile error (TFE).
 *
 * @dd Pointer to the driver_data structure.
 *
 * return value
 *	None
 */
static void mtip_handle_tfe(struct driver_data *dd)
{
	int group, tag, bit, reissue, rv;
	struct mtip_port *port;
	struct mtip_cmd *cmd;
	u32 completed;
	struct host_to_dev_fis *fis;
	unsigned long tagaccum[SLOTBITS_IN_LONGS];
	unsigned int cmd_cnt = 0;
	unsigned char *buf;
	char *fail_reason = NULL;
	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;

	dev_warn(&dd->pdev->dev, "Taskfile error\n");

	port = dd->port;

	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");

		if (cmd->comp_data && cmd->comp_func) {
			cmd->comp_func(port, MTIP_TAG_INTERNAL,
					cmd, PORT_IRQ_TF_ERR);
		}
		goto handle_tfe_exit;
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);

		/* clear completed status register in the hardware.*/
		writel(completed, port->completed[group]);

		/* Process successfully completed commands */
		for (bit = 0; bit < 32 && completed; bit++) {
			if (!(completed & (1 << bit)))
				continue;
			tag = (group << 5) + bit;

			/* Skip the internal command slot */
			if (tag == MTIP_TAG_INTERNAL)
				continue;

			cmd = mtip_cmd_from_tag(dd, tag);
			if (likely(cmd->comp_func)) {
				set_bit(tag, tagaccum);
				cmd_cnt++;
				cmd->comp_func(port, tag, cmd, 0);
			} else {
				dev_err(&port->dd->pdev->dev,
					"Missing completion func for tag %d",
					tag);
				if (mtip_check_surprise_removal(dd->pdev)) {
					/* don't proceed further */
					return;
				}
			}
		}
	}

	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);

	/* Restart the port */
	mdelay(20);
	mtip_restart_port(port);

	/* Trying to determine the cause of the error */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
			fail_all_ncq_write = 1;
			fail_reason = "write protect";
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
			fail_all_ncq_cmds = 1;
			fail_reason = "thermal shutdown";
		}
		if (buf[288] == 0xBF) {
			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed. Secure erase required.\n");
			fail_all_ncq_cmds = 1;
			fail_reason = "rebuild failed";
		}
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		for (bit = 0; bit < 32; bit++) {
			reissue = 1;
			tag = (group << 5) + bit;
			cmd = mtip_cmd_from_tag(dd, tag);

			fis = (struct host_to_dev_fis *)cmd->command;

			/* Should re-issue? */
			if (tag == MTIP_TAG_INTERNAL ||
			    fis->command == ATA_CMD_SET_FEATURES)
				reissue = 0;
			else {
				if (fail_all_ncq_cmds ||
					(fail_all_ncq_write &&
					fis->command == ATA_CMD_FPDMA_WRITE)) {
					dev_warn(&dd->pdev->dev,
						" Fail: %s w/tag %d [%s].\n",
						fis->command == ATA_CMD_FPDMA_WRITE ?
							"write" : "read",
						tag,
						fail_reason != NULL ?
							fail_reason : "unknown");
					if (cmd->comp_func) {
						cmd->comp_func(port, tag,
							cmd, -ENODATA);
					}
					continue;
				}
			}

			/*
			 * First check if this command has
			 * exceeded its retries.
			 */
			if (reissue && (cmd->retries-- > 0)) {

				set_bit(tag, tagaccum);

				/* Re-issue the command. */
				mtip_issue_ncq_command(port, tag);

				continue;
			}

			/* Retire a command that will not be reissued */
			dev_warn(&port->dd->pdev->dev,
				"retiring tag %d\n", tag);

			if (cmd->comp_func)
				cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
			else
				dev_warn(&port->dd->pdev->dev,
					"Bad completion for tag %d\n",
					tag);
		}
	}
	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);

handle_tfe_exit:
	/* clear eh_active */
	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
	wake_up_interruptible(&port->svc_wait);
}
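/*
 * To summarize the recovery flow above: on a taskfile error the handler
 * first retires every command the hardware already marked completed,
 * then restarts the port, reads the NCQ error log (page 10h) to decide
 * whether writes or all NCQ commands must fail outright (write protect,
 * thermal shutdown, failed rebuild), and finally walks every slot,
 * reissuing commands that still have retries left and retiring the rest.
 */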
"write protect"; 699 } 700 if (buf[288] == 0xF7) { 701 dev_info(&dd->pdev->dev, 702 "Exceeded Tmax, drive in thermal shutdown.\n"); 703 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); 704 fail_all_ncq_cmds = 1; 705 fail_reason = "thermal shutdown"; 706 } 707 if (buf[288] == 0xBF) { 708 set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); 709 dev_info(&dd->pdev->dev, 710 "Drive indicates rebuild has failed. Secure erase required.\n"); 711 fail_all_ncq_cmds = 1; 712 fail_reason = "rebuild failed"; 713 } 714 } 715 716 /* clear the tag accumulator */ 717 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); 718 719 /* Loop through all the groups */ 720 for (group = 0; group < dd->slot_groups; group++) { 721 for (bit = 0; bit < 32; bit++) { 722 reissue = 1; 723 tag = (group << 5) + bit; 724 cmd = mtip_cmd_from_tag(dd, tag); 725 726 fis = (struct host_to_dev_fis *)cmd->command; 727 728 /* Should re-issue? */ 729 if (tag == MTIP_TAG_INTERNAL || 730 fis->command == ATA_CMD_SET_FEATURES) 731 reissue = 0; 732 else { 733 if (fail_all_ncq_cmds || 734 (fail_all_ncq_write && 735 fis->command == ATA_CMD_FPDMA_WRITE)) { 736 dev_warn(&dd->pdev->dev, 737 " Fail: %s w/tag %d [%s].\n", 738 fis->command == ATA_CMD_FPDMA_WRITE ? 739 "write" : "read", 740 tag, 741 fail_reason != NULL ? 742 fail_reason : "unknown"); 743 if (cmd->comp_func) { 744 cmd->comp_func(port, tag, 745 cmd, -ENODATA); 746 } 747 continue; 748 } 749 } 750 751 /* 752 * First check if this command has 753 * exceeded its retries. 754 */ 755 if (reissue && (cmd->retries-- > 0)) { 756 757 set_bit(tag, tagaccum); 758 759 /* Re-issue the command. */ 760 mtip_issue_ncq_command(port, tag); 761 762 continue; 763 } 764 765 /* Retire a command that will not be reissued */ 766 dev_warn(&port->dd->pdev->dev, 767 "retiring tag %d\n", tag); 768 769 if (cmd->comp_func) 770 cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR); 771 else 772 dev_warn(&port->dd->pdev->dev, 773 "Bad completion for tag %d\n", 774 tag); 775 } 776 } 777 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt); 778 779 handle_tfe_exit: 780 /* clear eh_active */ 781 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 782 wake_up_interruptible(&port->svc_wait); 783 } 784 785 /* 786 * Handle a set device bits interrupt 787 */ 788 static inline void mtip_workq_sdbfx(struct mtip_port *port, int group, 789 u32 completed) 790 { 791 struct driver_data *dd = port->dd; 792 int tag, bit; 793 struct mtip_cmd *command; 794 795 if (!completed) { 796 WARN_ON_ONCE(!completed); 797 return; 798 } 799 /* clear completed status register in the hardware.*/ 800 writel(completed, port->completed[group]); 801 802 /* Process completed commands. */ 803 for (bit = 0; (bit < 32) && completed; bit++) { 804 if (completed & 0x01) { 805 tag = (group << 5) | bit; 806 807 /* skip internal command slot. 
/*
 * Process legacy pio and d2h interrupts
 */
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
	struct mtip_port *port = dd->port;
	struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
	    (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
		& (1 << MTIP_TAG_INTERNAL))) {
		if (cmd->comp_func) {
			cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
			return;
		}
	}
}

/*
 * Demux and handle errors
 */
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
	if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.x\n");
		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.n\n");
		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
		dev_warn(&dd->pdev->dev,
			"Port stat errors %x unhandled\n",
			(port_stat & ~PORT_IRQ_HANDLED));
		if (mtip_check_surprise_removal(dd->pdev))
			return;
	}
	if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
		set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
	}
}
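/*
 * A note on the error demux above: taskfile and interface errors are
 * not recovered in interrupt context; the handler only sets
 * MTIP_PF_EH_ACTIVE_BIT and wakes svc_wait, leaving the actual recovery
 * (presumably via mtip_handle_tfe()) to the service thread outside this
 * excerpt.  Connect-change and PhyRdy-change events are acknowledged
 * immediately by clearing the matching PxSERR.DIAG bits (X at bit 26,
 * N at bit 16).
 */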
static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
{
	struct driver_data *dd = (struct driver_data *) data;
	struct mtip_port *port = dd->port;
	u32 hba_stat, port_stat;
	int rv = IRQ_NONE;
	int do_irq_enable = 1, i, workers;
	struct mtip_work *twork;

	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
	if (hba_stat) {
		rv = IRQ_HANDLED;

		/* Acknowledge the interrupt status on the port.*/
		port_stat = readl(port->mmio + PORT_IRQ_STAT);
		writel(port_stat, port->mmio + PORT_IRQ_STAT);

		/* Demux port status */
		if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
			do_irq_enable = 0;
			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);

			/*
			 * Snapshot each group's completed bitmap; slot
			 * group zero is handled locally below, the
			 * remaining groups are pushed to workers.
			 */
			for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
									i++) {
				twork = &dd->work[i];
				twork->completed = readl(port->completed[i]);
				if (twork->completed)
					workers++;
			}

			atomic_set(&dd->irq_workers_active, workers);
			if (workers) {
				for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
					twork = &dd->work[i];
					if (twork->completed)
						queue_work_on(
							twork->cpu_binding,
							dd->isr_workq,
							&twork->work);
				}

				if (likely(dd->work[0].completed))
					mtip_workq_sdbfx(port, 0,
							dd->work[0].completed);

			} else {
				/*
				 * Chip quirk: SDB interrupt but nothing
				 * to complete
				 */
				do_irq_enable = 1;
			}
		}

		if (unlikely(port_stat & PORT_IRQ_ERR)) {
			if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
				/* don't proceed further */
				return IRQ_HANDLED;
			}
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
							&dd->dd_flag))
				return rv;

			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
		}

		if (unlikely(port_stat & PORT_IRQ_LEGACY))
			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
	}

	/* acknowledge interrupt */
	if (unlikely(do_irq_enable))
		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);

	return rv;
}

/*
 * HBA interrupt subroutine.
 *
 * @irq      IRQ number.
 * @instance Pointer to the driver data structure.
 *
 * return value
 *	IRQ_HANDLED A HBA interrupt was pending and handled.
 *	IRQ_NONE    This interrupt was not for the HBA.
 */
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
	struct driver_data *dd = instance;

	return mtip_handle_irq(dd);
}

static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
	writel(1 << MTIP_TAG_BIT(tag),
		port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}

static bool mtip_pause_ncq(struct mtip_port *port,
				struct host_to_dev_fis *fis)
{
	struct host_to_dev_fis *reply;
	unsigned long task_file_data;

	reply = port->rxfis + RX_FIS_D2H_REG;
	task_file_data = readl(port->mmio + PORT_TFDATA);

	if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

	if ((task_file_data & 1))
		return false;

	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
			(fis->features == 0x03)) {
		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
		((fis->command == 0xFC) &&
			(fis->features == 0x27 || fis->features == 0x72 ||
			 fis->features == 0x62 || fis->features == 0x26))) {
		/* Com reset after secure erase or lowlevel format */
		mtip_restart_port(port);
		return false;
	}

	return false;
}
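/*
 * For orientation, the commands that pause normal NCQ processing above
 * are: SECURITY ERASE PREPARE (sets MTIP_PF_SE_ACTIVE_BIT) and DOWNLOAD
 * MICROCODE with subcommand 0x03 (sets MTIP_PF_DM_ACTIVE_BIT), while
 * SECURITY ERASE UNIT and the vendor 0xFC low-level format command
 * instead trigger an immediate COM reset via mtip_restart_port().
 * ic_pause_timer records when the pause began so it can be timed out
 * elsewhere in the driver.
 */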
/*
 * Wait for port to quiesce
 *
 * @port    Pointer to port data structure
 * @timeout Max duration to wait (ms)
 *
 * return value
 *	0	Success
 *	-EBUSY	Commands still active
 *	-EFAULT	Device removed
 */
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
	unsigned long to;
	unsigned int n;
	unsigned int active = 1;

	blk_mq_stop_hw_queues(port->dd->queue);

	to = jiffies + msecs_to_jiffies(timeout);
	do {
		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			msleep(20);
			continue; /* svc thd is actively issuing commands */
		}

		msleep(100);
		if (mtip_check_surprise_removal(port->dd->pdev))
			goto err_fault;
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
			goto err_fault;

		/*
		 * Ignore s_active bit 0 of array element 0.
		 * This bit will always be set
		 */
		active = readl(port->s_active[0]) & 0xFFFFFFFE;
		for (n = 1; n < port->dd->slot_groups; n++)
			active |= readl(port->s_active[n]);

		if (!active)
			break;
	} while (time_before(jiffies, to));

	blk_mq_start_stopped_hw_queues(port->dd->queue, true);
	return active ? -EBUSY : 0;

err_fault:
	blk_mq_start_stopped_hw_queues(port->dd->queue, true);
	return -EFAULT;
}
/*
 * Execute an internal command and wait for the completion.
 *
 * @port    Pointer to the port data structure.
 * @fis     Pointer to the FIS that describes the command.
 * @fis_len Length in WORDS of the FIS.
 * @buffer  DMA accessible for command data.
 * @buf_len Length, in bytes, of the data buffer.
 * @opts    Command header options, excluding the FIS length
 *          and the number of PRD entries.
 * @atomic  GFP_KERNEL to sleep on a completion, GFP_ATOMIC to poll.
 * @timeout Time in ms to wait for the command to complete.
 *
 * return value
 *	0	 Command completed successfully.
 *	-EFAULT  The buffer address is not correctly aligned.
 *	-EBUSY   Internal command or other IO in progress.
 *	-EAGAIN  Time out waiting for command to complete.
 */
static int mtip_exec_internal_command(struct mtip_port *port,
					struct host_to_dev_fis *fis,
					int fis_len,
					dma_addr_t buffer,
					int buf_len,
					u32 opts,
					gfp_t atomic,
					unsigned long timeout)
{
	struct mtip_cmd_sg *command_sg;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct mtip_cmd *int_cmd;
	struct driver_data *dd = port->dd;
	int rv = 0;

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	int_cmd = mtip_get_int_command(dd);

	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	port->ic_pause_timer = 0;

	clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

	if (atomic == GFP_KERNEL) {
		if (fis->command != ATA_CMD_STANDBYNOW1) {
			/* wait for io to complete if non atomic */
			if (mtip_quiesce_io(port,
					MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
				dev_warn(&dd->pdev->dev,
					"Failed to quiesce IO\n");
				mtip_put_int_command(dd, int_cmd);
				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
				wake_up_interruptible(&port->svc_wait);
				return -EBUSY;
			}
		}

		/* Set the completion function and data for the command. */
		int_cmd->comp_data = &wait;
		int_cmd->comp_func = mtip_completion;

	} else {
		/* Clear completion - we're going to poll */
		int_cmd->comp_data = NULL;
		int_cmd->comp_func = mtip_null_completion;
	}

	/* Copy the command to the command table */
	memcpy(int_cmd->command, fis, fis_len*4);

	/* Populate the SG list */
	int_cmd->command_header->opts =
		 __force_bit2int cpu_to_le32(opts | fis_len);
	if (buf_len) {
		command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;

		command_sg->info =
			__force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
		command_sg->dba	=
			__force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
		command_sg->dba_upper =
			__force_bit2int cpu_to_le32((buffer >> 16) >> 16);

		int_cmd->command_header->opts |=
			__force_bit2int cpu_to_le32((1 << 16));
	}

	/* Populate the command header */
	int_cmd->command_header->byte_count = 0;

	/* Issue the command to the hardware */
	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);

	if (atomic == GFP_KERNEL) {
		/* Wait for the command to complete or timeout. */
		if ((rv = wait_for_completion_interruptible_timeout(
				&wait,
				msecs_to_jiffies(timeout))) <= 0) {
			if (rv == -ERESTARTSYS) { /* interrupted */
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] was interrupted after %lu ms\n",
					fis->command, timeout);
				rv = -EINTR;
				goto exec_ic_exit;
			} else if (rv == 0) /* timeout */
				dev_err(&dd->pdev->dev,
					"Internal command did not complete [%02X] within timeout of %lu ms\n",
					fis->command, timeout);
			else
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
					fis->command, rv, timeout);

			if (mtip_check_surprise_removal(dd->pdev) ||
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&dd->dd_flag)) {
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] wait returned due to SR\n",
					fis->command);
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			mtip_device_reset(dd); /* recover from timeout issue */
			rv = -EAGAIN;
			goto exec_ic_exit;
		}
	} else {
		u32 hba_stat, port_stat;

		/* Spin for <timeout> checking if command still outstanding */
		timeout = jiffies + msecs_to_jiffies(timeout);
		while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
				& (1 << MTIP_TAG_INTERNAL))
				&& time_before(jiffies, timeout)) {
			if (mtip_check_surprise_removal(dd->pdev)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			if ((fis->command != ATA_CMD_STANDBYNOW1) &&
				test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
						&dd->dd_flag)) {
				rv = -ENXIO;
				goto exec_ic_exit;
			}
			port_stat = readl(port->mmio + PORT_IRQ_STAT);
			if (!port_stat)
				continue;

			if (port_stat & PORT_IRQ_ERR) {
				dev_err(&dd->pdev->dev,
					"Internal command [%02X] failed\n",
					fis->command);
				mtip_device_reset(dd);
				rv = -EIO;
				goto exec_ic_exit;
			} else {
				writel(port_stat, port->mmio + PORT_IRQ_STAT);
				hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
				if (hba_stat)
					writel(hba_stat,
						dd->mmio + HOST_IRQ_STAT);
			}
			break;
		}
	}

	if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
			& (1 << MTIP_TAG_INTERNAL)) {
		rv = -ENXIO;
		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
			mtip_device_reset(dd);
			rv = -EAGAIN;
		}
	}

exec_ic_exit:
	/* Clear the allocated and active bits for the internal command. */
	mtip_put_int_command(dd, int_cmd);
	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
		/* NCQ paused */
		return rv;
	}
	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	wake_up_interruptible(&port->svc_wait);

	return rv;
}
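/*
 * A minimal usage sketch (illustrative only; it mirrors the pattern the
 * IDENTIFY, SMART and log-page helpers below actually use): build a
 * host-to-device register FIS, then hand it to
 * mtip_exec_internal_command() with either GFP_KERNEL (sleep on a
 * completion) or GFP_ATOMIC (poll the CI register):
 *
 *	struct host_to_dev_fis fis;
 *
 *	memset(&fis, 0, sizeof(fis));
 *	fis.type    = 0x27;		// host-to-device register FIS
 *	fis.opts    = 1 << 7;		// C bit: this is a command
 *	fis.command = ATA_CMD_ID_ATA;
 *	mtip_exec_internal_command(port, &fis, 5, port->identify_dma,
 *				   ATA_ID_WORDS * sizeof(u16), 0,
 *				   GFP_KERNEL, MTIP_INT_CMD_TIMEOUT_MS);
 */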
/*
 * Byte-swap ATA ID strings.
 *
 * ATA identify data contains strings in byte-swapped 16-bit words.
 * They must be swapped (on all architectures) to be usable as C strings.
 * This function swaps bytes in-place.
 *
 * @buf The buffer location of the string
 * @len The number of bytes to swap
 *
 * return value
 *	None
 */
static inline void ata_swap_string(u16 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < (len/2); i++)
		be16_to_cpus(&buf[i]);
}

static void mtip_set_timeout(struct driver_data *dd,
					struct host_to_dev_fis *fis,
					unsigned int *timeout, u8 erasemode)
{
	switch (fis->command) {
	case ATA_CMD_DOWNLOAD_MICRO:
		*timeout = 120000; /* 2 minutes */
		break;
	case ATA_CMD_SEC_ERASE_UNIT:
	case 0xFC:
		if (erasemode)
			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
		else
			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
		break;
	case ATA_CMD_STANDBYNOW1:
		*timeout = 120000; /* 2 minutes */
		break;
	case 0xF7:
	case 0xFA:
		*timeout = 60000; /* 60 seconds */
		break;
	case ATA_CMD_SMART:
		*timeout = 15000; /* 15 seconds */
		break;
	default:
		*timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
		break;
	}
}
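/*
 * Worked example for the secure-erase case above: IDENTIFY words 89
 * (normal erase) and 90 (enhanced erase) report the drive's estimated
 * erase time in 2-minute units, so a word value of 0x0004 yields
 * 4 * 2 * 60000 ms = 8 minutes.  The other entries are fixed values
 * chosen by the driver for microcode download, standby, the vendor
 * commands 0xF7/0xFA, and SMART.
 */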
/*
 * Request the device identity information.
 *
 * If a user space buffer is not specified, i.e. is NULL, the
 * identify information is still read from the drive and placed
 * into the identify data buffer (@e port->identify) in the
 * port data structure.
 * When the identify buffer contains valid identify information @e
 * port->identify_valid is non-zero.
 *
 * @port        Pointer to the port structure.
 * @user_buffer A user space buffer where the identify data should be
 *              copied.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT An error occurred while copying data to the user buffer.
 *	-1	Command failed.
 */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return -EFAULT;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				GFP_KERNEL,
				MTIP_INT_CMD_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping. Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40);  /* model string*/
	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
#else
	{
		int i;
		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Check security locked state */
	if (port->identify[128] & 0x4)
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
	else
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
	/* Demux ID.DRAT & ID.RZAT to determine trim support */
	if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
		port->dd->trim_supp = true;
	else
#endif
		port->dd->trim_supp = false;

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	return rv;
}
/*
 * Issue a standby immediate command to the device.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	0	Command was executed successfully.
 *	-1	An error occurred while executing the command.
 */
static int mtip_standby_immediate(struct mtip_port *port)
{
	int rv;
	struct host_to_dev_fis fis;
	unsigned long start;
	unsigned int timeout;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_STANDBYNOW1;

	mtip_set_timeout(port->dd, &fis, &timeout, 0);

	start = jiffies;
	rv = mtip_exec_internal_command(port,
					&fis,
					5,
					0,
					0,
					0,
					GFP_ATOMIC,
					timeout);
	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
			jiffies_to_msecs(jiffies - start));
	if (rv)
		dev_warn(&port->dd->pdev->dev,
			"STANDBY IMMEDIATE command failed.\n");

	return rv;
}

/*
 * Issue a READ LOG EXT command to the device.
 *
 * @port       pointer to the port structure.
 * @page       page number to fetch
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 * @sectors    page length to fetch, in sectors
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_READ_LOG_EXT;
	fis.sect_count	= sectors & 0xFF;
	fis.sect_cnt_ex	= (sectors >> 8) & 0xFF;
	fis.lba_low	= page;
	fis.lba_mid	= 0;
	fis.device	= ATA_DEVICE_OBS;

	memset(buffer, 0, sectors * ATA_SECT_SIZE);

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					sectors * ATA_SECT_SIZE,
					0,
					GFP_ATOMIC,
					MTIP_INT_CMD_TIMEOUT_MS);
}

/*
 * Issue a SMART READ DATA command to the device.
 *
 * @port       pointer to the port structure.
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
					dma_addr_t buffer_dma)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_SMART;
	fis.features	= 0xD0;
	fis.sect_count	= 1;
	fis.lba_mid	= 0x4F;
	fis.lba_hi	= 0xC2;
	fis.device	= ATA_DEVICE_OBS;

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					ATA_SECT_SIZE,
					0,
					GFP_ATOMIC,
					15000);
}
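/*
 * The 0x4F/0xC2 values written to the LBA mid/high registers above are
 * the fixed signature the ATA specification requires for SMART
 * commands, and feature 0xD0 selects the SMART READ DATA subcommand;
 * they are magic constants from the spec, not device-specific tuning.
 */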
/*
 * Get the value of a smart attribute
 *
 * @port   pointer to the port structure
 * @id     attribute number
 * @attrib pointer to return attrib information corresponding to @id
 *
 * return value
 *	-EINVAL NULL buffer passed or unsupported attribute @id.
 *	-EPERM  Identify data not valid, SMART not supported or not enabled
 */
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib)
{
	int rv, i;
	struct smart_attr *pattr;

	if (!attrib)
		return -EINVAL;

	if (!port->identify_valid) {
		dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
		return -EPERM;
	}
	if (!(port->identify[82] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
		return -EPERM;
	}
	if (!(port->identify[85] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
		return -EPERM;
	}

	memset(port->smart_buf, 0, ATA_SECT_SIZE);
	rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
	if (rv) {
		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
		return rv;
	}

	pattr = (struct smart_attr *)(port->smart_buf + 2);
	for (i = 0; i < 29; i++, pattr++)
		if (pattr->attr_id == id) {
			memcpy(attrib, pattr, sizeof(struct smart_attr));
			break;
		}

	if (i == 29) {
		dev_warn(&port->dd->pdev->dev,
			"Query for invalid SMART attribute ID\n");
		rv = -EINVAL;
	}

	return rv;
}

/*
 * Trim unused sectors
 *
 * @dd  pointer to driver_data structure
 * @lba starting lba
 * @len # of 512b sectors to trim
 *
 * return value
 *	-ENOMEM Out of dma memory
 *	-EINVAL Invalid parameters passed in, trim not supported
 *	-EIO    Error submitting trim request to hw
 */
static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
							unsigned int len)
{
	int i, rv = 0;
	u64 tlba, tlen, sect_left;
	struct mtip_trim_entry *buf;
	dma_addr_t dma_addr;
	struct host_to_dev_fis fis;

	if (!len || dd->trim_supp == false)
		return -EINVAL;

	/* Trim request too big */
	WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));

	/* Trim request not aligned on 4k boundary */
	WARN_ON(len % 8 != 0);

	/* Warn if vu_trim structure is too big */
	WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);

	/* Allocate a DMA buffer for the trim structure */
	buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
								GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, ATA_SECT_SIZE);

	for (i = 0, sect_left = len, tlba = lba;
			i < MTIP_MAX_TRIM_ENTRIES && sect_left;
			i++) {
		tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
					MTIP_MAX_TRIM_ENTRY_LEN :
					sect_left);
		buf[i].lba = __force_bit2int cpu_to_le32(tlba);
		buf[i].range = __force_bit2int cpu_to_le16(tlen);
		tlba += tlen;
		sect_left -= tlen;
	}
	WARN_ON(sect_left != 0);

	/* Build the fis */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= 0xfb;
	fis.features	= 0x60;
	fis.sect_count	= 1;
	fis.device	= ATA_DEVICE_OBS;

	if (mtip_exec_internal_command(dd->port,
					&fis,
					5,
					dma_addr,
					ATA_SECT_SIZE,
					0,
					GFP_KERNEL,
					MTIP_TRIM_TIMEOUT_MS) < 0)
		rv = -EIO;

	dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
	return rv;
}
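/*
 * Worked example of the trim split above, assuming the usual value of
 * MTIP_MAX_TRIM_ENTRY_LEN (0xFFF8 sectors per entry): a request for
 * len = 0x20000 sectors starting at lba 0 becomes three entries,
 * (0x0, 0xFFF8), (0xFFF8, 0xFFF8) and (0x1FFF0, 0x10), all packed into
 * one 512-byte vendor-unique sector sent with command 0xFB/feature 0x60.
 */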
/*
 * Get the drive capacity.
 *
 * @dd      Pointer to the device data structure.
 * @sectors Pointer to the variable that will receive the sector count.
 *
 * return value
 *	1 Capacity was returned successfully.
 *	0 The identify information is invalid.
 */
static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
{
	struct mtip_port *port = dd->port;
	u64 total, raw0, raw1, raw2, raw3;

	raw0 = port->identify[100];
	raw1 = port->identify[101];
	raw2 = port->identify[102];
	raw3 = port->identify[103];
	total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
	*sectors = total;
	return (bool) !!port->identify_valid;
}

/*
 * Display the identify command data.
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_dump_identify(struct mtip_port *port)
{
	sector_t sectors;
	unsigned short revid;
	char cbuf[42];

	if (!port->identify_valid)
		return;

	strlcpy(cbuf, (char *)(port->identify + 10), 21);
	dev_info(&port->dd->pdev->dev,
		"Serial No.: %s\n", cbuf);

	strlcpy(cbuf, (char *)(port->identify + 23), 9);
	dev_info(&port->dd->pdev->dev,
		"Firmware Ver.: %s\n", cbuf);

	strlcpy(cbuf, (char *)(port->identify + 27), 41);
	dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);

	dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
		port->identify[128],
		port->identify[128] & 0x4 ? "(LOCKED)" : "");

	if (mtip_hw_get_capacity(port->dd, &sectors))
		dev_info(&port->dd->pdev->dev,
			"Capacity: %llu sectors (%llu MB)\n",
			(u64)sectors,
			((u64)sectors) * ATA_SECT_SIZE >> 20);

	pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
	switch (revid & 0xFF) {
	case 0x1:
		strlcpy(cbuf, "A0", 3);
		break;
	case 0x3:
		strlcpy(cbuf, "A2", 3);
		break;
	default:
		strlcpy(cbuf, "?", 2);
		break;
	}
	dev_info(&port->dd->pdev->dev,
		"Card Type: %s\n", cbuf);
}

/*
 * Map the commands scatter list into the command table.
 *
 * @command Pointer to the command.
 * @nents   Number of scatter list entries.
 *
 * return value
 *	None
 */
static inline void fill_command_sg(struct driver_data *dd,
				struct mtip_cmd *command,
				int nents)
{
	int n;
	unsigned int dma_len;
	struct mtip_cmd_sg *command_sg;
	struct scatterlist *sg = command->sg;

	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;

	for (n = 0; n < nents; n++) {
		dma_len = sg_dma_len(sg);
		if (dma_len > 0x400000)
			dev_err(&dd->pdev->dev,
				"DMA segment length truncated\n");
		command_sg->info = __force_bit2int
			cpu_to_le32((dma_len-1) & 0x3FFFFF);
		command_sg->dba	= __force_bit2int
			cpu_to_le32(sg_dma_address(sg));
		command_sg->dba_upper = __force_bit2int
			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		command_sg++;
		sg++;
	}
}
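/*
 * Background on the PRD encoding above: the low 22 bits of the info
 * field hold the byte count minus one, which is why a segment is
 * limited to 0x400000 (4 MiB) and why (dma_len - 1) is masked with
 * 0x3FFFFF.  For example, a 4096-byte segment is encoded as 0xFFF.
 */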
/*
 * @brief Execute a drive command.
 *
 * @param port    Pointer to the port data structure.
 * @param command Pointer to the user specified command parameters.
 *
 * return value 0 The command completed successfully.
 * return value -1 An error occurred while executing the command.
 */
static int exec_drive_task(struct mtip_port *port, u8 *command)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
	unsigned int to;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[1];
	fis.sect_count	= command[2];
	fis.sector	= command[3];
	fis.cyl_low	= command[4];
	fis.cyl_hi	= command[5];
	fis.device	= command[6] & ~0x10; /* Clear the dev bit*/

	mtip_set_timeout(port->dd, &fis, &to, 0);

	dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3],
		command[4],
		command[5],
		command[6]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				 &fis,
				 5,
				 0,
				 0,
				 0,
				 GFP_KERNEL,
				 to) < 0) {
		return -1;
	}

	command[0] = reply->command; /* Status*/
	command[1] = reply->features; /* Error*/
	command[4] = reply->cyl_low;
	command[5] = reply->cyl_hi;

	dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
		__func__,
		command[0],
		command[1],
		command[4],
		command[5]);

	return 0;
}
/*
 * @brief Execute a drive command.
 *
 * @param port        Pointer to the port data structure.
 * @param command     Pointer to the user specified command parameters.
 * @param user_buffer Pointer to the user space buffer where read sector
 *                    data should be copied.
 *
 * return value 0 The command completed successfully.
 * return value -EFAULT An error occurred while copying the completion
 *                      data to the user space buffer.
 * return value -1 An error occurred while executing the command.
 */
static int exec_drive_command(struct mtip_port *port, u8 *command,
				void __user *user_buffer)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply;
	u8 *buf = NULL;
	dma_addr_t dma_addr = 0;
	int rv = 0, xfer_sz = command[3];
	unsigned int to;

	if (xfer_sz) {
		if (!user_buffer)
			return -EFAULT;

		buf = dmam_alloc_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz,
				&dma_addr,
				GFP_KERNEL);
		if (!buf) {
			dev_err(&port->dd->pdev->dev,
				"Memory allocation failed (%d bytes)\n",
				ATA_SECT_SIZE * xfer_sz);
			return -ENOMEM;
		}
		memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
	}

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[2];
	fis.sect_count	= command[3];
	if (fis.command == ATA_CMD_SMART) {
		fis.sector	= command[1];
		fis.cyl_low	= 0x4F;
		fis.cyl_hi	= 0xC2;
	}

	mtip_set_timeout(port->dd, &fis, &to, 0);

	if (xfer_sz)
		reply = (port->rxfis + RX_FIS_PIO_SETUP);
	else
		reply = (port->rxfis + RX_FIS_D2H_REG);

	dbg_printk(MTIP_DRV_NAME
		" %s: User Command: cmd %x, sect %x, "
		"feat %x, sectcnt %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				(xfer_sz ? dma_addr : 0),
				(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
				0,
				GFP_KERNEL,
				to)
				< 0) {
		rv = -EFAULT;
		goto exit_drive_command;
	}

	/* Collect the completion status. */
	command[0] = reply->command; /* Status*/
	command[1] = reply->features; /* Error*/
	command[2] = reply->sect_count;

	dbg_printk(MTIP_DRV_NAME
		" %s: Completion Status: stat %x, "
		"err %x, nsect %x\n",
		__func__,
		command[0],
		command[1],
		command[2]);

	if (xfer_sz) {
		if (copy_to_user(user_buffer,
				 buf,
				 ATA_SECT_SIZE * command[3])) {
			rv = -EFAULT;
			goto exit_drive_command;
		}
	}

exit_drive_command:
	if (buf)
		dmam_free_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
	return rv;
}

/*
 * Indicates whether a command has a single sector payload.
 *
 * @command  Command opcode sent to the device.
 * @features Features value sent along with the command.
 *
 * return value
 *	1 command is one that always has a single sector payload,
 *	  regardless of the value in the Sector Count field.
 *	0 otherwise
 */
static unsigned int implicit_sector(unsigned char command,
					unsigned char features)
{
	unsigned int rv = 0;

	/* list of commands that have an implicit sector count of 1 */
	switch (command) {
	case ATA_CMD_SEC_SET_PASS:
	case ATA_CMD_SEC_UNLOCK:
	case ATA_CMD_SEC_ERASE_PREP:
	case ATA_CMD_SEC_ERASE_UNIT:
	case ATA_CMD_SEC_FREEZE_LOCK:
	case ATA_CMD_SEC_DISABLE_PASS:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_PMP_WRITE:
		rv = 1;
		break;
	case ATA_CMD_SET_MAX:
		if (features == ATA_SET_MAX_UNLOCK)
			rv = 1;
		break;
	case ATA_CMD_SMART:
		if ((features == ATA_SMART_READ_VALUES) ||
				(features == ATA_SMART_READ_THRESHOLDS))
			rv = 1;
		break;
	case ATA_CMD_CONF_OVERLAY:
		if ((features == ATA_DCO_IDENTIFY) ||
				(features == ATA_DCO_SET))
			rv = 1;
		break;
	}
	return rv;
}
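/*
 * Example of why the table above matters: SMART READ DATA transfers
 * exactly one 512-byte sector no matter what the caller put in the
 * Sector Count field, so exec_drive_taskfile() below uses
 * implicit_sector() to force the transfer size to ATA_SECT_SIZE for
 * such commands instead of trusting fis.sect_count.
 */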
*/ 2093 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 2094 2095 fis.type = 0x27; 2096 fis.opts = 1 << 7; 2097 fis.command = req_task->io_ports[7]; 2098 fis.features = req_task->io_ports[1]; 2099 fis.sect_count = req_task->io_ports[2]; 2100 fis.lba_low = req_task->io_ports[3]; 2101 fis.lba_mid = req_task->io_ports[4]; 2102 fis.lba_hi = req_task->io_ports[5]; 2103 /* Clear the dev bit*/ 2104 fis.device = req_task->io_ports[6] & ~0x10; 2105 2106 if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) { 2107 req_task->in_flags.all = 2108 IDE_TASKFILE_STD_IN_FLAGS | 2109 (IDE_HOB_STD_IN_FLAGS << 8); 2110 fis.lba_low_ex = req_task->hob_ports[3]; 2111 fis.lba_mid_ex = req_task->hob_ports[4]; 2112 fis.lba_hi_ex = req_task->hob_ports[5]; 2113 fis.features_ex = req_task->hob_ports[1]; 2114 fis.sect_cnt_ex = req_task->hob_ports[2]; 2115 2116 } else { 2117 req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS; 2118 } 2119 2120 force_single_sector = implicit_sector(fis.command, fis.features); 2121 2122 if ((taskin || taskout) && (!fis.sect_count)) { 2123 if (nsect) 2124 fis.sect_count = nsect; 2125 else { 2126 if (!force_single_sector) { 2127 dev_warn(&dd->pdev->dev, 2128 "data movement but " 2129 "sect_count is 0\n"); 2130 err = -EINVAL; 2131 goto abort; 2132 } 2133 } 2134 } 2135 2136 dbg_printk(MTIP_DRV_NAME 2137 " %s: cmd %x, feat %x, nsect %x," 2138 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x," 2139 " head/dev %x\n", 2140 __func__, 2141 fis.command, 2142 fis.features, 2143 fis.sect_count, 2144 fis.lba_low, 2145 fis.lba_mid, 2146 fis.lba_hi, 2147 fis.device); 2148 2149 /* check for erase mode support during secure erase.*/ 2150 if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf && 2151 (outbuf[0] & MTIP_SEC_ERASE_MODE)) { 2152 erasemode = 1; 2153 } 2154 2155 mtip_set_timeout(dd, &fis, &timeout, erasemode); 2156 2157 /* Determine the correct transfer size.*/ 2158 if (force_single_sector) 2159 transfer_size = ATA_SECT_SIZE; 2160 else 2161 transfer_size = ATA_SECT_SIZE * fis.sect_count; 2162 2163 /* Execute the command.*/ 2164 if (mtip_exec_internal_command(dd->port, 2165 &fis, 2166 5, 2167 dma_buffer, 2168 transfer_size, 2169 0, 2170 GFP_KERNEL, 2171 timeout) < 0) { 2172 err = -EIO; 2173 goto abort; 2174 } 2175 2176 task_file_data = readl(dd->port->mmio+PORT_TFDATA); 2177 2178 if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) { 2179 reply = dd->port->rxfis + RX_FIS_PIO_SETUP; 2180 req_task->io_ports[7] = reply->control; 2181 } else { 2182 reply = dd->port->rxfis + RX_FIS_D2H_REG; 2183 req_task->io_ports[7] = reply->command; 2184 } 2185 2186 /* reclaim the DMA buffers.*/ 2187 if (inbuf_dma) 2188 pci_unmap_single(dd->pdev, inbuf_dma, 2189 taskin, DMA_FROM_DEVICE); 2190 if (outbuf_dma) 2191 pci_unmap_single(dd->pdev, outbuf_dma, 2192 taskout, DMA_TO_DEVICE); 2193 inbuf_dma = 0; 2194 outbuf_dma = 0; 2195 2196 /* return the ATA registers to the caller.*/ 2197 req_task->io_ports[1] = reply->features; 2198 req_task->io_ports[2] = reply->sect_count; 2199 req_task->io_ports[3] = reply->lba_low; 2200 req_task->io_ports[4] = reply->lba_mid; 2201 req_task->io_ports[5] = reply->lba_hi; 2202 req_task->io_ports[6] = reply->device; 2203 2204 if (req_task->out_flags.all & 1) { 2205 2206 req_task->hob_ports[3] = reply->lba_low_ex; 2207 req_task->hob_ports[4] = reply->lba_mid_ex; 2208 req_task->hob_ports[5] = reply->lba_hi_ex; 2209 req_task->hob_ports[1] = reply->features_ex; 2210 req_task->hob_ports[2] = reply->sect_cnt_ex; 2211 } 2212 dbg_printk(MTIP_DRV_NAME 2213 " %s: Completion: stat 
%x, "
		"err %x, sect_cnt %x, lbalo %x, "
		"lbamid %x, lbahi %x, dev %x\n",
		__func__,
		req_task->io_ports[7],
		req_task->io_ports[1],
		req_task->io_ports[2],
		req_task->io_ports[3],
		req_task->io_ports[4],
		req_task->io_ports[5],
		req_task->io_ports[6]);

	if (taskout) {
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	if (inbuf_dma)
		pci_unmap_single(dd->pdev, inbuf_dma,
			taskin, DMA_FROM_DEVICE);
	if (outbuf_dma)
		pci_unmap_single(dd->pdev, outbuf_dma,
			taskout, DMA_TO_DEVICE);
	kfree(outbuf);
	kfree(inbuf);

	return err;
}

/*
 * Handle IOCTL calls from the Block Layer.
 *
 * This function is called by the Block Layer when it receives an IOCTL
 * command that it does not understand. If the IOCTL command is not
 * supported, this function returns -EINVAL.
 *
 * @dd  Pointer to the driver data structure.
 * @cmd IOCTL command passed from the Block Layer.
 * @arg IOCTL argument passed from the Block Layer.
 *
 * return value
 *	0	The IOCTL completed successfully.
 *	-EINVAL	The specified command is not supported.
 *	-EFAULT	An error occurred copying data to a user space buffer.
 *	-EIO	An error occurred while executing the command.
 */
static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
			 unsigned long arg)
{
	switch (cmd) {
	case HDIO_GET_IDENTITY:
	{
		if (copy_to_user((void __user *)arg, dd->port->identify,
						sizeof(u16) * ATA_ID_WORDS))
			return -EFAULT;
		break;
	}
	case HDIO_DRIVE_CMD:
	{
		u8 drive_command[4];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					(void __user *) arg,
					sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_command(dd->port,
					drive_command,
					(void __user *) (arg+4)))
			return -EIO;

		/* Copy the status back to the user's buffer. */
		if (copy_to_user((void __user *) arg,
					drive_command,
					sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASK:
	{
		u8 drive_command[7];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					(void __user *) arg,
					sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_task(dd->port, drive_command))
			return -EIO;

		/* Copy the status back to the user's buffer.
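		 * (drive_command[] carries the classic seven-byte taskfile
		 * in the usual hdreg order: command, feature, nsect,
		 * sector, cyl low, cyl high, select.)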
		 */
		if (copy_to_user((void __user *) arg,
					drive_command,
					sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASKFILE: {
		ide_task_request_t req_task;
		int ret, outtotal;

		if (copy_from_user(&req_task, (void __user *) arg,
					sizeof(req_task)))
			return -EFAULT;

		outtotal = sizeof(req_task);

		ret = exec_drive_taskfile(dd, (void __user *) arg,
						&req_task, outtotal);

		if (copy_to_user((void __user *) arg, &req_task,
							sizeof(req_task)))
			return -EFAULT;

		return ret;
	}

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Submit an IO to the hw
 *
 * This function is called by the block layer to issue an io
 * to the device. Upon completion, the completion function set
 * below finishes the request through the blk-mq layer.
 *
 * @dd      Pointer to the driver data structure.
 * @rq      Pointer to the request being issued.
 * @command Pointer to the per-request command structure.
 * @nents   Number of entries in the scatter list for this request.
 * @hctx    Pointer to the hardware context the request arrived on.
 *
 * return value
 *	None
 */
static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
			      struct mtip_cmd *command, int nents,
			      struct blk_mq_hw_ctx *hctx)
{
	struct host_to_dev_fis *fis;
	struct mtip_port *port = dd->port;
	int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	u64 start = blk_rq_pos(rq);
	unsigned int nsect = blk_rq_sectors(rq);

	/* Map the scatter list for DMA access */
	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);

	prefetch(&port->flags);

	command->scatter_ents = nents;

	/*
	 * The number of retries for this command before it is
	 * reported as a failure to the upper layers.
	 */
	command->retries = MTIP_MAX_RETRIES;

	/* Fill out fis */
	fis = command->command;
	fis->type        = 0x27;
	fis->opts        = 1 << 7;
	if (dma_dir == DMA_FROM_DEVICE)
		fis->command = ATA_CMD_FPDMA_READ;
	else
		fis->command = ATA_CMD_FPDMA_WRITE;
	fis->lba_low     = start & 0xFF;
	fis->lba_mid     = (start >> 8) & 0xFF;
	fis->lba_hi      = (start >> 16) & 0xFF;
	fis->lba_low_ex  = (start >> 24) & 0xFF;
	fis->lba_mid_ex  = (start >> 32) & 0xFF;
	fis->lba_hi_ex   = (start >> 40) & 0xFF;
	fis->device      = 1 << 6;	/* LBA mode */
	/* For NCQ the transfer length lives in the features field... */
	fis->features    = nsect & 0xFF;
	fis->features_ex = (nsect >> 8) & 0xFF;
	/*
	 * ...and the tag in bits 7:3 of the count field; the rotate
	 * below folds any upper tag bits into bits 2:0 for tags > 31.
	 */
	fis->sect_count  = ((rq->tag << 3) | (rq->tag >> 5));
	fis->sect_cnt_ex = 0;
	fis->control     = 0;
	fis->res2        = 0;
	fis->res3        = 0;
	fill_command_sg(dd, command, nents);

	/* Flag an unaligned IO to the drive (device-specific hint) */
	if (unlikely(command->unaligned))
		fis->device |= 1 << 7;

	/* Populate the command header */
	command->command_header->opts =
			__force_bit2int cpu_to_le32(
				(nents << 16) | 5 | AHCI_CMD_PREFETCH);
	command->command_header->byte_count = 0;

	/*
	 * Set the completion function and data for the command
	 * within this layer.
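	 * On interrupt, the ISR resolves the completed tag back to this
	 * command and calls comp_func (mtip_async_complete), which
	 * completes the blk-mq request and unmaps the scatter list.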
	 */
	command->comp_data = dd;
	command->comp_func = mtip_async_complete;
	command->direction = dma_dir;

	/*
	 * Defer this command if an internal command is in progress or
	 * error handling is active; the service thread will issue it
	 * once the port is unpaused.
	 */
	if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
		set_bit(rq->tag, port->cmds_to_issue);
		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
		return;
	}

	/* Issue the command to the hardware */
	mtip_issue_ncq_command(port, rq->tag);
}

/*
 * Sysfs status dump.
 *
 * @dev  Pointer to the device structure, passed by the kernel.
 * @attr Pointer to the device_attribute structure passed by the kernel.
 * @buf  Pointer to the char buffer that will receive the stats info.
 *
 * return value
 *	The size, in bytes, of the data copied into buf.
 */
static ssize_t mtip_hw_show_status(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct driver_data *dd = dev_to_disk(dev)->private_data;
	int size = 0;

	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
		size += sprintf(buf, "%s", "thermal_shutdown\n");
	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
		size += sprintf(buf, "%s", "write_protect\n");
	else
		size += sprintf(buf, "%s", "online\n");

	return size;
}

static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);

/* debugfs entries */

static ssize_t show_device_status(struct device_driver *drv, char *buf)
{
	int size = 0;
	struct driver_data *dd, *tmp;
	unsigned long flags;
	char id_buf[42];
	u16 status = 0;

	spin_lock_irqsave(&dev_lock, flags);
	size += sprintf(&buf[size], "Devices Present:\n");
	list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
		if (dd->pdev) {
			if (dd->port &&
			    dd->port->identify &&
			    dd->port->identify_valid) {
				strlcpy(id_buf,
					(char *) (dd->port->identify + 10), 21);
				status = *(dd->port->identify + 141);
			} else {
				memset(id_buf, 0, 42);
				status = 0;
			}

			if (dd->port &&
			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
				size += sprintf(&buf[size],
					" device %s %s (ftl rebuild %d %%)\n",
					dev_name(&dd->pdev->dev),
					id_buf,
					status);
			} else {
				size += sprintf(&buf[size],
					" device %s %s\n",
					dev_name(&dd->pdev->dev),
					id_buf);
			}
		}
	}

	size += sprintf(&buf[size], "Devices Being Removed:\n");
	list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
		if (dd->pdev) {
			if (dd->port &&
			    dd->port->identify &&
			    dd->port->identify_valid) {
				strlcpy(id_buf,
					(char *) (dd->port->identify+10), 21);
				status = *(dd->port->identify + 141);
			} else {
				memset(id_buf, 0, 42);
				status = 0;
			}

			if (dd->port &&
			    test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
				size += sprintf(&buf[size],
					" device %s %s (ftl rebuild %d %%)\n",
					dev_name(&dd->pdev->dev),
					id_buf,
					status);
			} else {
				size += sprintf(&buf[size],
					" device %s %s\n",
					dev_name(&dd->pdev->dev),
					id_buf);
			}
		}
	}
	spin_unlock_irqrestore(&dev_lock, flags);

	return size;
}

static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
						size_t len, loff_t *offset)
{
	struct driver_data *dd = (struct driver_data
*)f->private_data; 2557 int size = *offset; 2558 char *buf; 2559 int rv = 0; 2560 2561 if (!len || *offset) 2562 return 0; 2563 2564 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2565 if (!buf) { 2566 dev_err(&dd->pdev->dev, 2567 "Memory allocation: status buffer\n"); 2568 return -ENOMEM; 2569 } 2570 2571 size += show_device_status(NULL, buf); 2572 2573 *offset = size <= len ? size : len; 2574 size = copy_to_user(ubuf, buf, *offset); 2575 if (size) 2576 rv = -EFAULT; 2577 2578 kfree(buf); 2579 return rv ? rv : *offset; 2580 } 2581 2582 static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, 2583 size_t len, loff_t *offset) 2584 { 2585 struct driver_data *dd = (struct driver_data *)f->private_data; 2586 char *buf; 2587 u32 group_allocated; 2588 int size = *offset; 2589 int n, rv = 0; 2590 2591 if (!len || size) 2592 return 0; 2593 2594 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2595 if (!buf) { 2596 dev_err(&dd->pdev->dev, 2597 "Memory allocation: register buffer\n"); 2598 return -ENOMEM; 2599 } 2600 2601 size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); 2602 2603 for (n = dd->slot_groups-1; n >= 0; n--) 2604 size += sprintf(&buf[size], "%08X ", 2605 readl(dd->port->s_active[n])); 2606 2607 size += sprintf(&buf[size], "]\n"); 2608 size += sprintf(&buf[size], "H/ Command Issue : [ 0x"); 2609 2610 for (n = dd->slot_groups-1; n >= 0; n--) 2611 size += sprintf(&buf[size], "%08X ", 2612 readl(dd->port->cmd_issue[n])); 2613 2614 size += sprintf(&buf[size], "]\n"); 2615 size += sprintf(&buf[size], "H/ Completed : [ 0x"); 2616 2617 for (n = dd->slot_groups-1; n >= 0; n--) 2618 size += sprintf(&buf[size], "%08X ", 2619 readl(dd->port->completed[n])); 2620 2621 size += sprintf(&buf[size], "]\n"); 2622 size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n", 2623 readl(dd->port->mmio + PORT_IRQ_STAT)); 2624 size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n", 2625 readl(dd->mmio + HOST_IRQ_STAT)); 2626 size += sprintf(&buf[size], "\n"); 2627 2628 size += sprintf(&buf[size], "L/ Allocated : [ 0x"); 2629 2630 for (n = dd->slot_groups-1; n >= 0; n--) { 2631 if (sizeof(long) > sizeof(u32)) 2632 group_allocated = 2633 dd->port->allocated[n/2] >> (32*(n&1)); 2634 else 2635 group_allocated = dd->port->allocated[n]; 2636 size += sprintf(&buf[size], "%08X ", group_allocated); 2637 } 2638 size += sprintf(&buf[size], "]\n"); 2639 2640 size += sprintf(&buf[size], "L/ Commands in Q : [ 0x"); 2641 2642 for (n = dd->slot_groups-1; n >= 0; n--) { 2643 if (sizeof(long) > sizeof(u32)) 2644 group_allocated = 2645 dd->port->cmds_to_issue[n/2] >> (32*(n&1)); 2646 else 2647 group_allocated = dd->port->cmds_to_issue[n]; 2648 size += sprintf(&buf[size], "%08X ", group_allocated); 2649 } 2650 size += sprintf(&buf[size], "]\n"); 2651 2652 *offset = size <= len ? size : len; 2653 size = copy_to_user(ubuf, buf, *offset); 2654 if (size) 2655 rv = -EFAULT; 2656 2657 kfree(buf); 2658 return rv ? 
rv : *offset; 2659 } 2660 2661 static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, 2662 size_t len, loff_t *offset) 2663 { 2664 struct driver_data *dd = (struct driver_data *)f->private_data; 2665 char *buf; 2666 int size = *offset; 2667 int rv = 0; 2668 2669 if (!len || size) 2670 return 0; 2671 2672 buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL); 2673 if (!buf) { 2674 dev_err(&dd->pdev->dev, 2675 "Memory allocation: flag buffer\n"); 2676 return -ENOMEM; 2677 } 2678 2679 size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", 2680 dd->port->flags); 2681 size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", 2682 dd->dd_flag); 2683 2684 *offset = size <= len ? size : len; 2685 size = copy_to_user(ubuf, buf, *offset); 2686 if (size) 2687 rv = -EFAULT; 2688 2689 kfree(buf); 2690 return rv ? rv : *offset; 2691 } 2692 2693 static const struct file_operations mtip_device_status_fops = { 2694 .owner = THIS_MODULE, 2695 .open = simple_open, 2696 .read = mtip_hw_read_device_status, 2697 .llseek = no_llseek, 2698 }; 2699 2700 static const struct file_operations mtip_regs_fops = { 2701 .owner = THIS_MODULE, 2702 .open = simple_open, 2703 .read = mtip_hw_read_registers, 2704 .llseek = no_llseek, 2705 }; 2706 2707 static const struct file_operations mtip_flags_fops = { 2708 .owner = THIS_MODULE, 2709 .open = simple_open, 2710 .read = mtip_hw_read_flags, 2711 .llseek = no_llseek, 2712 }; 2713 2714 /* 2715 * Create the sysfs related attributes. 2716 * 2717 * @dd Pointer to the driver data structure. 2718 * @kobj Pointer to the kobj for the block device. 2719 * 2720 * return value 2721 * 0 Operation completed successfully. 2722 * -EINVAL Invalid parameter. 2723 */ 2724 static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj) 2725 { 2726 if (!kobj || !dd) 2727 return -EINVAL; 2728 2729 if (sysfs_create_file(kobj, &dev_attr_status.attr)) 2730 dev_warn(&dd->pdev->dev, 2731 "Error creating 'status' sysfs entry\n"); 2732 return 0; 2733 } 2734 2735 /* 2736 * Remove the sysfs related attributes. 2737 * 2738 * @dd Pointer to the driver data structure. 2739 * @kobj Pointer to the kobj for the block device. 2740 * 2741 * return value 2742 * 0 Operation completed successfully. 2743 * -EINVAL Invalid parameter. 
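 *
 * (Only the 'status' attribute created in mtip_hw_sysfs_init() is
 * removed here; the debugfs nodes are torn down separately by
 * mtip_hw_debugfs_exit().)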
2744 */ 2745 static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj) 2746 { 2747 if (!kobj || !dd) 2748 return -EINVAL; 2749 2750 sysfs_remove_file(kobj, &dev_attr_status.attr); 2751 2752 return 0; 2753 } 2754 2755 static int mtip_hw_debugfs_init(struct driver_data *dd) 2756 { 2757 if (!dfs_parent) 2758 return -1; 2759 2760 dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent); 2761 if (IS_ERR_OR_NULL(dd->dfs_node)) { 2762 dev_warn(&dd->pdev->dev, 2763 "Error creating node %s under debugfs\n", 2764 dd->disk->disk_name); 2765 dd->dfs_node = NULL; 2766 return -1; 2767 } 2768 2769 debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd, 2770 &mtip_flags_fops); 2771 debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd, 2772 &mtip_regs_fops); 2773 2774 return 0; 2775 } 2776 2777 static void mtip_hw_debugfs_exit(struct driver_data *dd) 2778 { 2779 if (dd->dfs_node) 2780 debugfs_remove_recursive(dd->dfs_node); 2781 } 2782 2783 static int mtip_free_orphan(struct driver_data *dd) 2784 { 2785 struct kobject *kobj; 2786 2787 if (dd->bdev) { 2788 if (dd->bdev->bd_holders >= 1) 2789 return -2; 2790 2791 bdput(dd->bdev); 2792 dd->bdev = NULL; 2793 } 2794 2795 mtip_hw_debugfs_exit(dd); 2796 2797 spin_lock(&rssd_index_lock); 2798 ida_remove(&rssd_index_ida, dd->index); 2799 spin_unlock(&rssd_index_lock); 2800 2801 if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) && 2802 test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { 2803 put_disk(dd->disk); 2804 } else { 2805 if (dd->disk) { 2806 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); 2807 if (kobj) { 2808 mtip_hw_sysfs_exit(dd, kobj); 2809 kobject_put(kobj); 2810 } 2811 del_gendisk(dd->disk); 2812 dd->disk = NULL; 2813 } 2814 if (dd->queue) { 2815 dd->queue->queuedata = NULL; 2816 blk_cleanup_queue(dd->queue); 2817 blk_mq_free_tag_set(&dd->tags); 2818 dd->queue = NULL; 2819 } 2820 } 2821 kfree(dd); 2822 return 0; 2823 } 2824 2825 /* 2826 * Perform any init/resume time hardware setup 2827 * 2828 * @dd Pointer to the driver data structure. 2829 * 2830 * return value 2831 * None 2832 */ 2833 static inline void hba_setup(struct driver_data *dd) 2834 { 2835 u32 hwdata; 2836 hwdata = readl(dd->mmio + HOST_HSORG); 2837 2838 /* interrupt bug workaround: use only 1 IS bit.*/ 2839 writel(hwdata | 2840 HSORG_DISABLE_SLOTGRP_INTR | 2841 HSORG_DISABLE_SLOTGRP_PXIS, 2842 dd->mmio + HOST_HSORG); 2843 } 2844 2845 static int mtip_device_unaligned_constrained(struct driver_data *dd) 2846 { 2847 return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0); 2848 } 2849 2850 /* 2851 * Detect the details of the product, and store anything needed 2852 * into the driver data structure. This includes product type and 2853 * version and number of slot groups. 2854 * 2855 * @dd Pointer to the driver data structure. 2856 * 2857 * return value 2858 * None 2859 */ 2860 static void mtip_detect_product(struct driver_data *dd) 2861 { 2862 u32 hwdata; 2863 unsigned int rev, slotgroups; 2864 2865 /* 2866 * HBA base + 0xFC [15:0] - vendor-specific hardware interface 2867 * info register: 2868 * [15:8] hardware/software interface rev# 2869 * [ 3] asic-style interface 2870 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style). 
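 *
 * Worked example (illustrative value, not from real hardware):
 * hwdata = 0x0209 decodes as asic-style (bit 3 set), HS rev 0x02,
 * and (1 + 1) = 2 slot groups, i.e. 64 command slots.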
 */
	hwdata = readl(dd->mmio + HOST_HSORG);

	dd->product_type = MTIP_PRODUCT_UNKNOWN;
	dd->slot_groups = 1;

	if (hwdata & 0x8) {
		dd->product_type = MTIP_PRODUCT_ASICFPGA;
		rev = (hwdata & HSORG_HWREV) >> 8;
		slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
		dev_info(&dd->pdev->dev,
			"ASIC-FPGA design, HS rev 0x%x, "
			"%i slot groups [%i slots]\n",
			rev,
			slotgroups,
			slotgroups * 32);

		if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
			dev_warn(&dd->pdev->dev,
				"Warning: driver only supports "
				"%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
			slotgroups = MTIP_MAX_SLOT_GROUPS;
		}
		dd->slot_groups = slotgroups;
		return;
	}

	dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
}

/*
 * Blocking wait for FTL rebuild to complete
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	FTL rebuild completed successfully
 *	-EFAULT	FTL rebuild error/timeout/interruption
 */
static int mtip_ftl_rebuild_poll(struct driver_data *dd)
{
	unsigned long timeout, cnt = 0, start;

	dev_warn(&dd->pdev->dev,
		"FTL rebuild in progress. Polling for completion.\n");

	start = jiffies;
	timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);

	do {
		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
				&dd->dd_flag)))
			return -EFAULT;
		if (mtip_check_surprise_removal(dd->pdev))
			return -EFAULT;

		if (mtip_get_identify(dd->port, NULL) < 0)
			return -EFAULT;

		if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
			MTIP_FTL_REBUILD_MAGIC) {
			ssleep(1);
			/*
			 * Print a progress message every 180 polls
			 * (the loop sleeps ~11 s per poll).
			 */
			if (cnt++ >= 180) {
				dev_warn(&dd->pdev->dev,
				"FTL rebuild in progress (%d secs).\n",
				jiffies_to_msecs(jiffies - start) / 1000);
				cnt = 0;
			}
		} else {
			dev_warn(&dd->pdev->dev,
				"FTL rebuild complete (%d secs).\n",
				jiffies_to_msecs(jiffies - start) / 1000);
			mtip_block_initialize(dd);
			return 0;
		}
		ssleep(10);
	} while (time_before(jiffies, timeout));

	/* Check for timeout */
	dev_err(&dd->pdev->dev,
		"Timed out waiting for FTL rebuild to complete (%d secs).\n",
		jiffies_to_msecs(jiffies - start) / 1000);
	return -EFAULT;
}

/*
 * service thread to issue queued commands
 *
 * @data Pointer to the driver data structure.
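 *
 * The thread sleeps on port->svc_wait and is woken to run taskfile
 * error handling, issue commands deferred by MTIP_PF_PAUSE_IO, poll
 * an FTL rebuild, or clean up after a surprise removal.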
 *
 * return value
 *	0
 */

static int mtip_service_thread(void *data)
{
	struct driver_data *dd = (struct driver_data *)data;
	unsigned long slot, slot_start, slot_wrap;
	unsigned int num_cmd_slots = dd->slot_groups * 32;
	struct mtip_port *port = dd->port;
	int ret;

	while (1) {
		if (kthread_should_stop() ||
			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
			goto st_out;
		clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);

		/*
		 * Sleep until a flag needs servicing, but not while an
		 * internal command is in progress or error handling is
		 * active.
		 */
		wait_event_interruptible(port->svc_wait, (port->flags) &&
			!(port->flags & MTIP_PF_PAUSE_IO));

		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);

		if (kthread_should_stop() ||
			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
			goto st_out;

		/* If I am an orphan, start self cleanup */
		if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags))
			break;

		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
				&dd->dd_flag)))
			goto st_out;

restart_eh:
		/* Demux bits: start with error handling */
		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
			mtip_handle_tfe(dd);
			clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
		}

		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
			goto restart_eh;

		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			slot = 1;
			/* used to restrict the loop to one iteration */
			slot_start = num_cmd_slots;
			slot_wrap = 0;
			while (1) {
				slot = find_next_bit(port->cmds_to_issue,
						num_cmd_slots, slot);
				if (slot_wrap == 1) {
					if ((slot_start >= slot) ||
						(slot >= num_cmd_slots))
						break;
				}
				if (unlikely(slot_start == num_cmd_slots))
					slot_start = slot;

				if (unlikely(slot == num_cmd_slots)) {
					slot = 1;
					slot_wrap = 1;
					continue;
				}

				/* Issue the command to the hardware */
				mtip_issue_ncq_command(port, slot);

				clear_bit(slot, port->cmds_to_issue);
			}

			clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
		}

		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
			if (mtip_ftl_rebuild_poll(dd) < 0)
				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
							&dd->dd_flag);
			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
		}
	}

	/* wait for pci remove to exit */
	while (1) {
		if (test_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag))
			break;
		msleep_interruptible(1000);
		if (kthread_should_stop())
			goto st_out;
	}

	while (1) {
		ret = mtip_free_orphan(dd);
		if (!ret) {
			/* NOTE: All data structures are invalid, do not
			 * access any here */
			return 0;
		}
		msleep_interruptible(1000);
		if (kthread_should_stop())
			goto st_out;
	}
st_out:
	return 0;
}

/*
 * DMA region teardown
 *
 * @dd Pointer to driver_data structure
 *
 * return value
 *	None
 */
static void mtip_dma_free(struct driver_data *dd)
{
	struct mtip_port *port = dd->port;

	if (port->block1)
		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					port->block1, port->block1_dma);

	if (port->command_list) {
		dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
				port->command_list, port->command_list_dma);
	}
}

/*
 * DMA region setup
 *
 * @dd Pointer to
driver_data structure
 *
 * return value
 *	0	Success
 *	-ENOMEM	Not enough free DMA region space to initialize driver
 */
static int mtip_dma_alloc(struct driver_data *dd)
{
	struct mtip_port *port = dd->port;

	/* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
	port->block1 =
		dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					&port->block1_dma, GFP_KERNEL);
	if (!port->block1)
		return -ENOMEM;
	memset(port->block1, 0, BLOCK_DMA_ALLOC_SZ);

	/* Allocate dma memory for command list */
	port->command_list =
		dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
					&port->command_list_dma, GFP_KERNEL);
	if (!port->command_list) {
		dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					port->block1, port->block1_dma);
		port->block1 = NULL;
		port->block1_dma = 0;
		return -ENOMEM;
	}
	memset(port->command_list, 0, AHCI_CMD_TBL_SZ);

	/* Setup all pointers into first DMA region */
	port->rxfis         = port->block1 + AHCI_RX_FIS_OFFSET;
	port->rxfis_dma     = port->block1_dma + AHCI_RX_FIS_OFFSET;
	port->identify      = port->block1 + AHCI_IDFY_OFFSET;
	port->identify_dma  = port->block1_dma + AHCI_IDFY_OFFSET;
	port->log_buf       = port->block1 + AHCI_SECTBUF_OFFSET;
	port->log_buf_dma   = port->block1_dma + AHCI_SECTBUF_OFFSET;
	port->smart_buf     = port->block1 + AHCI_SMARTBUF_OFFSET;
	port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;

	return 0;
}

static int mtip_hw_get_identify(struct driver_data *dd)
{
	struct smart_attr attr242;
	unsigned char *buf;
	int rv;

	if (mtip_get_identify(dd->port, NULL) < 0)
		return -EFAULT;

	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
		MTIP_FTL_REBUILD_MAGIC) {
		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
		return MTIP_FTL_REBUILD_MAGIC;
	}
	mtip_dump_identify(dd->port);

	/* check write protect, over temp and rebuild statuses */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
		}
		if (buf[288] == 0xBF) {
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed.\n");
			/* TODO */
		}
	}

	/* get write protect progress */
	memset(&attr242, 0, sizeof(struct smart_attr));
	if (mtip_get_smart_attr(dd->port, 242, &attr242))
		dev_warn(&dd->pdev->dev,
				"Unable to check write protect progress\n");
	else
		dev_info(&dd->pdev->dev,
				"Write protect progress: %u%% (%u blocks)\n",
				attr242.cur, le32_to_cpu(attr242.data));

	return rv;
}

/*
 * Called once for each card.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success, else an error code.
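 *
 * Ordering below matters: product detection sizes the slot groups,
 * the DMA regions must exist before mtip_init_port() programs them
 * into the port registers, and the IRQ is requested only once the
 * port has been started.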
 */
static int mtip_hw_init(struct driver_data *dd)
{
	int i;
	int rv;
	unsigned int num_command_slots;
	unsigned long timeout, timetaken;

	dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];

	mtip_detect_product(dd);
	if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
		rv = -EIO;
		goto out1;
	}
	num_command_slots = dd->slot_groups * 32;

	hba_setup(dd);

	dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
				dd->numa_node);
	if (!dd->port) {
		dev_err(&dd->pdev->dev,
			"Memory allocation: port structure\n");
		return -ENOMEM;
	}

	/* Continue workqueue setup */
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
		dd->work[i].port = dd->port;

	/* Enable unaligned IO constraints for some devices */
	if (mtip_device_unaligned_constrained(dd))
		dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
	else
		dd->unal_qdepth = 0;

	sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);

	/* Spinlock to prevent concurrent issue */
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
		spin_lock_init(&dd->port->cmd_issue_lock[i]);

	/* Set the port mmio base address. */
	dd->port->mmio = dd->mmio + PORT_OFFSET;
	dd->port->dd = dd;

	/* DMA allocations */
	rv = mtip_dma_alloc(dd);
	if (rv < 0)
		goto out1;

	/* Setup the pointers to the extended s_active and CI registers. */
	for (i = 0; i < dd->slot_groups; i++) {
		dd->port->s_active[i] =
			dd->port->mmio + i*0x80 + PORT_SCR_ACT;
		dd->port->cmd_issue[i] =
			dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
		dd->port->completed[i] =
			dd->port->mmio + i*0x80 + PORT_SDBV;
	}

	timetaken = jiffies;
	timeout = jiffies + msecs_to_jiffies(30000);
	while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
		 time_before(jiffies, timeout)) {
		mdelay(100);
	}
	if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
		timetaken = jiffies - timetaken;
		dev_warn(&dd->pdev->dev,
			"Surprise removal detected at %u ms\n",
			jiffies_to_msecs(timetaken));
		rv = -ENODEV;
		goto out2;
	}
	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
		timetaken = jiffies - timetaken;
		dev_warn(&dd->pdev->dev,
			"Removal detected at %u ms\n",
			jiffies_to_msecs(timetaken));
		rv = -EFAULT;
		goto out2;
	}

	/* Conditionally reset the HBA. */
	if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
		if (mtip_hba_reset(dd) < 0) {
			dev_err(&dd->pdev->dev,
				"Card did not reset within timeout\n");
			rv = -EIO;
			goto out2;
		}
	} else {
		/* Clear any pending interrupts on the HBA */
		writel(readl(dd->mmio + HOST_IRQ_STAT),
			dd->mmio + HOST_IRQ_STAT);
	}

	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Setup the ISR and enable interrupts. */
	rv = devm_request_irq(&dd->pdev->dev,
				dd->pdev->irq,
				mtip_irq_handler,
				IRQF_SHARED,
				dev_driver_string(&dd->pdev->dev),
				dd);

	if (rv) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate IRQ %d\n", dd->pdev->irq);
		goto out2;
	}
	irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));

	/* Enable interrupts on the HBA.
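	 * (HOST_CTL is read-modify-written so the other global control
	 * bits are preserved; HOST_IRQ_EN gates delivery of all port
	 * interrupts.)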
	 */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);

	init_waitqueue_head(&dd->port->svc_wait);

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
		rv = -EFAULT;
		goto out3;
	}

	return rv;

out3:
	/* Disable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
			dd->mmio + HOST_CTL);

	/* Release the IRQ. */
	irq_set_affinity_hint(dd->pdev->irq, NULL);
	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);

out2:
	mtip_deinit_port(dd->port);
	mtip_dma_free(dd);

out1:
	/* Free the memory allocated for the port structure. */
	kfree(dd->port);

	return rv;
}

static void mtip_standby_drive(struct driver_data *dd)
{
	if (dd->sr)
		return;

	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 */
	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
		if (mtip_standby_immediate(dd->port))
			dev_warn(&dd->pdev->dev,
				"STANDBY IMMEDIATE failed\n");
}

/*
 * Called to deinitialize an interface.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_exit(struct driver_data *dd)
{
	/*
	 * De-initialize the port and disable HBA interrupts, unless
	 * the device has already been surprise-removed.
	 */
	if (!dd->sr) {
		/* de-initialize the port. */
		mtip_deinit_port(dd->port);

		/* Disable interrupts on the HBA. */
		writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
				dd->mmio + HOST_CTL);
	}

	/* Release the IRQ. */
	irq_set_affinity_hint(dd->pdev->irq, NULL);
	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);

	/* Free dma regions */
	mtip_dma_free(dd);

	/* Free the memory allocated for the port structure. */
	kfree(dd->port);
	dd->port = NULL;

	return 0;
}

/*
 * Issue a Standby Immediate command to the device.
 *
 * This function is called by the Block Layer just before the
 * system powers off during a shutdown.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_shutdown(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 */
	if (!dd->sr && dd->port)
		mtip_standby_immediate(dd->port);

	return 0;
}

/*
 * Suspend function
 *
 * This function is called by the Block Layer just before the
 * system hibernates.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	Suspend was successful
 *	-EFAULT	Suspend was not successful
 */
static int mtip_hw_suspend(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive
	 * so that it saves its state.
	 */
	if (mtip_standby_immediate(dd->port) != 0) {
		dev_err(&dd->pdev->dev,
			"Failed standby-immediate command\n");
		return -EFAULT;
	}

	/* Disable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
			dd->mmio + HOST_CTL);
	mtip_deinit_port(dd->port);

	return 0;
}

/*
 * Resume function
 *
 * This function is called by the Block Layer as the
 * system resumes.
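 *
 * The resume path mirrors init: redo the HSORG quirks via hba_setup(),
 * reset the HBA, then re-init and restart the port before interrupts
 * are re-enabled.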
3468 * 3469 * @dd Pointer to the driver data structure. 3470 * 3471 * return value 3472 * 0 Resume was successful 3473 * -EFAULT Resume was not successful 3474 */ 3475 static int mtip_hw_resume(struct driver_data *dd) 3476 { 3477 /* Perform any needed hardware setup steps */ 3478 hba_setup(dd); 3479 3480 /* Reset the HBA */ 3481 if (mtip_hba_reset(dd) != 0) { 3482 dev_err(&dd->pdev->dev, 3483 "Unable to reset the HBA\n"); 3484 return -EFAULT; 3485 } 3486 3487 /* 3488 * Enable the port, DMA engine, and FIS reception specific 3489 * h/w in controller. 3490 */ 3491 mtip_init_port(dd->port); 3492 mtip_start_port(dd->port); 3493 3494 /* Enable interrupts on the HBA.*/ 3495 writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, 3496 dd->mmio + HOST_CTL); 3497 3498 return 0; 3499 } 3500 3501 /* 3502 * Helper function for reusing disk name 3503 * upon hot insertion. 3504 */ 3505 static int rssd_disk_name_format(char *prefix, 3506 int index, 3507 char *buf, 3508 int buflen) 3509 { 3510 const int base = 'z' - 'a' + 1; 3511 char *begin = buf + strlen(prefix); 3512 char *end = buf + buflen; 3513 char *p; 3514 int unit; 3515 3516 p = end - 1; 3517 *p = '\0'; 3518 unit = base; 3519 do { 3520 if (p == begin) 3521 return -EINVAL; 3522 *--p = 'a' + (index % unit); 3523 index = (index / unit) - 1; 3524 } while (index >= 0); 3525 3526 memmove(begin, p, end - p); 3527 memcpy(buf, prefix, strlen(prefix)); 3528 3529 return 0; 3530 } 3531 3532 /* 3533 * Block layer IOCTL handler. 3534 * 3535 * @dev Pointer to the block_device structure. 3536 * @mode ignored 3537 * @cmd IOCTL command passed from the user application. 3538 * @arg Argument passed from the user application. 3539 * 3540 * return value 3541 * 0 IOCTL completed successfully. 3542 * -ENOTTY IOCTL not supported or invalid driver data 3543 * structure pointer. 3544 */ 3545 static int mtip_block_ioctl(struct block_device *dev, 3546 fmode_t mode, 3547 unsigned cmd, 3548 unsigned long arg) 3549 { 3550 struct driver_data *dd = dev->bd_disk->private_data; 3551 3552 if (!capable(CAP_SYS_ADMIN)) 3553 return -EACCES; 3554 3555 if (!dd) 3556 return -ENOTTY; 3557 3558 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) 3559 return -ENOTTY; 3560 3561 switch (cmd) { 3562 case BLKFLSBUF: 3563 return -ENOTTY; 3564 default: 3565 return mtip_hw_ioctl(dd, cmd, arg); 3566 } 3567 } 3568 3569 #ifdef CONFIG_COMPAT 3570 /* 3571 * Block layer compat IOCTL handler. 3572 * 3573 * @dev Pointer to the block_device structure. 3574 * @mode ignored 3575 * @cmd IOCTL command passed from the user application. 3576 * @arg Argument passed from the user application. 3577 * 3578 * return value 3579 * 0 IOCTL completed successfully. 3580 * -ENOTTY IOCTL not supported or invalid driver data 3581 * structure pointer. 
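 *
 * HDIO_DRIVE_TASKFILE needs special handling below because the 32-bit
 * ide_task_request_t lays out out_size/in_size as compat_ulong_t; the
 * handler copies the fixed-size head of the structure and then patches
 * those two fields with get_user()/put_user().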
 */
static int mtip_block_compat_ioctl(struct block_device *dev,
				fmode_t mode,
				unsigned cmd,
				unsigned long arg)
{
	struct driver_data *dd = dev->bd_disk->private_data;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!dd)
		return -ENOTTY;

	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
		return -ENOTTY;

	switch (cmd) {
	case BLKFLSBUF:
		return -ENOTTY;
	case HDIO_DRIVE_TASKFILE: {
		struct mtip_compat_ide_task_request_s __user *compat_req_task;
		ide_task_request_t req_task;
		int compat_tasksize, outtotal, ret;

		compat_tasksize =
			sizeof(struct mtip_compat_ide_task_request_s);

		compat_req_task =
			(struct mtip_compat_ide_task_request_s __user *) arg;

		if (copy_from_user(&req_task, (void __user *) arg,
			compat_tasksize - (2 * sizeof(compat_long_t))))
			return -EFAULT;

		if (get_user(req_task.out_size, &compat_req_task->out_size))
			return -EFAULT;

		if (get_user(req_task.in_size, &compat_req_task->in_size))
			return -EFAULT;

		outtotal = sizeof(struct mtip_compat_ide_task_request_s);

		ret = exec_drive_taskfile(dd, (void __user *) arg,
						&req_task, outtotal);

		if (copy_to_user((void __user *) arg, &req_task,
				compat_tasksize -
				(2 * sizeof(compat_long_t))))
			return -EFAULT;

		if (put_user(req_task.out_size, &compat_req_task->out_size))
			return -EFAULT;

		if (put_user(req_task.in_size, &compat_req_task->in_size))
			return -EFAULT;

		return ret;
	}
	default:
		return mtip_hw_ioctl(dd, cmd, arg);
	}
}
#endif

/*
 * Obtain the geometry of the device.
 *
 * You may think that this function is obsolete, but some applications,
 * fdisk for example, still use CHS values. This function describes the
 * device as having 224 heads and 56 sectors per cylinder. These values are
 * chosen so that each cylinder is aligned on a 4KB boundary. Since a
 * partition is described in terms of a start and end cylinder this means
 * that each partition is also 4KB aligned. Non-aligned partitions adversely
 * affect performance.
 *
 * @dev Pointer to the block_device structure.
 * @geo Pointer to a hd_geometry structure.
 *
 * return value
 *	0	Operation completed successfully.
 *	-ENOTTY	An error occurred while reading the drive capacity.
 */
static int mtip_block_getgeo(struct block_device *dev,
				struct hd_geometry *geo)
{
	struct driver_data *dd = dev->bd_disk->private_data;
	sector_t capacity;

	if (!dd)
		return -ENOTTY;

	if (!(mtip_hw_get_capacity(dd, &capacity))) {
		dev_warn(&dd->pdev->dev,
			"Could not get drive capacity.\n");
		return -ENOTTY;
	}

	geo->heads = 224;
	geo->sectors = 56;
	sector_div(capacity, (geo->heads * geo->sectors));
	geo->cylinders = capacity;
	return 0;
}

/*
 * Block device operation function.
 *
 * This structure contains pointers to the functions required by the block
 * layer.
 */
static const struct block_device_operations mtip_block_ops = {
	.ioctl		= mtip_block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtip_block_compat_ioctl,
#endif
	.getgeo		= mtip_block_getgeo,
	.owner		= THIS_MODULE
};

/*
 * Block layer make request function.
 *
 * This function is called in blk-mq context to issue a started
 * request to the P320 device.
 *
 * @hctx Pointer to the hardware context; its queue is used to obtain
 *       the driver data structure.
 * @rq   Pointer to the request.
 *
 */
static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct driver_data *dd = hctx->queue->queuedata;
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
	unsigned int nents;

	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
							&dd->dd_flag))) {
			return -ENXIO;
		}
		if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
			return -ENODATA;
		}
		if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
							&dd->dd_flag) &&
				rq_data_dir(rq))) {
			return -ENODATA;
		}
		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
			return -ENODATA;
		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
			return -ENXIO;
	}

	if (rq->cmd_flags & REQ_DISCARD) {
		int err;

		err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
		blk_mq_end_request(rq, err);
		return 0;
	}

	/* Create the scatter list for this request. */
	nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);

	/* Issue the read/write. */
	mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
	return 0;
}

static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
				  struct request *rq)
{
	struct driver_data *dd = hctx->queue->queuedata;
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
		return false;

	/*
	 * If unaligned depth must be limited on this controller, mark it
	 * as unaligned if the IO isn't on a 4k boundary (start or length).
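	 *
	 * For example, an 8-sector write at LBA 16 stays aligned, while
	 * the same write at LBA 9, or a 10-sector write at LBA 8, is
	 * marked unaligned and must win a slot from the cmd_slot_unal
	 * semaphore before it may be queued.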
3766 */ 3767 if (blk_rq_sectors(rq) <= 64) { 3768 if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7)) 3769 cmd->unaligned = 1; 3770 } 3771 3772 if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal)) 3773 return true; 3774 3775 return false; 3776 } 3777 3778 static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, 3779 const struct blk_mq_queue_data *bd) 3780 { 3781 struct request *rq = bd->rq; 3782 int ret; 3783 3784 if (unlikely(mtip_check_unal_depth(hctx, rq))) 3785 return BLK_MQ_RQ_QUEUE_BUSY; 3786 3787 blk_mq_start_request(rq); 3788 3789 ret = mtip_submit_request(hctx, rq); 3790 if (likely(!ret)) 3791 return BLK_MQ_RQ_QUEUE_OK; 3792 3793 rq->errors = ret; 3794 return BLK_MQ_RQ_QUEUE_ERROR; 3795 } 3796 3797 static void mtip_free_cmd(void *data, struct request *rq, 3798 unsigned int hctx_idx, unsigned int request_idx) 3799 { 3800 struct driver_data *dd = data; 3801 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3802 3803 if (!cmd->command) 3804 return; 3805 3806 dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, 3807 cmd->command, cmd->command_dma); 3808 } 3809 3810 static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, 3811 unsigned int request_idx, unsigned int numa_node) 3812 { 3813 struct driver_data *dd = data; 3814 struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); 3815 u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; 3816 3817 cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, 3818 &cmd->command_dma, GFP_KERNEL); 3819 if (!cmd->command) 3820 return -ENOMEM; 3821 3822 memset(cmd->command, 0, CMD_DMA_ALLOC_SZ); 3823 3824 /* Point the command headers at the command tables. */ 3825 cmd->command_header = dd->port->command_list + 3826 (sizeof(struct mtip_cmd_hdr) * request_idx); 3827 cmd->command_header_dma = dd->port->command_list_dma + 3828 (sizeof(struct mtip_cmd_hdr) * request_idx); 3829 3830 if (host_cap_64) 3831 cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16); 3832 3833 cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); 3834 3835 sg_init_table(cmd->sg, MTIP_MAX_SG); 3836 return 0; 3837 } 3838 3839 static struct blk_mq_ops mtip_mq_ops = { 3840 .queue_rq = mtip_queue_rq, 3841 .map_queue = blk_mq_map_queue, 3842 .init_request = mtip_init_cmd, 3843 .exit_request = mtip_free_cmd, 3844 }; 3845 3846 /* 3847 * Block layer initialization function. 3848 * 3849 * This function is called once by the PCI layer for each P320 3850 * device that is connected to the system. 3851 * 3852 * @dd Pointer to the driver data structure. 3853 * 3854 * return value 3855 * 0 on success else an error code. 
3856 */ 3857 static int mtip_block_initialize(struct driver_data *dd) 3858 { 3859 int rv = 0, wait_for_rebuild = 0; 3860 sector_t capacity; 3861 unsigned int index = 0; 3862 struct kobject *kobj; 3863 unsigned char thd_name[16]; 3864 3865 if (dd->disk) 3866 goto skip_create_disk; /* hw init done, before rebuild */ 3867 3868 if (mtip_hw_init(dd)) { 3869 rv = -EINVAL; 3870 goto protocol_init_error; 3871 } 3872 3873 dd->disk = alloc_disk_node(MTIP_MAX_MINORS, dd->numa_node); 3874 if (dd->disk == NULL) { 3875 dev_err(&dd->pdev->dev, 3876 "Unable to allocate gendisk structure\n"); 3877 rv = -EINVAL; 3878 goto alloc_disk_error; 3879 } 3880 3881 /* Generate the disk name, implemented same as in sd.c */ 3882 do { 3883 if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL)) 3884 goto ida_get_error; 3885 3886 spin_lock(&rssd_index_lock); 3887 rv = ida_get_new(&rssd_index_ida, &index); 3888 spin_unlock(&rssd_index_lock); 3889 } while (rv == -EAGAIN); 3890 3891 if (rv) 3892 goto ida_get_error; 3893 3894 rv = rssd_disk_name_format("rssd", 3895 index, 3896 dd->disk->disk_name, 3897 DISK_NAME_LEN); 3898 if (rv) 3899 goto disk_index_error; 3900 3901 dd->disk->driverfs_dev = &dd->pdev->dev; 3902 dd->disk->major = dd->major; 3903 dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS; 3904 dd->disk->fops = &mtip_block_ops; 3905 dd->disk->private_data = dd; 3906 dd->index = index; 3907 3908 mtip_hw_debugfs_init(dd); 3909 3910 skip_create_disk: 3911 memset(&dd->tags, 0, sizeof(dd->tags)); 3912 dd->tags.ops = &mtip_mq_ops; 3913 dd->tags.nr_hw_queues = 1; 3914 dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS; 3915 dd->tags.reserved_tags = 1; 3916 dd->tags.cmd_size = sizeof(struct mtip_cmd); 3917 dd->tags.numa_node = dd->numa_node; 3918 dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; 3919 dd->tags.driver_data = dd; 3920 3921 rv = blk_mq_alloc_tag_set(&dd->tags); 3922 if (rv) { 3923 dev_err(&dd->pdev->dev, 3924 "Unable to allocate request queue\n"); 3925 goto block_queue_alloc_init_error; 3926 } 3927 3928 /* Allocate the request queue. */ 3929 dd->queue = blk_mq_init_queue(&dd->tags); 3930 if (IS_ERR(dd->queue)) { 3931 dev_err(&dd->pdev->dev, 3932 "Unable to allocate request queue\n"); 3933 rv = -ENOMEM; 3934 goto block_queue_alloc_init_error; 3935 } 3936 3937 dd->disk->queue = dd->queue; 3938 dd->queue->queuedata = dd; 3939 3940 /* Initialize the protocol layer. */ 3941 wait_for_rebuild = mtip_hw_get_identify(dd); 3942 if (wait_for_rebuild < 0) { 3943 dev_err(&dd->pdev->dev, 3944 "Protocol layer initialization failed\n"); 3945 rv = -EINVAL; 3946 goto init_hw_cmds_error; 3947 } 3948 3949 /* 3950 * if rebuild pending, start the service thread, and delay the block 3951 * queue creation and add_disk() 3952 */ 3953 if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) 3954 goto start_service_thread; 3955 3956 /* Set device limits. */ 3957 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); 3958 clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags); 3959 blk_queue_max_segments(dd->queue, MTIP_MAX_SG); 3960 blk_queue_physical_block_size(dd->queue, 4096); 3961 blk_queue_max_hw_sectors(dd->queue, 0xffff); 3962 blk_queue_max_segment_size(dd->queue, 0x400000); 3963 blk_queue_io_min(dd->queue, 4096); 3964 blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask); 3965 3966 /* 3967 * write back cache is not supported in the device. FUA depends on 3968 * write back cache support, hence setting flush support to zero. 
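	 *
	 * (Passing a zero mask to blk_queue_flush() advertises neither
	 * REQ_FLUSH nor REQ_FUA, so the block layer never sends flush
	 * requests to this queue.)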
3969 */ 3970 blk_queue_flush(dd->queue, 0); 3971 3972 /* Signal trim support */ 3973 if (dd->trim_supp == true) { 3974 set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags); 3975 dd->queue->limits.discard_granularity = 4096; 3976 blk_queue_max_discard_sectors(dd->queue, 3977 MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES); 3978 dd->queue->limits.discard_zeroes_data = 0; 3979 } 3980 3981 /* Set the capacity of the device in 512 byte sectors. */ 3982 if (!(mtip_hw_get_capacity(dd, &capacity))) { 3983 dev_warn(&dd->pdev->dev, 3984 "Could not read drive capacity\n"); 3985 rv = -EIO; 3986 goto read_capacity_error; 3987 } 3988 set_capacity(dd->disk, capacity); 3989 3990 /* Enable the block device and add it to /dev */ 3991 add_disk(dd->disk); 3992 3993 dd->bdev = bdget_disk(dd->disk, 0); 3994 /* 3995 * Now that the disk is active, initialize any sysfs attributes 3996 * managed by the protocol layer. 3997 */ 3998 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); 3999 if (kobj) { 4000 mtip_hw_sysfs_init(dd, kobj); 4001 kobject_put(kobj); 4002 } 4003 4004 if (dd->mtip_svc_handler) { 4005 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); 4006 return rv; /* service thread created for handling rebuild */ 4007 } 4008 4009 start_service_thread: 4010 sprintf(thd_name, "mtip_svc_thd_%02d", index); 4011 dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, 4012 dd, dd->numa_node, "%s", 4013 thd_name); 4014 4015 if (IS_ERR(dd->mtip_svc_handler)) { 4016 dev_err(&dd->pdev->dev, "service thread failed to start\n"); 4017 dd->mtip_svc_handler = NULL; 4018 rv = -EFAULT; 4019 goto kthread_run_error; 4020 } 4021 wake_up_process(dd->mtip_svc_handler); 4022 if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) 4023 rv = wait_for_rebuild; 4024 4025 return rv; 4026 4027 kthread_run_error: 4028 bdput(dd->bdev); 4029 dd->bdev = NULL; 4030 4031 /* Delete our gendisk. This also removes the device from /dev */ 4032 del_gendisk(dd->disk); 4033 4034 read_capacity_error: 4035 init_hw_cmds_error: 4036 blk_cleanup_queue(dd->queue); 4037 blk_mq_free_tag_set(&dd->tags); 4038 block_queue_alloc_init_error: 4039 mtip_hw_debugfs_exit(dd); 4040 disk_index_error: 4041 spin_lock(&rssd_index_lock); 4042 ida_remove(&rssd_index_ida, index); 4043 spin_unlock(&rssd_index_lock); 4044 4045 ida_get_error: 4046 put_disk(dd->disk); 4047 4048 alloc_disk_error: 4049 mtip_hw_exit(dd); /* De-initialize the protocol layer. */ 4050 4051 protocol_init_error: 4052 return rv; 4053 } 4054 4055 /* 4056 * Block layer deinitialization function. 4057 * 4058 * Called by the PCI layer as each P320 device is removed. 4059 * 4060 * @dd Pointer to the driver data structure. 4061 * 4062 * return value 4063 * 0 4064 */ 4065 static int mtip_block_remove(struct driver_data *dd) 4066 { 4067 struct kobject *kobj; 4068 4069 if (!dd->sr) { 4070 mtip_hw_debugfs_exit(dd); 4071 4072 if (dd->mtip_svc_handler) { 4073 set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags); 4074 wake_up_interruptible(&dd->port->svc_wait); 4075 kthread_stop(dd->mtip_svc_handler); 4076 } 4077 4078 /* Clean up the sysfs attributes, if created */ 4079 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) { 4080 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); 4081 if (kobj) { 4082 mtip_hw_sysfs_exit(dd, kobj); 4083 kobject_put(kobj); 4084 } 4085 } 4086 4087 mtip_standby_drive(dd); 4088 4089 /* 4090 * Delete our gendisk structure. 
This also removes the device
		 * from /dev
		 */
		if (dd->bdev) {
			bdput(dd->bdev);
			dd->bdev = NULL;
		}
		if (dd->disk) {
			if (dd->disk->queue) {
				del_gendisk(dd->disk);
				blk_cleanup_queue(dd->queue);
				blk_mq_free_tag_set(&dd->tags);
				dd->queue = NULL;
			} else
				put_disk(dd->disk);
		}
		dd->disk = NULL;

		spin_lock(&rssd_index_lock);
		ida_remove(&rssd_index_ida, dd->index);
		spin_unlock(&rssd_index_lock);
	} else {
		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
						dd->disk->disk_name);
	}

	/* De-initialize the protocol layer. */
	mtip_hw_exit(dd);

	return 0;
}

/*
 * Function called by the PCI layer just before the
 * machine shuts down.
 *
 * If a protocol layer shutdown function is present it will be called
 * by this function.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_shutdown(struct driver_data *dd)
{
	mtip_hw_shutdown(dd);

	/* Delete our gendisk structure and clean up the blk queue. */
	if (dd->disk) {
		dev_info(&dd->pdev->dev,
			"Shutting down %s ...\n", dd->disk->disk_name);

		if (dd->disk->queue) {
			del_gendisk(dd->disk);
			blk_cleanup_queue(dd->queue);
			blk_mq_free_tag_set(&dd->tags);
		} else
			put_disk(dd->disk);
		dd->disk = NULL;
		dd->queue = NULL;
	}

	spin_lock(&rssd_index_lock);
	ida_remove(&rssd_index_ida, dd->index);
	spin_unlock(&rssd_index_lock);
	return 0;
}

static int mtip_block_suspend(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev,
		"Suspending %s ...\n", dd->disk->disk_name);
	mtip_hw_suspend(dd);
	return 0;
}

static int mtip_block_resume(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev, "Resuming %s ...\n",
		dd->disk->disk_name);
	mtip_hw_resume(dd);
	return 0;
}

static void drop_cpu(int cpu)
{
	cpu_use[cpu]--;
}

static int get_least_used_cpu_on_node(int node)
{
	int cpu, least_used_cpu, least_cnt;
	const struct cpumask *node_mask;

	node_mask = cpumask_of_node(node);
	least_used_cpu = cpumask_first(node_mask);
	least_cnt = cpu_use[least_used_cpu];
	cpu = least_used_cpu;

	for_each_cpu(cpu, node_mask) {
		if (cpu_use[cpu] < least_cnt) {
			least_used_cpu = cpu;
			least_cnt = cpu_use[cpu];
		}
	}
	cpu_use[least_used_cpu]++;
	return least_used_cpu;
}

/* Helper for selecting a node in round robin mode */
static inline int mtip_get_next_rr_node(void)
{
	static int next_node = -1;

	if (next_node == -1) {
		next_node = first_online_node;
		return next_node;
	}

	next_node = next_online_node(next_node);
	if (next_node == MAX_NUMNODES)
		next_node = first_online_node;
	return next_node;
}

static DEFINE_HANDLER(0);
static DEFINE_HANDLER(1);
static DEFINE_HANDLER(2);
static DEFINE_HANDLER(3);
static DEFINE_HANDLER(4);
static DEFINE_HANDLER(5);
static DEFINE_HANDLER(6);
static DEFINE_HANDLER(7);

static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
{
	int pos;
	unsigned short pcie_dev_ctrl;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos) {
		pci_read_config_word(pdev,
			pos +
static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
{
	int pos;
	unsigned short pcie_dev_ctrl;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pos) {
		pci_read_config_word(pdev,
			pos + PCI_EXP_DEVCTL,
			&pcie_dev_ctrl);
		if (pcie_dev_ctrl & (PCI_EXP_DEVCTL_NOSNOOP_EN |
					PCI_EXP_DEVCTL_RELAX_EN)) {
			dev_info(&dd->pdev->dev,
				"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
				pdev->vendor, pdev->device);
			pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
						PCI_EXP_DEVCTL_RELAX_EN);
			pci_write_config_word(pdev,
				pos + PCI_EXP_DEVCTL,
				pcie_dev_ctrl);
		}
	}
}

static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
{
	/*
	 * This workaround is specific to AMD/ATI chipsets with a PCI upstream
	 * device with device id 0x5aXX.
	 */
	if (pdev->bus && pdev->bus->self) {
		if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
		    ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
			mtip_disable_link_opts(dd, pdev->bus->self);
		} else {
			/* Check further up the topology */
			struct pci_dev *parent_dev = pdev->bus->self;
			if (parent_dev->bus &&
				parent_dev->bus->parent &&
				parent_dev->bus->parent->self &&
				parent_dev->bus->parent->self->vendor ==
					PCI_VENDOR_ID_ATI &&
				(parent_dev->bus->parent->self->device &
					0xff00) == 0x5a00) {
				mtip_disable_link_opts(dd,
					parent_dev->bus->parent->self);
			}
		}
	}
}
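/*
 * Background for the two helpers above: on systems where the drive sits
 * below an AMD/ATI bridge (device IDs 0x5aXX), the Relaxed Ordering and
 * No-Snoop bits in the bridge's PCIe Device Control register are known to
 * misbehave with this device, so the workaround walks up to two levels of
 * the PCI topology and clears both bits before the drive is brought up.
 */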
/*
 * Called for each supported PCI device detected.
 *
 * This function allocates the private data structure, enables the
 * PCI device and then calls the block layer initialization function.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	int rv = 0;
	struct driver_data *dd = NULL;
	char cpu_list[256];
	const struct cpumask *node_mask;
	int cpu, i = 0, j = 0;
	int my_node = NUMA_NO_NODE;
	unsigned long flags;

	/* Allocate memory for this device's private data. */
	my_node = pcibus_to_node(pdev->bus);
	if (my_node != NUMA_NO_NODE) {
		if (!node_online(my_node))
			my_node = mtip_get_next_rr_node();
	} else {
		dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
		my_node = mtip_get_next_rr_node();
	}
	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
		cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());

	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
	if (dd == NULL) {
		dev_err(&pdev->dev,
			"Unable to allocate memory for driver data\n");
		return -ENOMEM;
	}

	/* Attach the private data to this PCI device. */
	pci_set_drvdata(pdev, dd);

	rv = pcim_enable_device(pdev);
	if (rv < 0) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto iomap_err;
	}

	/* Map BAR5 to memory. */
	rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
	if (rv < 0) {
		dev_err(&pdev->dev, "Unable to map regions\n");
		goto iomap_err;
	}

	/*
	 * Try a 64-bit streaming DMA mask; if the coherent mask cannot
	 * also be 64-bit, fall back to a 32-bit coherent mask.
	 */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));

		if (rv) {
			rv = pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(32));
			if (rv) {
				dev_warn(&pdev->dev,
					"64-bit DMA enable failed\n");
				goto setmask_err;
			}
		}
	}

	/* Copy the info we may need later into the private data structure. */
	dd->major = mtip_major;
	dd->instance = instance;
	dd->pdev = pdev;
	dd->numa_node = my_node;

	INIT_LIST_HEAD(&dd->online_list);
	INIT_LIST_HEAD(&dd->remove_list);

	memset(dd->workq_name, 0, 32);
	snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);

	dd->isr_workq = create_workqueue(dd->workq_name);
	if (!dd->isr_workq) {
		dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
		rv = -ENOMEM;
		goto block_initialize_err;
	}

	memset(cpu_list, 0, sizeof(cpu_list));

	node_mask = cpumask_of_node(dd->numa_node);
	if (!cpumask_empty(node_mask)) {
		for_each_cpu(cpu, node_mask) {
			snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
			j = strlen(cpu_list);
		}

		dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
			dd->numa_node,
			topology_physical_package_id(cpumask_first(node_mask)),
			nr_cpus_node(dd->numa_node),
			cpu_list);
	} else
		dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");

	dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
	dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
		cpu_to_node(dd->isr_binding), dd->isr_binding);

	/*
	 * Spread the eight slot-group workers over three CPUs on the local
	 * node; groups 3-7 reuse the CPUs picked for groups 0-2.
	 */
	/* first worker context always runs in ISR */
	dd->work[0].cpu_binding = dd->isr_binding;
	dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
	dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
	dd->work[3].cpu_binding = dd->work[0].cpu_binding;
	dd->work[4].cpu_binding = dd->work[1].cpu_binding;
	dd->work[5].cpu_binding = dd->work[2].cpu_binding;
	dd->work[6].cpu_binding = dd->work[2].cpu_binding;
	dd->work[7].cpu_binding = dd->work[1].cpu_binding;

	/* Log the bindings */
	for_each_present_cpu(cpu) {
		memset(cpu_list, 0, sizeof(cpu_list));
		for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
			if (dd->work[i].cpu_binding == cpu) {
				snprintf(&cpu_list[j], 256 - j, "%d ", i);
				j = strlen(cpu_list);
			}
		}
		if (j)
			dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
	}

	INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
	INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
	INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
	INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
	INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
	INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
	INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
	INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
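	/*
	 * The device signals completions through a single MSI vector; the
	 * ISR defers per-slot-group servicing to the work items initialized
	 * above, queued on dd->isr_workq on their bound CPUs.
	 */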
	pci_set_master(pdev);
	rv = pci_enable_msi(pdev);
	if (rv) {
		dev_warn(&pdev->dev,
			"Unable to enable MSI interrupt.\n");
		goto msi_initialize_err;
	}

	mtip_fix_ero_nosnoop(dd, pdev);

	/* Initialize the block layer. */
	rv = mtip_block_initialize(dd);
	if (rv < 0) {
		dev_err(&pdev->dev,
			"Unable to initialize block layer\n");
		goto block_initialize_err;
	}

	/*
	 * Increment the instance count so that each device has a unique
	 * instance number.
	 */
	instance++;
	if (rv != MTIP_FTL_REBUILD_MAGIC)
		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
	else
		rv = 0; /* device in rebuild state, return 0 from probe */

	/* Add to online list even if in ftl rebuild */
	spin_lock_irqsave(&dev_lock, flags);
	list_add(&dd->online_list, &online_list);
	spin_unlock_irqrestore(&dev_lock, flags);

	goto done;

block_initialize_err:
	pci_disable_msi(pdev);

msi_initialize_err:
	if (dd->isr_workq) {
		flush_workqueue(dd->isr_workq);
		destroy_workqueue(dd->isr_workq);
		drop_cpu(dd->work[0].cpu_binding);
		drop_cpu(dd->work[1].cpu_binding);
		drop_cpu(dd->work[2].cpu_binding);
	}
setmask_err:
	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);

iomap_err:
	kfree(dd);
	pci_set_drvdata(pdev, NULL);
	return rv;
done:
	return rv;
}

/*
 * Called for each probed device when the device is removed or the
 * driver is unloaded.
 *
 * return value
 *	None
 */
static void mtip_pci_remove(struct pci_dev *pdev)
{
	struct driver_data *dd = pci_get_drvdata(pdev);
	unsigned long flags, to;

	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);

	spin_lock_irqsave(&dev_lock, flags);
	list_del_init(&dd->online_list);
	list_add(&dd->remove_list, &removing_list);
	spin_unlock_irqrestore(&dev_lock, flags);

	mtip_check_surprise_removal(pdev);
	synchronize_irq(dd->pdev->irq);

	/* Wait (up to 4 seconds) for the completion workers to finish */
	to = jiffies + msecs_to_jiffies(4000);
	do {
		msleep(20);
	} while (atomic_read(&dd->irq_workers_active) != 0 &&
		time_before(jiffies, to));

	if (atomic_read(&dd->irq_workers_active) != 0) {
		dev_warn(&dd->pdev->dev,
			"Completion workers still active!\n");
	}

	/* Clean up the block layer. */
	mtip_block_remove(dd);

	if (dd->isr_workq) {
		flush_workqueue(dd->isr_workq);
		destroy_workqueue(dd->isr_workq);
		drop_cpu(dd->work[0].cpu_binding);
		drop_cpu(dd->work[1].cpu_binding);
		drop_cpu(dd->work[2].cpu_binding);
	}

	pci_disable_msi(pdev);

	spin_lock_irqsave(&dev_lock, flags);
	list_del_init(&dd->remove_list);
	spin_unlock_irqrestore(&dev_lock, flags);

	if (!dd->sr)
		kfree(dd);
	else
		set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);

	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
	pci_set_drvdata(pdev, NULL);
}
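/*
 * Note: on surprise removal (dd->sr) the driver data is deliberately not
 * freed above; MTIP_DDF_REMOVE_DONE_BIT is set instead so that code still
 * holding a reference can finish the cleanup once it is done with the
 * structure.
 */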
/*
 * Called for each probed device when the device is suspended.
 *
 * return value
 *	0 Success
 *	<0 Error
 */
static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	int rv = 0;
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (!dd) {
		dev_err(&pdev->dev,
			"Driver private data structure is NULL\n");
		return -EFAULT;
	}

	set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);

	/* Disable ports & interrupts then send standby immediate */
	rv = mtip_block_suspend(dd);
	if (rv < 0) {
		dev_err(&pdev->dev,
			"Failed to suspend controller\n");
		return rv;
	}

	/*
	 * Save the pci config space to pdev structure &
	 * disable the device
	 */
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* Move to low power state */
	pci_set_power_state(pdev, PCI_D3hot);

	return rv;
}

/*
 * Called for each probed device when the device is resumed.
 *
 * return value
 *	0 Success
 *	<0 Error
 */
static int mtip_pci_resume(struct pci_dev *pdev)
{
	int rv = 0;
	struct driver_data *dd;

	dd = pci_get_drvdata(pdev);
	if (!dd) {
		dev_err(&pdev->dev,
			"Driver private data structure is NULL\n");
		return -EFAULT;
	}

	/* Move the device to active state */
	pci_set_power_state(pdev, PCI_D0);

	/* Restore PCI configuration space */
	pci_restore_state(pdev);

	/* Enable the PCI device */
	rv = pcim_enable_device(pdev);
	if (rv < 0) {
		dev_err(&pdev->dev,
			"Failed to enable card during resume\n");
		goto err;
	}
	pci_set_master(pdev);

	/*
	 * Calls hbaReset, initPort, & startPort functions,
	 * then enables interrupts
	 */
	rv = mtip_block_resume(dd);
	if (rv < 0)
		dev_err(&pdev->dev, "Unable to resume\n");

err:
	clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);

	return rv;
}

/*
 * Shutdown routine
 *
 * return value
 *	None
 */
static void mtip_pci_shutdown(struct pci_dev *pdev)
{
	struct driver_data *dd = pci_get_drvdata(pdev);
	if (dd)
		mtip_block_shutdown(dd);
}

/* Table of device ids supported by this driver. */
static const struct pci_device_id mtip_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
	{ 0 }
};

/* Structure that describes the PCI driver functions. */
static struct pci_driver mtip_pci_driver = {
	.name		= MTIP_DRV_NAME,
	.id_table	= mtip_pci_tbl,
	.probe		= mtip_pci_probe,
	.remove		= mtip_pci_remove,
	.suspend	= mtip_pci_suspend,
	.resume		= mtip_pci_resume,
	.shutdown	= mtip_pci_shutdown,
};

MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
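/*
 * The driver uses the legacy PCI power-management hooks (.suspend/.resume
 * in struct pci_driver) rather than dev_pm_ops, so mtip_pci_suspend() and
 * mtip_pci_resume() save/restore the PCI configuration space and set the
 * power state themselves.
 */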
/*
 * Module initialization function.
 *
 * Called once when the module is loaded. This function allocates a major
 * block device number to the Cyclone devices and registers the PCI layer
 * of the driver.
 *
 * Return value
 *	0 on success else error code.
 */
static int __init mtip_init(void)
{
	int error;

	pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");

	spin_lock_init(&dev_lock);

	INIT_LIST_HEAD(&online_list);
	INIT_LIST_HEAD(&removing_list);

	/* Allocate a major block device number to use with this driver. */
	error = register_blkdev(0, MTIP_DRV_NAME);
	if (error <= 0) {
		pr_err("Unable to register block device (%d)\n",
			error);
		return -EBUSY;
	}
	mtip_major = error;

	dfs_parent = debugfs_create_dir("rssd", NULL);
	if (IS_ERR_OR_NULL(dfs_parent)) {
		pr_warn("Error creating debugfs parent\n");
		dfs_parent = NULL;
	}
	if (dfs_parent) {
		dfs_device_status = debugfs_create_file("device_status",
					S_IRUGO, dfs_parent, NULL,
					&mtip_device_status_fops);
		if (IS_ERR_OR_NULL(dfs_device_status)) {
			pr_err("Error creating device_status node\n");
			dfs_device_status = NULL;
		}
	}

	/* Register our PCI operations. */
	error = pci_register_driver(&mtip_pci_driver);
	if (error) {
		debugfs_remove(dfs_parent);
		unregister_blkdev(mtip_major, MTIP_DRV_NAME);
	}

	return error;
}

/*
 * Module de-initialization function.
 *
 * Called once when the module is unloaded. This function deallocates
 * the major block device number allocated by mtip_init() and
 * unregisters the PCI layer of the driver.
 *
 * Return value
 *	none
 */
static void __exit mtip_exit(void)
{
	/* Release the allocated major block device number. */
	unregister_blkdev(mtip_major, MTIP_DRV_NAME);

	/* Unregister the PCI driver. */
	pci_unregister_driver(&mtip_pci_driver);

	debugfs_remove_recursive(dfs_parent);
}

MODULE_AUTHOR("Micron Technology, Inc");
MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(MTIP_DRV_VERSION);

module_init(mtip_init);
module_exit(mtip_exit);
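/*
 * With debugfs mounted in the usual location, the nodes created in
 * mtip_init() appear under /sys/kernel/debug/rssd/ (the global
 * device_status file, plus per-device entries created elsewhere in this
 * driver).
 */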