/*
 *  libahci.c - Common AHCI SATA low-level routines
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  AHCI hardware documentation:
 *    http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 *    http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "ahci.h"

static int ahci_skip_host_reset;
int ahci_ignore_sss;
EXPORT_SYMBOL_GPL(ahci_ignore_sss);

module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");

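/*
 * Both parameters are owned by the libahci module, so with a modular build
 * they can be set as e.g. "modprobe libahci ignore_sss=1", and with a
 * built-in kernel as "libahci.ignore_sss=1" on the command line
 * (illustrative examples; the exact prefix follows the module name).
 */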

static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			unsigned hints);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size);


static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_enable_fbs(struct ata_port *ap);
static void ahci_disable_fbs(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static void ahci_dev_config(struct ata_device *dev);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
				   enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);

static ssize_t ahci_show_host_caps(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_cap2(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_show_host_version(struct device *dev,
				      struct device_attribute *attr, char *buf);
static ssize_t ahci_show_port_cmd(struct device *dev,
				  struct device_attribute *attr, char *buf);
static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf);
static ssize_t ahci_store_em_buffer(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size);

static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
		   ahci_read_em_buffer, ahci_store_em_buffer);

struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	&dev_attr_ahci_host_caps,
	&dev_attr_ahci_host_cap2,
	&dev_attr_ahci_host_version,
	&dev_attr_ahci_port_cmd,
	&dev_attr_em_buffer,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_attrs);

struct device_attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity,
	&dev_attr_unload_heads,
	NULL
};
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);

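/*
 * ahci_ops is the operations vector shared by the AHCI glue drivers.
 * A low-level driver normally inherits it and overrides only what it
 * needs, roughly like this (illustrative sketch only; the "my_*" names
 * are placeholders, not symbols defined in this file):
 *
 *	static struct ata_port_operations my_ahci_ops = {
 *		.inherits	= &ahci_ops,
 *		.hardreset	= my_hardreset,
 *	};
 */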
struct ata_port_operations ahci_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= ahci_pmp_qc_defer,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,
	.qc_fill_rtf		= ahci_qc_fill_rtf,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,
	.softreset		= ahci_softreset,
	.hardreset		= ahci_hardreset,
	.postreset		= ahci_postreset,
	.pmp_softreset		= ahci_softreset,
	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,
	.dev_config		= ahci_dev_config,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,
	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

	.set_lpm		= ahci_set_lpm,
	.em_show		= ahci_led_show,
	.em_store		= ahci_led_store,
	.sw_activity_show	= ahci_activity_show,
	.sw_activity_store	= ahci_activity_store,
#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};
EXPORT_SYMBOL_GPL(ahci_ops);

int ahci_em_messages = 1;
EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"AHCI Enclosure Management Message control (0 = off, 1 = on)");

static void ahci_enable_ahci(void __iomem *mmio)
{
	int i;
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_AHCI_EN)
		return;

	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
	for (i = 0; i < 5; i++) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL);	/* flush && sanity check */
		if (tmp & HOST_AHCI_EN)
			return;
		msleep(10);
	}

	WARN_ON(1);
}

static ssize_t ahci_show_host_caps(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;

	return sprintf(buf, "%x\n", hpriv->cap);
}

static ssize_t ahci_show_host_cap2(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;

	return sprintf(buf, "%x\n", hpriv->cap2);
}

static ssize_t ahci_show_host_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;

	return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
}

static ssize_t ahci_show_port_cmd(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	void __iomem *port_mmio = ahci_port_base(ap);

	return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
}

static ssize_t ahci_read_em_buffer(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	u32 em_ctl, msg;
	unsigned long flags;
	size_t count;
	int i;

	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EINVAL;
	}

	if (!(em_ctl & EM_CTL_MR)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EAGAIN;
	}

	if (!(em_ctl & EM_CTL_SMB))
		em_mmio += hpriv->em_buf_sz;

	count = hpriv->em_buf_sz;

	/* the count should not be larger than PAGE_SIZE */
	if (count > PAGE_SIZE) {
		if (printk_ratelimit())
			ata_port_printk(ap, KERN_WARNING,
					"EM read buffer size too large: "
					"buffer size %u, page size %lu\n",
					hpriv->em_buf_sz, PAGE_SIZE);
		count = PAGE_SIZE;
	}

	for (i = 0; i < count; i += 4) {
		msg = readl(em_mmio + i);
		buf[i] = msg & 0xff;
		buf[i + 1] = (msg >> 8) & 0xff;
		buf[i + 2] = (msg >> 16) & 0xff;
		buf[i + 3] = (msg >> 24) & 0xff;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return i;
}

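/*
 * em_buffer (sysfs, write side): gives userspace raw access to the SGPIO
 * enclosure-management message buffer.  Writes must be a multiple of four
 * bytes and no larger than the buffer size reported via HOST_EM_LOC, and
 * are only accepted when SGPIO-type enclosure management is present; a
 * successful write sets EM_CTL_TM so the controller transmits the new
 * buffer contents.
 */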
static ssize_t ahci_store_em_buffer(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ata_port *ap = ata_shost_to_port(shost);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *em_mmio = mmio + hpriv->em_loc;
	const unsigned char *msg_buf = buf;
	u32 em_ctl, msg;
	unsigned long flags;
	int i;

	/* check size validity */
	if (!(ap->flags & ATA_FLAG_EM) ||
	    !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
	    size % 4 || size > hpriv->em_buf_sz)
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	for (i = 0; i < size; i += 4) {
		msg = msg_buf[i] | msg_buf[i + 1] << 8 |
		      msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
		writel(msg, em_mmio + i);
	}

	writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

	spin_unlock_irqrestore(ap->lock, flags);

	return size;
}

/**
 *	ahci_save_initial_config - Save and fixup initial config values
 *	@dev: target AHCI device
 *	@hpriv: host private area to store config values
 *	@force_port_map: force port map to a specified value
 *	@mask_port_map: mask out particular bits from port map
 *
 *	Some registers containing configuration info might be set up by
 *	the BIOS and might be cleared on reset.  This function saves the
 *	initial values of those registers into @hpriv such that they
 *	can be restored after controller reset.
 *
 *	If inconsistent, config values are fixed up by this function.
 *
 *	LOCKING:
 *	None.
 */
void ahci_save_initial_config(struct device *dev,
			      struct ahci_host_priv *hpriv,
			      unsigned int force_port_map,
			      unsigned int mask_port_map)
{
	void __iomem *mmio = hpriv->mmio;
	u32 cap, cap2, vers, port_map;
	int i;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset.  Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* CAP2 register is only defined for AHCI 1.2 and later */
	vers = readl(mmio + HOST_VERSION);
	if ((vers >> 16) > 1 ||
	    ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
		hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
	else
		hpriv->saved_cap2 = cap2 = 0;

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
		dev_printk(KERN_INFO, dev,
			   "controller can do NCQ, turning on CAP_NCQ\n");
		cap |= HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
		dev_printk(KERN_INFO, dev,
			   "controller can't do SNTF, turning off CAP_SNTF\n");
		cap &= ~HOST_CAP_SNTF;
	}

	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
		dev_printk(KERN_INFO, dev,
			   "controller can do FBS, turning on CAP_FBS\n");
		cap |= HOST_CAP_FBS;
	}

	if (force_port_map && port_map != force_port_map) {
		dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
			   port_map, force_port_map);
		port_map = force_port_map;
	}

	if (mask_port_map) {
		dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
			   port_map,
			   port_map & mask_port_map);
		port_map &= mask_port_map;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_printk(KERN_WARNING, dev,
				   "implemented port map (0x%x) contains more "
				   "ports than nr_ports (%u), using nr_ports\n",
				   port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports */
	if (!port_map) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_printk(KERN_WARNING, dev,
			   "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->cap2 = cap2;
	hpriv->port_map = port_map;
}
EXPORT_SYMBOL_GPL(ahci_save_initial_config);

/**
 *	ahci_restore_initial_config - Restore initial config
 *	@host: target ATA host
 *
 *	Restore initial config stored by ahci_save_initial_config().
 *
 *	LOCKING:
 *	None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	if (hpriv->saved_cap2)
		writel(hpriv->saved_cap2, mmio + HOST_CAP2);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL);	/* flush */
}

static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
	static const int offset[] = {
		[SCR_STATUS]		= PORT_SCR_STAT,
		[SCR_CONTROL]		= PORT_SCR_CTL,
		[SCR_ERROR]		= PORT_SCR_ERR,
		[SCR_ACTIVE]		= PORT_SCR_ACT,
		[SCR_NOTIFICATION]	= PORT_SCR_NTF,
	};
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (sc_reg < ARRAY_SIZE(offset) &&
	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
		return offset[sc_reg];
	return 0;
}

static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (offset) {
		*val = readl(port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	int offset = ahci_scr_offset(link->ap, sc_reg);

	if (offset) {
		writel(val, port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}
EXPORT_SYMBOL_GPL(ahci_start_engine);

int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop.  This could be as long as 500 msec */
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_stop_engine);

static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}

static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			unsigned int hints)
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);

	if (policy != ATA_LPM_MAX_POWER) {
		/*
		 * Disable interrupts on Phy Ready.  This keeps us from
		 * getting woken up due to spurious phy ready
		 * interrupts.
		 */
		pp->intr_mask &= ~PORT_IRQ_PHYRDY;
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

		sata_link_scr_lpm(link, policy, false);
	}

	if (hpriv->cap & HOST_CAP_ALPM) {
		u32 cmd = readl(port_mmio + PORT_CMD);

		if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
			cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
			cmd |= PORT_CMD_ICC_ACTIVE;

			writel(cmd, port_mmio + PORT_CMD);
			readl(port_mmio + PORT_CMD);

			/* wait 10ms to be sure we've come out of LPM state */
			ata_msleep(ap, 10);
		} else {
			cmd |= PORT_CMD_ALPE;
			if (policy == ATA_LPM_MIN_POWER)
				cmd |= PORT_CMD_ASP;

			/* write out new cmd value */
			writel(cmd, port_mmio + PORT_CMD);
		}
	}

	if (policy == ATA_LPM_MAX_POWER) {
		sata_link_scr_lpm(link, policy, false);

		/* turn PHYRDY IRQ back on */
		pp->intr_mask |= PORT_IRQ_PHYRDY;
		writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
	}

	return 0;
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	ssize_t rc;
	int i;

	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);

	/* turn on LEDs */
	if (ap->flags & ATA_FLAG_EM) {
		ata_for_each_link(link, ap, EDGE) {
			emp = &pp->em_priv[link->pmp];

			/* EM Transmit bit may be busy during init */
			for (i = 0; i < EM_MAX_RETRY; i++) {
				rc = ahci_transmit_led_message(ap,
							       emp->led_state,
							       4);
				if (rc == -EBUSY)
					ata_msleep(ap, 1);
				else
					break;
			}
		}
	}

	if (ap->flags & ATA_FLAG_SW_ACTIVITY)
		ata_for_each_link(link, ap, EDGE)
			ahci_init_sw_activity(link);

}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed stop FIS RX";
		return rc;
	}

	return 0;
}

int ahci_reset_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 tmp;

	/* We must be in AHCI mode before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/*
		 * To perform host reset, the OS should set HOST_RESET
		 * and poll until this bit is read to be "0".
		 * The reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET,
					HOST_RESET, 10, 1000);

		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset.  Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_controller);

static void ahci_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
		return;

	emp->activity++;
	if (!timer_pending(&emp->timer))
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}

static void ahci_sw_activity_blink(unsigned long arg)
{
	struct ata_link *link = (struct ata_link *)arg;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	unsigned long led_message = emp->led_state;
	u32 activity_led_state;
	unsigned long flags;

	led_message &= EM_MSG_LED_VALUE;
	led_message |= ap->port_no | (link->pmp << 8);

	/* check to see if we've had activity.  If so,
	 * toggle state of LED and reset timer.  If not,
	 * turn LED to desired idle state.
	 */
	spin_lock_irqsave(ap->lock, flags);
	if (emp->saved_activity != emp->activity) {
		emp->saved_activity = emp->activity;
		/* get the current LED state */
		activity_led_state = led_message & EM_MSG_LED_VALUE_ON;

		if (activity_led_state)
			activity_led_state = 0;
		else
			activity_led_state = 1;

		/* clear old state */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;

		/* toggle state */
		led_message |= (activity_led_state << 16);
		mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
	} else {
		/* switch to idle */
		led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
		if (emp->blink_policy == BLINK_OFF)
			led_message |= (1 << 16);
	}
	spin_unlock_irqrestore(ap->lock, flags);
	ahci_transmit_led_message(ap, led_message, 4);
}

static void ahci_init_sw_activity(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* init activity stats, setup timer */
	emp->saved_activity = emp->activity = 0;
	setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);

	/* check our blink policy and set flag for link if it's enabled */
	if (emp->blink_policy)
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
}

int ahci_reset_em(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;

	em_ctl = readl(mmio + HOST_EM_CTL);
	if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
		return -EINVAL;

	writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
	return 0;
}
EXPORT_SYMBOL_GPL(ahci_reset_em);

static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					ssize_t size)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 em_ctl;
	u32 message[] = {0, 0};
	unsigned long flags;
	int pmp;
	struct ahci_em_priv *emp;

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow another transmission
	 */
	em_ctl = readl(mmio + HOST_EM_CTL);
	if (em_ctl & EM_CTL_TM) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EBUSY;
	}

	if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
		/*
		 * create message header - this is all zero except for
		 * the message size, which is 4 bytes.
		 */
		message[0] |= (4 << 8);

		/* ignore 0:4 of byte zero, fill in port info yourself */
		message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);

		/* write message to EM_LOC */
		writel(message[0], mmio + hpriv->em_loc);
		writel(message[1], mmio + hpriv->em_loc+4);

		/*
		 * tell hardware to transmit the message
		 */
		writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
	}

	/* save off new led state for port/slot */
	emp->led_state = state;

	spin_unlock_irqrestore(ap->lock, flags);
	return size;
}

static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
{
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_link *link;
	struct ahci_em_priv *emp;
	int rc = 0;

	ata_for_each_link(link, ap, EDGE) {
		emp = &pp->em_priv[link->pmp];
		rc += sprintf(buf, "%lx\n", emp->led_state);
	}
	return rc;
}

static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size)
{
	int state;
	int pmp;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp;

	state = simple_strtoul(buf, NULL, 0);

	/* get the slot number from the message */
	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
	if (pmp < EM_MAX_SLOTS)
		emp = &pp->em_priv[pmp];
	else
		return -EINVAL;

	/* mask off the activity bits if we are in sw_activity
	 * mode, user should turn off sw_activity before setting
	 * activity led through em_message
	 */
	if (emp->blink_policy)
		state &= ~EM_MSG_LED_VALUE_ACTIVITY;

	return ahci_transmit_led_message(ap, state, size);
}

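/*
 * sw_activity blink policies stored in emp->blink_policy:
 *   OFF       - software-driven activity blinking is disabled
 *   BLINK_ON  - LED is off while the link is idle and blinks on activity
 *   BLINK_OFF - LED is on while the link is idle and blinks on activity
 */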
static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	u32 port_led_state = emp->led_state;

	/* save the desired Activity LED behavior */
	if (val == OFF) {
		/* clear LFLAG */
		link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);

		/* set the LED to OFF */
		port_led_state &= EM_MSG_LED_VALUE_OFF;
		port_led_state |= (ap->port_no | (link->pmp << 8));
		ahci_transmit_led_message(ap, port_led_state, 4);
	} else {
		link->flags |= ATA_LFLAG_SW_ACTIVITY;
		if (val == BLINK_OFF) {
			/* set LED to ON for idle */
			port_led_state &= EM_MSG_LED_VALUE_OFF;
			port_led_state |= (ap->port_no | (link->pmp << 8));
			port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
			ahci_transmit_led_message(ap, port_led_state, 4);
		}
	}
	emp->blink_policy = val;
	return 0;
}

static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

	/* display the saved value of activity behavior for this
	 * disk.
	 */
	return sprintf(buf, "%d\n", emp->blink_policy);
}

static void ahci_port_init(struct device *dev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_warn(dev, "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}

void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	int i;
	void __iomem *port_mmio;
	u32 tmp;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(host->dev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);

static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah		= (tmp >> 24)	& 0xff;
	tf.lbam		= (tmp >> 16)	& 0xff;
	tf.lbal		= (tmp >> 8)	& 0xff;
	tf.nsect	= (tmp)		& 0xff;

	return ata_dev_classify(&tf);
}

void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
EXPORT_SYMBOL_GPL(ahci_fill_cmd_slot);

int ahci_kick_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO?
	 * always do CLO if PMP is attached (AHCI-1.3 9.2)
	 */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !sata_pmp_attached(ap)) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}
EXPORT_SYMBOL_GPL(ahci_kick_engine);

static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE,
					0x1, 0x1, 1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

int ahci_do_softreset(struct ata_link *link, unsigned int *class,
		      int pmp, unsigned long deadline,
		      int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first D2H Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	ata_msleep(ap, 1);

	/* issue the second D2H Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
		/*
		 * Workaround for cases where link online status can't
		 * be trusted.  Treat device readiness timeout as link
		 * offline.
		 */
		ata_link_printk(link, KERN_INFO,
				"device not ready, treating as offline\n");
		*class = ATA_DEV_NONE;
	} else if (rc) {
		/* link occupied, -ENODEV too is an error */
		reason = "device not ready";
		goto fail;
	} else
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

int ahci_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	return ata_check_ready(status);
}
EXPORT_SYMBOL_GPL(ahci_check_ready);

static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);

	DPRINTK("ENTER\n");

	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}

static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}

static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;

	if (!sata_pmp_attached(ap) || pp->fbs_enabled)
		return ata_std_qc_defer(qc);
	else
		return sata_pmp_qc_defer_cmd_switch(qc);
}

static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}

static void ahci_fbs_dec_intr(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs = readl(port_mmio + PORT_FBS);
	int retries = 3;

	DPRINTK("ENTER\n");
	BUG_ON(!pp->fbs_enabled);

	/* time to wait for DEC is not specified by AHCI spec,
	 * add a retry loop for safety.
	 */
	writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	while ((fbs & PORT_FBS_DEC) && retries--) {
		udelay(1);
		fbs = readl(port_mmio + PORT_FBS);
	}

	if (fbs & PORT_FBS_DEC)
		dev_printk(KERN_ERR, ap->host->dev,
			   "failed to clear device error\n");
}

static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	bool fbs_need_dec = false;
	u32 serror;

	/* determine active link with error */
	if (pp->fbs_enabled) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 fbs = readl(port_mmio + PORT_FBS);
		int pmp = fbs >> PORT_FBS_DWE_OFFSET;

		if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
		    ata_link_online(&ap->pmp_link[pmp])) {
			link = &ap->pmp_link[pmp];
			fbs_need_dec = true;
		}

	} else
		ata_for_each_link(link, ap, EDGE)
			if (ata_link_active(link))
				break;

	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(&ap->link, SCR_ERROR, &serror);
	ahci_scr_write(&ap->link, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x" ,
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		if (fbs_need_dec)
			active_ehi->err_mask |= AC_ERR_DEV;
		else {
			host_ehi->err_mask |= AC_ERR_ATA_BUS;
			host_ehi->action |= ATA_EH_RESET;
		}

		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
			irq_stat & PORT_IRQ_CONNECT ?
			"connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else if (fbs_need_dec) {
		ata_link_abort(link);
		ahci_fbs_dec_intr(ap);
	} else
		ata_port_abort(ap);
}

static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active = 0;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* if LPM is enabled, PHYRDY doesn't mean anything */
	if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 *
			 * Lack of SNotification should not appear in
			 * ahci 1.2, so the workaround is unnecessary
			 * when FBS is enabled.
			 */
			if (pp->fbs_enabled)
				WARN_ON_ONCE(1);
			else {
				const __le32 *f = pp->rx_fis + RX_FIS_SDB;
				u32 f0 = le32_to_cpu(f[0]);
				if (f0 & (1 << 15))
					sata_async_notification(ap);
			}
		}
	}

	/* pp->active_link is not reliable once FBS is enabled, both
	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
	 * NCQ and non-NCQ commands may be in flight at the same time.
	 */
	if (pp->fbs_enabled) {
		if (ap->qc_active) {
			qc_active = readl(port_mmio + PORT_SCR_ACT);
			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
		}
	} else {
		/* pp->active_link is valid iff any command is in flight */
		if (ap->qc_active && pp->active_link->sactive)
			qc_active = readl(port_mmio + PORT_SCR_ACT);
		else
			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
	}


	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}

irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = hpriv->mmio;

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		handled = 1;
	}

	/* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(ahci_interrupt);

static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);

	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		u32 fbs = readl(port_mmio + PORT_FBS);
		fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
		fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
		writel(fbs, port_mmio + PORT_FBS);
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}

static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *rx_fis = pp->rx_fis;

	if (pp->fbs_enabled)
		rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;

	/*
	 * After a successful execution of an ATA PIO data-in command,
	 * the device doesn't send D2H Reg FIS to update the TF and
	 * the host should take TF and E_Status from the preceding PIO
	 * Setup FIS.
	 */
	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
	    !(qc->flags & ATA_QCFLAG_FAILED)) {
		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
	} else
		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);

	return true;
}

static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->mmio;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	sata_pmp_error_handler(ap);

	if (!ata_dev_enabled(ap->link.device))
		ahci_stop_engine(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap);
}

static void ahci_enable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
		return;
	}

	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN) {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
		pp->fbs_enabled = true;
		pp->fbs_last_dev = -1; /* initialization */
	} else
		dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");

	ahci_start_engine(ap);
}

static void ahci_disable_fbs(struct ata_port *ap)
{
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 fbs;
	int rc;

	if (!pp->fbs_supported)
		return;

	fbs = readl(port_mmio + PORT_FBS);
	if ((fbs & PORT_FBS_EN) == 0) {
		pp->fbs_enabled = false;
		return;
	}

	rc = ahci_stop_engine(ap);
	if (rc)
		return;

	writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
	fbs = readl(port_mmio + PORT_FBS);
	if (fbs & PORT_FBS_EN)
		dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
	else {
		dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
		pp->fbs_enabled = false;
	}

	ahci_start_engine(ap);
}

static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	ahci_enable_fbs(ap);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	ahci_disable_fbs(ap);

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
EXPORT_SYMBOL_GPL(ahci_port_resume);

#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}
#endif

static int ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
			dev_printk(KERN_INFO, dev,
				   "port %d can do FBS, forcing FBSCP\n",
				   ap->port_no);
			pp->fbs_supported = true;
		} else
			dev_printk(KERN_WARNING, dev,
				   "port %d is not capable of FBS\n",
				   ap->port_no);
	}

	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, dma_sz);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}

static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}

void ahci_print_info(struct ata_host *host, const char *scc_s)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 vers, cap, cap2, impl, speed;
	const char *speed_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	cap2 = hpriv->cap2;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else if (speed == 3)
		speed_s = "6";
	else
		speed_s = "?";

	dev_info(host->dev,
		"AHCI %02x%02x.%02x%02x "
		"%u slots %u ports %s Gbps 0x%x impl %s mode\n"
		,

		(vers >> 24) & 0xff,
		(vers >> 16) & 0xff,
		(vers >> 8) & 0xff,
		vers & 0xff,

		((cap >> 8) & 0x1f) + 1,
		(cap & 0x1f) + 1,
		speed_s,
		impl,
		scc_s);

	dev_info(host->dev,
		"flags: "
		"%s%s%s%s%s%s%s"
		"%s%s%s%s%s%s%s"
		"%s%s%s%s%s%s\n"
		,

		cap & HOST_CAP_64 ? "64bit " : "",
		cap & HOST_CAP_NCQ ? "ncq " : "",
		cap & HOST_CAP_SNTF ? "sntf " : "",
		cap & HOST_CAP_MPS ? "ilck " : "",
		cap & HOST_CAP_SSS ? "stag " : "",
		cap & HOST_CAP_ALPM ? "pm " : "",
		cap & HOST_CAP_LED ? "led " : "",
		cap & HOST_CAP_CLO ? "clo " : "",
		cap & HOST_CAP_ONLY ? "only " : "",
		cap & HOST_CAP_PMP ? "pmp " : "",
		cap & HOST_CAP_FBS ? "fbs " : "",
		cap & HOST_CAP_PIO_MULTI ? "pio " : "",
		cap & HOST_CAP_SSC ? "slum " : "",
		cap & HOST_CAP_PART ? "part " : "",
		cap & HOST_CAP_CCC ? "ccc " : "",
		cap & HOST_CAP_EMS ? "ems " : "",
		cap & HOST_CAP_SXS ? "sxs " : "",
		cap2 & HOST_CAP2_APST ? "apst " : "",
		cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
		cap2 & HOST_CAP2_BOH ? "boh " : ""
		);
}
EXPORT_SYMBOL_GPL(ahci_print_info);

void ahci_set_em_messages(struct ahci_host_priv *hpriv,
			  struct ata_port_info *pi)
{
	u8 messages;
	void __iomem *mmio = hpriv->mmio;
	u32 em_loc = readl(mmio + HOST_EM_LOC);
	u32 em_ctl = readl(mmio + HOST_EM_CTL);

	if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
		return;

	messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;

	if (messages) {
		/* store em_loc */
		hpriv->em_loc = ((em_loc >> 16) * 4);
		hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
		hpriv->em_msg_type = messages;
		pi->flags |= ATA_FLAG_EM;
		if (!(em_ctl & EM_CTL_ALHD))
			pi->flags |= ATA_FLAG_SW_ACTIVITY;
	}
}
EXPORT_SYMBOL_GPL(ahci_set_em_messages);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
MODULE_LICENSE("GPL");