// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *	Copyright 2006 Applied Micro Circuits Corporation
 *	COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include <trace/events/libata.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* These two are defined in "libata.h" */
#undef	DRV_NAME
#undef	DRV_VERSION

#define DRV_NAME	"sata-dwc"
#define DRV_VERSION	"1.3"

#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */

enum {
	SATA_DWC_MAX_PORTS = 1,

	SATA_DWC_SCR_OFFSET = 0x24,
	SATA_DWC_REG_OFFSET = 0x64,
};

/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transac size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Recvd BIST pattern def register */
	u32 rxbistpd1;		/* Recvd BIST data dword1 */
	u32 rxbistpd2;		/* Recvd BIST pattern data dword2 */
	u32 txbistpd;		/* Trans BIST pattern def register */
	u32 txbistpd1;		/* Trans BIST data dword1 */
	u32 txbistpd2;		/* Trans BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};

enum {
	SCR_SCONTROL_DET_ENABLE		= 0x00000001,
	SCR_SSTATUS_DET_PRESENT		= 0x00000001,
	SCR_SERROR_DIAG_X		= 0x04000000,
/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_RXFIFO_DEPTH		= 0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN	= 0x00000004,
	SATA_DWC_DMACR_TXCHEN		= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN		= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR	= SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT		= 0x00000001,
	SATA_DWC_INTPR_NEWFP		= 0x00000002,
	SATA_DWC_INTPR_PMABRT		= 0x00000004,
	SATA_DWC_INTPR_ERR		= 0x00000008,
	SATA_DWC_INTPR_NEWBIST		= 0x00000010,
	SATA_DWC_INTPR_IPF		= 0x10000000,
	SATA_DWC_INTMR_DMATM		= 0x00000001,
	SATA_DWC_INTMR_NEWFPM		= 0x00000002,
	SATA_DWC_INTMR_PMABRTM		= 0x00000004,
	SATA_DWC_INTMR_ERRM		= 0x00000008,
	SATA_DWC_INTMR_NEWBISTM		= 0x00000010,
	SATA_DWC_LLCR_SCRAMEN		= 0x00000001,
	SATA_DWC_LLCR_DESCRAMEN		= 0x00000002,
	SATA_DWC_LLCR_RPDEN		= 0x00000004,
/* These are all error bits; zeroes are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS	= 0x0FFF0F03
};

#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
						 << 16)
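
/*
 * Worked examples for the helpers above, derived directly from the macro
 * definitions:
 *
 *   SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) = (64 / 4) & 0x01FF = 0x10
 *   SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT) = 0x10 << 16        = 0x00100000
 *
 * so programming both burst sizes to the 64-byte default yields a DBTSR
 * value of 0x00100010 (16 words in each direction). Likewise
 * SATA_DWC_DMACR_TX_CLEAR(0x05) = (0x05 & ~0x05) | 0x04 = 0x04: the
 * *_CLEAR() helpers drop the channel-enable bit while keeping the TMOD
 * bit set.
 */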

struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;
	u32			sactive_queued;
	struct phy		*phy;
	phys_addr_t		dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;
#endif
};

/*
 * Allow one extra special slot for commands and DMA management
 * to account for libata internal commands.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dma_chan		*chan;
	struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
	u32			dma_interrupt_count;
};

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)

enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};

static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}
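
/*
 * With the legacy (non-DT) setup, dma_request_channel() offers every
 * registered channel to the filter above; it accepts only channels that
 * belong to the AHB DMAC probed in sata_dwc_dma_init_old() below and hands
 * the dw_dma_slave parameters to the engine through chan->private, the
 * interface the dw_dmac driver historically used for per-slave
 * configuration.
 */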

static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	struct device *dev = hsdev->dev;
	dma_cap_mask_t mask;

	dws->dma_dev = dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(dev, "%s: dma channel unavailable\n", __func__);
		return -EAGAIN;
	}

	return 0;
}

static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (!hsdev->dma->irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}

static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
	if (!hsdev->dma)
		return;

	dw_dma_remove(hsdev->dma);
}

#endif

static const char *get_prot_descript(u8 protocol)
{
	switch (protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_PIO:
		return "ATA PIO";
	case ATA_PROT_DMA:
		return "ATA DMA";
	case ATA_PROT_NCQ:
		return "ATA NCQ";
	case ATA_PROT_NCQ_NODATA:
		return "ATA NCQ no data";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	case ATAPI_PROT_PIO:
		return "ATAPI PIO";
	case ATAPI_PROT_DMA:
		return "ATAPI DMA";
	default:
		return "unknown";
	}
}
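
/*
 * dma_dwc_xfer_done() below is the dmaengine completion callback attached in
 * dma_dwc_xfer_setup(); with the dw DMA driver it is invoked from the DMA
 * controller's tasklet rather than from sata_dwc_isr(), which is why it
 * takes host->lock itself before touching state shared with the ISR.
 */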

static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = (struct ata_host *)hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts. Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}

static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct dma_slave_config sconf;
	struct dma_async_tx_descriptor *desc;

	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = hsdev->dmadr;
		sconf.device_fc = false;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = hsdev->dmadr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
		qc->sg, qc->n_elem, &hsdev->dmadr);

	return desc;
}

static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, *val);

	return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}

static void clear_serror(struct ata_port *ap)
{
	u32 val;

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}
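
/*
 * SError, like the other SATA status/control registers, is write-1-to-clear:
 * reading it and writing the same value straight back, as clear_serror()
 * does above, clears exactly the bits that were set without needing an
 * explicit mask.
 */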

static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	/*
	 * Writing back the value just read acks every interrupt currently
	 * pending, not only @bit.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}

static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}

/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now. TODO: check for the exact error */

	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}

/*
 * Function : sata_dwc_isr
 * arguments : irq, void *dev_instance, struct pt_regs *regs
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is called via the port ops registered function:
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/*
		 * Start FP DMA for NCQ command. At this point the tag is the
		 * active tag. It is the tag that matches the command about to
		 * be completed.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
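
	/*
	 * Completion detection: sactive_issued tracks tags this driver has
	 * issued, SActive holds the tags the device still considers
	 * outstanding, so (sactive_issued | sactive) ^ sactive leaves exactly
	 * the issued tags that have since dropped out of SActive, i.e. the
	 * completed ones. Example: issued = 0x0B, SActive = 0x09 gives
	 * (0x0B | 0x09) ^ 0x09 = 0x02, so only tag 1 has completed.
	 */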
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is a NCQ command. At this point we need to figure out for which
	 * tags we have gotten a completion interrupt. One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ). We need to process each completed command.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	while (tag_mask) {
		/* Scan to the lowest-numbered completed tag */
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask >>= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					 __func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			 ap->print_id);
	} /* while tag_mask */
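
	/*
	 * The loop above peels completed tags off tag_mask lowest-bit first;
	 * e.g. tag_mask = 0x0A walks tags 1 and 3 in that order, running the
	 * appropriate completion path for each before falling through to the
	 * re-check below.
	 */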

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (read status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing -- we read status as part of processing a
	 * completed command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}

static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync. If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}

static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc);
	}
}

static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdev->sactive_queued = 0;

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}

static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
			SATA_DWC_INTMR_ERRM |
			SATA_DWC_INTMR_NEWFPM |
			SATA_DWC_INTMR_PMABRTM |
			SATA_DWC_INTMR_DMATM);

	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}

static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + 0x00;
	port->data_addr		= base + 0x00;

	port->error_addr	= base + 0x04;
	port->feature_addr	= base + 0x04;

	port->nsect_addr	= base + 0x08;

	port->lbal_addr		= base + 0x0c;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;

	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;

	port->altstatus_addr	= base + 0x20;
	port->ctl_addr		= base + 0x20;
}
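
/*
 * Note on sata_dwc_setup_port(): the taskfile registers above sit 4 bytes
 * apart rather than at the classic 1-byte IDE spacing, the DWC core
 * evidently exposing each byte-wide shadow register on its own 32-bit
 * aligned address; data/command and status/altstatus still pair up at
 * shared offsets as on a traditional port.
 */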

static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_find_property(dev->of_node, "dmas", NULL))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}

/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : 0 on success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	err = sata_dwc_dma_get_channel(hsdevp);
	if (err)
		goto CLEANUP_ALLOC;

	err = phy_power_on(hsdev->phy);
	if (err)
		goto CLEANUP_ALLOC;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror(ap);
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}
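
/*
 * sata_dwc_port_stop() below undoes sata_dwc_port_start() in reverse order:
 * it terminates whatever descriptor may still sit on the channel, releases
 * the channel, powers the PHY back down and frees the per-port bookkeeping.
 */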

static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_sync(hsdevp->chan);
	dma_release_channel(hsdevp->chan);
	phy_power_off(hsdev->phy);

	kfree(hsdevp);
	ap->private_data = NULL;
}

/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}

static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_setup_by_tag(qc, tag);
}

static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_TXCHEN);
		else
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_start_by_tag(qc, tag);
}
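
/*
 * Tag convention for qc_issue below (and the bmdma callbacks above): NCQ
 * commands use their hardware queue tag (qc->hw_tag), everything else is
 * funnelled through slot 0; the extra slot behind SATA_DWC_QCMD_MAX leaves
 * room for libata-internal commands.
 */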

static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}

	if (ata_is_ncq(qc->tf.protocol)) {
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		trace_ata_exec_command(ap, &qc->tf, tag);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}

	return 0;
}

static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}

static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}

static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}

/*
 * scsi mid-layer and libata interface structures
 */
static const struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure a LLI block is not created that will span an 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};

static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &sata_dwc_ops,
	},
};
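
/*
 * For reference, a device tree node for this driver might look roughly like
 * the sketch below (addresses and interrupt specifiers are purely
 * illustrative; consult the 460EX board dts for real values). reg/interrupt
 * entry 0 is the SATA core, entry 1 the embedded AHB DMAC used when no
 * "dmas" property is present (CONFIG_SATA_DWC_OLD_DMA):
 *
 *	sata@bffd1000 {
 *		compatible = "amcc,sata-460ex";
 *		reg = <0xbffd1000 0x800 0xbffd0800 0x400>;
 *		interrupt-parent = <&UIC3>;
 *		interrupts = <0x0 0x4 0x5 0x4>;
 *	};
 *
 * With a dedicated dmaengine provider, a "dmas" property plus
 * dma-names = "sata-dma" selects the dma_request_chan() path instead, and
 * phy-names = "sata-phy" attaches an optional generic PHY.
 */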

static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = dev->of_node;
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	void __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct resource *res;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dev_dbg(dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

	/* Read the ID and Version Registers */
	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
	dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = dev;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(dev, "no SATA irq\n");
		return -ENODEV;
	}

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_find_property(np, "dmas", NULL)) {
		err = sata_dwc_dma_init_old(ofdev, hsdev);
		if (err)
			return err;
	}
#endif

	hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(hsdev->phy))
		return PTR_ERR(hsdev->phy);

	err = phy_init(hsdev->phy);
	if (err)
		goto error_out;

	/*
	 * Now, register with the libATA core; this will also initiate the
	 * device discovery process, invoking our port_start() handler &
	 * error_handler() to execute a dummy softreset EH session.
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err)
		dev_err(dev, "failed to activate host");

	return 0;

error_out:
	phy_exit(hsdev->phy);
	return err;
}
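
/*
 * Teardown mirrors probe: detach the libata host first (quiescing the ports
 * through port_stop), then take the PHY down, and finally remove the legacy
 * AHB DMAC if that is what probe set up.
 */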

static int sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
	return 0;
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);