1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * drivers/ata/sata_dwc_460ex.c 4 * 5 * Synopsys DesignWare Cores (DWC) SATA host driver 6 * 7 * Author: Mark Miesfeld <mmiesfeld@amcc.com> 8 * 9 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de> 10 * Copyright 2008 DENX Software Engineering 11 * 12 * Based on versions provided by AMCC and Synopsys which are: 13 * Copyright 2006 Applied Micro Circuits Corporation 14 * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED 15 */ 16 17 #ifdef CONFIG_SATA_DWC_DEBUG 18 #define DEBUG 19 #endif 20 21 #ifdef CONFIG_SATA_DWC_VDEBUG 22 #define VERBOSE_DEBUG 23 #define DEBUG_NCQ 24 #endif 25 26 #include <linux/kernel.h> 27 #include <linux/module.h> 28 #include <linux/device.h> 29 #include <linux/dmaengine.h> 30 #include <linux/of_address.h> 31 #include <linux/of_irq.h> 32 #include <linux/of_platform.h> 33 #include <linux/platform_device.h> 34 #include <linux/phy/phy.h> 35 #include <linux/libata.h> 36 #include <linux/slab.h> 37 38 #include "libata.h" 39 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi_cmnd.h> 42 43 /* These two are defined in "libata.h" */ 44 #undef DRV_NAME 45 #undef DRV_VERSION 46 47 #define DRV_NAME "sata-dwc" 48 #define DRV_VERSION "1.3" 49 50 #define sata_dwc_writel(a, v) writel_relaxed(v, a) 51 #define sata_dwc_readl(a) readl_relaxed(a) 52 53 #ifndef NO_IRQ 54 #define NO_IRQ 0 55 #endif 56 57 #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */ 58 59 enum { 60 SATA_DWC_MAX_PORTS = 1, 61 62 SATA_DWC_SCR_OFFSET = 0x24, 63 SATA_DWC_REG_OFFSET = 0x64, 64 }; 65 66 /* DWC SATA Registers */ 67 struct sata_dwc_regs { 68 u32 fptagr; /* 1st party DMA tag */ 69 u32 fpbor; /* 1st party DMA buffer offset */ 70 u32 fptcr; /* 1st party DMA Xfr count */ 71 u32 dmacr; /* DMA Control */ 72 u32 dbtsr; /* DMA Burst Transac size */ 73 u32 intpr; /* Interrupt Pending */ 74 u32 intmr; /* Interrupt Mask */ 75 u32 errmr; /* Error Mask */ 76 u32 llcr; /* Link Layer Control */ 77 u32 phycr; /* PHY Control */ 78 u32 physr; /* PHY Status */ 79 u32 rxbistpd; /* Recvd BIST pattern def register */ 80 u32 rxbistpd1; /* Recvd BIST data dword1 */ 81 u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ 82 u32 txbistpd; /* Trans BIST pattern def register */ 83 u32 txbistpd1; /* Trans BIST data dword1 */ 84 u32 txbistpd2; /* Trans BIST data dword2 */ 85 u32 bistcr; /* BIST Control Register */ 86 u32 bistfctr; /* BIST FIS Count Register */ 87 u32 bistsr; /* BIST Status Register */ 88 u32 bistdecr; /* BIST Dword Error count register */ 89 u32 res[15]; /* Reserved locations */ 90 u32 testr; /* Test Register */ 91 u32 versionr; /* Version Register */ 92 u32 idr; /* ID Register */ 93 u32 unimpl[192]; /* Unimplemented */ 94 u32 dmadr[256]; /* FIFO Locations in DMA Mode */ 95 }; 96 97 enum { 98 SCR_SCONTROL_DET_ENABLE = 0x00000001, 99 SCR_SSTATUS_DET_PRESENT = 0x00000001, 100 SCR_SERROR_DIAG_X = 0x04000000, 101 /* DWC SATA Register Operations */ 102 SATA_DWC_TXFIFO_DEPTH = 0x01FF, 103 SATA_DWC_RXFIFO_DEPTH = 0x01FF, 104 SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004, 105 SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN), 106 SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN), 107 SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN, 108 SATA_DWC_INTPR_DMAT = 0x00000001, 109 SATA_DWC_INTPR_NEWFP = 0x00000002, 110 SATA_DWC_INTPR_PMABRT = 0x00000004, 111 SATA_DWC_INTPR_ERR = 0x00000008, 112 SATA_DWC_INTPR_NEWBIST = 0x00000010, 113 SATA_DWC_INTPR_IPF = 0x10000000, 114 SATA_DWC_INTMR_DMATM = 0x00000001, 115 SATA_DWC_INTMR_NEWFPM = 0x00000002, 116 SATA_DWC_INTMR_PMABRTM = 0x00000004, 117 SATA_DWC_INTMR_ERRM = 0x00000008, 118 SATA_DWC_INTMR_NEWBISTM = 0x00000010, 119 SATA_DWC_LLCR_SCRAMEN = 0x00000001, 120 SATA_DWC_LLCR_DESCRAMEN = 0x00000002, 121 SATA_DWC_LLCR_RPDEN = 0x00000004, 122 /* This is all error bits, zero's are reserved fields. */ 123 SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03 124 }; 125 126 #define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F) 127 #define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\ 128 SATA_DWC_DMACR_TMOD_TXCHEN) 129 #define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\ 130 SATA_DWC_DMACR_TMOD_TXCHEN) 131 #define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH) 132 #define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\ 133 << 16) 134 struct sata_dwc_device { 135 struct device *dev; /* generic device struct */ 136 struct ata_probe_ent *pe; /* ptr to probe-ent */ 137 struct ata_host *host; 138 struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */ 139 u32 sactive_issued; 140 u32 sactive_queued; 141 struct phy *phy; 142 phys_addr_t dmadr; 143 #ifdef CONFIG_SATA_DWC_OLD_DMA 144 struct dw_dma_chip *dma; 145 #endif 146 }; 147 148 #define SATA_DWC_QCMD_MAX 32 149 150 struct sata_dwc_device_port { 151 struct sata_dwc_device *hsdev; 152 int cmd_issued[SATA_DWC_QCMD_MAX]; 153 int dma_pending[SATA_DWC_QCMD_MAX]; 154 155 /* DMA info */ 156 struct dma_chan *chan; 157 struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX]; 158 u32 dma_interrupt_count; 159 }; 160 161 /* 162 * Commonly used DWC SATA driver macros 163 */ 164 #define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data) 165 #define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data) 166 #define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data) 167 #define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data) 168 #define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev) 169 170 enum { 171 SATA_DWC_CMD_ISSUED_NOT = 0, 172 SATA_DWC_CMD_ISSUED_PEND = 1, 173 SATA_DWC_CMD_ISSUED_EXEC = 2, 174 SATA_DWC_CMD_ISSUED_NODATA = 3, 175 176 SATA_DWC_DMA_PENDING_NONE = 0, 177 SATA_DWC_DMA_PENDING_TX = 1, 178 SATA_DWC_DMA_PENDING_RX = 2, 179 }; 180 181 /* 182 * Prototypes 183 */ 184 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); 185 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, 186 u32 check_status); 187 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); 188 static void sata_dwc_port_stop(struct ata_port *ap); 189 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); 190 191 #ifdef CONFIG_SATA_DWC_OLD_DMA 192 193 #include <linux/platform_data/dma-dw.h> 194 #include <linux/dma/dw.h> 195 196 static struct dw_dma_slave sata_dwc_dma_dws = { 197 .src_id = 0, 198 .dst_id = 0, 199 .m_master = 1, 200 .p_master = 0, 201 }; 202 203 static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param) 204 { 205 struct dw_dma_slave *dws = &sata_dwc_dma_dws; 206 207 if (dws->dma_dev != chan->device->dev) 208 return false; 209 210 chan->private = dws; 211 return true; 212 } 213 214 static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp) 215 { 216 struct sata_dwc_device *hsdev = hsdevp->hsdev; 217 struct dw_dma_slave *dws = &sata_dwc_dma_dws; 218 dma_cap_mask_t mask; 219 220 dws->dma_dev = hsdev->dev; 221 222 dma_cap_zero(mask); 223 dma_cap_set(DMA_SLAVE, mask); 224 225 /* Acquire DMA channel */ 226 hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp); 227 if (!hsdevp->chan) { 228 dev_err(hsdev->dev, "%s: dma channel unavailable\n", 229 __func__); 230 return -EAGAIN; 231 } 232 233 return 0; 234 } 235 236 static int sata_dwc_dma_init_old(struct platform_device *pdev, 237 struct sata_dwc_device *hsdev) 238 { 239 struct device_node *np = pdev->dev.of_node; 240 struct resource *res; 241 242 hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL); 243 if (!hsdev->dma) 244 return -ENOMEM; 245 246 hsdev->dma->dev = &pdev->dev; 247 hsdev->dma->id = pdev->id; 248 249 /* Get SATA DMA interrupt number */ 250 hsdev->dma->irq = irq_of_parse_and_map(np, 1); 251 if (hsdev->dma->irq == NO_IRQ) { 252 dev_err(&pdev->dev, "no SATA DMA irq\n"); 253 return -ENODEV; 254 } 255 256 /* Get physical SATA DMA register base address */ 257 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 258 hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res); 259 if (IS_ERR(hsdev->dma->regs)) 260 return PTR_ERR(hsdev->dma->regs); 261 262 /* Initialize AHB DMAC */ 263 return dw_dma_probe(hsdev->dma); 264 } 265 266 static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev) 267 { 268 if (!hsdev->dma) 269 return; 270 271 dw_dma_remove(hsdev->dma); 272 } 273 274 #endif 275 276 static const char *get_prot_descript(u8 protocol) 277 { 278 switch (protocol) { 279 case ATA_PROT_NODATA: 280 return "ATA no data"; 281 case ATA_PROT_PIO: 282 return "ATA PIO"; 283 case ATA_PROT_DMA: 284 return "ATA DMA"; 285 case ATA_PROT_NCQ: 286 return "ATA NCQ"; 287 case ATA_PROT_NCQ_NODATA: 288 return "ATA NCQ no data"; 289 case ATAPI_PROT_NODATA: 290 return "ATAPI no data"; 291 case ATAPI_PROT_PIO: 292 return "ATAPI PIO"; 293 case ATAPI_PROT_DMA: 294 return "ATAPI DMA"; 295 default: 296 return "unknown"; 297 } 298 } 299 300 static const char *get_dma_dir_descript(int dma_dir) 301 { 302 switch ((enum dma_data_direction)dma_dir) { 303 case DMA_BIDIRECTIONAL: 304 return "bidirectional"; 305 case DMA_TO_DEVICE: 306 return "to device"; 307 case DMA_FROM_DEVICE: 308 return "from device"; 309 default: 310 return "none"; 311 } 312 } 313 314 static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf) 315 { 316 dev_vdbg(ap->dev, 317 "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n", 318 tf->command, get_prot_descript(tf->protocol), tf->flags, 319 tf->device); 320 dev_vdbg(ap->dev, 321 "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n", 322 tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah); 323 dev_vdbg(ap->dev, 324 "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", 325 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, 326 tf->hob_lbah); 327 } 328 329 static void dma_dwc_xfer_done(void *hsdev_instance) 330 { 331 unsigned long flags; 332 struct sata_dwc_device *hsdev = hsdev_instance; 333 struct ata_host *host = (struct ata_host *)hsdev->host; 334 struct ata_port *ap; 335 struct sata_dwc_device_port *hsdevp; 336 u8 tag = 0; 337 unsigned int port = 0; 338 339 spin_lock_irqsave(&host->lock, flags); 340 ap = host->ports[port]; 341 hsdevp = HSDEVP_FROM_AP(ap); 342 tag = ap->link.active_tag; 343 344 /* 345 * Each DMA command produces 2 interrupts. Only 346 * complete the command after both interrupts have been 347 * seen. (See sata_dwc_isr()) 348 */ 349 hsdevp->dma_interrupt_count++; 350 sata_dwc_clear_dmacr(hsdevp, tag); 351 352 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { 353 dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n", 354 tag, hsdevp->dma_pending[tag]); 355 } 356 357 if ((hsdevp->dma_interrupt_count % 2) == 0) 358 sata_dwc_dma_xfer_complete(ap, 1); 359 360 spin_unlock_irqrestore(&host->lock, flags); 361 } 362 363 static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc) 364 { 365 struct ata_port *ap = qc->ap; 366 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 367 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 368 struct dma_slave_config sconf; 369 struct dma_async_tx_descriptor *desc; 370 371 if (qc->dma_dir == DMA_DEV_TO_MEM) { 372 sconf.src_addr = hsdev->dmadr; 373 sconf.device_fc = false; 374 } else { /* DMA_MEM_TO_DEV */ 375 sconf.dst_addr = hsdev->dmadr; 376 sconf.device_fc = false; 377 } 378 379 sconf.direction = qc->dma_dir; 380 sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */ 381 sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */ 382 sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 383 sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 384 385 dmaengine_slave_config(hsdevp->chan, &sconf); 386 387 /* Convert SG list to linked list of items (LLIs) for AHB DMA */ 388 desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem, 389 qc->dma_dir, 390 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 391 392 if (!desc) 393 return NULL; 394 395 desc->callback = dma_dwc_xfer_done; 396 desc->callback_param = hsdev; 397 398 dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__, 399 qc->sg, qc->n_elem, &hsdev->dmadr); 400 401 return desc; 402 } 403 404 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) 405 { 406 if (scr > SCR_NOTIFICATION) { 407 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", 408 __func__, scr); 409 return -EINVAL; 410 } 411 412 *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4)); 413 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__, 414 link->ap->print_id, scr, *val); 415 416 return 0; 417 } 418 419 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) 420 { 421 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__, 422 link->ap->print_id, scr, val); 423 if (scr > SCR_NOTIFICATION) { 424 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", 425 __func__, scr); 426 return -EINVAL; 427 } 428 sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val); 429 430 return 0; 431 } 432 433 static void clear_serror(struct ata_port *ap) 434 { 435 u32 val; 436 sata_dwc_scr_read(&ap->link, SCR_ERROR, &val); 437 sata_dwc_scr_write(&ap->link, SCR_ERROR, val); 438 } 439 440 static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) 441 { 442 sata_dwc_writel(&hsdev->sata_dwc_regs->intpr, 443 sata_dwc_readl(&hsdev->sata_dwc_regs->intpr)); 444 } 445 446 static u32 qcmd_tag_to_mask(u8 tag) 447 { 448 return 0x00000001 << (tag & 0x1f); 449 } 450 451 /* See ahci.c */ 452 static void sata_dwc_error_intr(struct ata_port *ap, 453 struct sata_dwc_device *hsdev, uint intpr) 454 { 455 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 456 struct ata_eh_info *ehi = &ap->link.eh_info; 457 unsigned int err_mask = 0, action = 0; 458 struct ata_queued_cmd *qc; 459 u32 serror; 460 u8 status, tag; 461 462 ata_ehi_clear_desc(ehi); 463 464 sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror); 465 status = ap->ops->sff_check_status(ap); 466 467 tag = ap->link.active_tag; 468 469 dev_err(ap->dev, 470 "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d", 471 __func__, serror, intpr, status, hsdevp->dma_interrupt_count, 472 hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]); 473 474 /* Clear error register and interrupt bit */ 475 clear_serror(ap); 476 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR); 477 478 /* This is the only error happening now. TODO check for exact error */ 479 480 err_mask |= AC_ERR_HOST_BUS; 481 action |= ATA_EH_RESET; 482 483 /* Pass this on to EH */ 484 ehi->serror |= serror; 485 ehi->action |= action; 486 487 qc = ata_qc_from_tag(ap, tag); 488 if (qc) 489 qc->err_mask |= err_mask; 490 else 491 ehi->err_mask |= err_mask; 492 493 ata_port_abort(ap); 494 } 495 496 /* 497 * Function : sata_dwc_isr 498 * arguments : irq, void *dev_instance, struct pt_regs *regs 499 * Return value : irqreturn_t - status of IRQ 500 * This Interrupt handler called via port ops registered function. 501 * .irq_handler = sata_dwc_isr 502 */ 503 static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) 504 { 505 struct ata_host *host = (struct ata_host *)dev_instance; 506 struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host); 507 struct ata_port *ap; 508 struct ata_queued_cmd *qc; 509 unsigned long flags; 510 u8 status, tag; 511 int handled, num_processed, port = 0; 512 uint intpr, sactive, sactive2, tag_mask; 513 struct sata_dwc_device_port *hsdevp; 514 hsdev->sactive_issued = 0; 515 516 spin_lock_irqsave(&host->lock, flags); 517 518 /* Read the interrupt register */ 519 intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr); 520 521 ap = host->ports[port]; 522 hsdevp = HSDEVP_FROM_AP(ap); 523 524 dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, 525 ap->link.active_tag); 526 527 /* Check for error interrupt */ 528 if (intpr & SATA_DWC_INTPR_ERR) { 529 sata_dwc_error_intr(ap, hsdev, intpr); 530 handled = 1; 531 goto DONE; 532 } 533 534 /* Check for DMA SETUP FIS (FP DMA) interrupt */ 535 if (intpr & SATA_DWC_INTPR_NEWFP) { 536 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); 537 538 tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr)); 539 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); 540 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND) 541 dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); 542 543 hsdev->sactive_issued |= qcmd_tag_to_mask(tag); 544 545 qc = ata_qc_from_tag(ap, tag); 546 if (unlikely(!qc)) { 547 dev_err(ap->dev, "failed to get qc"); 548 handled = 1; 549 goto DONE; 550 } 551 /* 552 * Start FP DMA for NCQ command. At this point the tag is the 553 * active tag. It is the tag that matches the command about to 554 * be completed. 555 */ 556 qc->ap->link.active_tag = tag; 557 sata_dwc_bmdma_start_by_tag(qc, tag); 558 559 handled = 1; 560 goto DONE; 561 } 562 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); 563 tag_mask = (hsdev->sactive_issued | sactive) ^ sactive; 564 565 /* If no sactive issued and tag_mask is zero then this is not NCQ */ 566 if (hsdev->sactive_issued == 0 && tag_mask == 0) { 567 if (ap->link.active_tag == ATA_TAG_POISON) 568 tag = 0; 569 else 570 tag = ap->link.active_tag; 571 qc = ata_qc_from_tag(ap, tag); 572 573 /* DEV interrupt w/ no active qc? */ 574 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 575 dev_err(ap->dev, 576 "%s interrupt with no active qc qc=%p\n", 577 __func__, qc); 578 ap->ops->sff_check_status(ap); 579 handled = 1; 580 goto DONE; 581 } 582 status = ap->ops->sff_check_status(ap); 583 584 qc->ap->link.active_tag = tag; 585 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; 586 587 if (status & ATA_ERR) { 588 dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); 589 sata_dwc_qc_complete(ap, qc, 1); 590 handled = 1; 591 goto DONE; 592 } 593 594 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", 595 __func__, get_prot_descript(qc->tf.protocol)); 596 DRVSTILLBUSY: 597 if (ata_is_dma(qc->tf.protocol)) { 598 /* 599 * Each DMA transaction produces 2 interrupts. The DMAC 600 * transfer complete interrupt and the SATA controller 601 * operation done interrupt. The command should be 602 * completed only after both interrupts are seen. 603 */ 604 hsdevp->dma_interrupt_count++; 605 if (hsdevp->dma_pending[tag] == \ 606 SATA_DWC_DMA_PENDING_NONE) { 607 dev_err(ap->dev, 608 "%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n", 609 __func__, intpr, status, 610 hsdevp->dma_pending[tag]); 611 } 612 613 if ((hsdevp->dma_interrupt_count % 2) == 0) 614 sata_dwc_dma_xfer_complete(ap, 1); 615 } else if (ata_is_pio(qc->tf.protocol)) { 616 ata_sff_hsm_move(ap, qc, status, 0); 617 handled = 1; 618 goto DONE; 619 } else { 620 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) 621 goto DRVSTILLBUSY; 622 } 623 624 handled = 1; 625 goto DONE; 626 } 627 628 /* 629 * This is a NCQ command. At this point we need to figure out for which 630 * tags we have gotten a completion interrupt. One interrupt may serve 631 * as completion for more than one operation when commands are queued 632 * (NCQ). We need to process each completed command. 633 */ 634 635 /* process completed commands */ 636 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); 637 tag_mask = (hsdev->sactive_issued | sactive) ^ sactive; 638 639 if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) { 640 dev_dbg(ap->dev, 641 "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", 642 __func__, sactive, hsdev->sactive_issued, tag_mask); 643 } 644 645 if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) { 646 dev_warn(ap->dev, 647 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", 648 sactive, hsdev->sactive_issued, tag_mask); 649 } 650 651 /* read just to clear ... not bad if currently still busy */ 652 status = ap->ops->sff_check_status(ap); 653 dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status); 654 655 tag = 0; 656 num_processed = 0; 657 while (tag_mask) { 658 num_processed++; 659 while (!(tag_mask & 0x00000001)) { 660 tag++; 661 tag_mask <<= 1; 662 } 663 664 tag_mask &= (~0x00000001); 665 qc = ata_qc_from_tag(ap, tag); 666 if (unlikely(!qc)) { 667 dev_err(ap->dev, "failed to get qc"); 668 handled = 1; 669 goto DONE; 670 } 671 672 /* To be picked up by completion functions */ 673 qc->ap->link.active_tag = tag; 674 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; 675 676 /* Let libata/scsi layers handle error */ 677 if (status & ATA_ERR) { 678 dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__, 679 status); 680 sata_dwc_qc_complete(ap, qc, 1); 681 handled = 1; 682 goto DONE; 683 } 684 685 /* Process completed command */ 686 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, 687 get_prot_descript(qc->tf.protocol)); 688 if (ata_is_dma(qc->tf.protocol)) { 689 hsdevp->dma_interrupt_count++; 690 if (hsdevp->dma_pending[tag] == \ 691 SATA_DWC_DMA_PENDING_NONE) 692 dev_warn(ap->dev, "%s: DMA not pending?\n", 693 __func__); 694 if ((hsdevp->dma_interrupt_count % 2) == 0) 695 sata_dwc_dma_xfer_complete(ap, 1); 696 } else { 697 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) 698 goto STILLBUSY; 699 } 700 continue; 701 702 STILLBUSY: 703 ap->stats.idle_irq++; 704 dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", 705 ap->print_id); 706 } /* while tag_mask */ 707 708 /* 709 * Check to see if any commands completed while we were processing our 710 * initial set of completed commands (read status clears interrupts, 711 * so we might miss a completed command interrupt if one came in while 712 * we were processing --we read status as part of processing a completed 713 * command). 714 */ 715 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2); 716 if (sactive2 != sactive) { 717 dev_dbg(ap->dev, 718 "More completed - sactive=0x%x sactive2=0x%x\n", 719 sactive, sactive2); 720 } 721 handled = 1; 722 723 DONE: 724 spin_unlock_irqrestore(&host->lock, flags); 725 return IRQ_RETVAL(handled); 726 } 727 728 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) 729 { 730 struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); 731 u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr); 732 733 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { 734 dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr); 735 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr); 736 } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { 737 dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr); 738 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr); 739 } else { 740 /* 741 * This should not happen, it indicates the driver is out of 742 * sync. If it does happen, clear dmacr anyway. 743 */ 744 dev_err(hsdev->dev, 745 "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n", 746 __func__, tag, hsdevp->dma_pending[tag], dmacr); 747 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 748 SATA_DWC_DMACR_TXRXCH_CLEAR); 749 } 750 } 751 752 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) 753 { 754 struct ata_queued_cmd *qc; 755 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 756 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 757 u8 tag = 0; 758 759 tag = ap->link.active_tag; 760 qc = ata_qc_from_tag(ap, tag); 761 if (!qc) { 762 dev_err(ap->dev, "failed to get qc"); 763 return; 764 } 765 766 #ifdef DEBUG_NCQ 767 if (tag > 0) { 768 dev_info(ap->dev, 769 "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n", 770 __func__, qc->hw_tag, qc->tf.command, 771 get_dma_dir_descript(qc->dma_dir), 772 get_prot_descript(qc->tf.protocol), 773 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); 774 } 775 #endif 776 777 if (ata_is_dma(qc->tf.protocol)) { 778 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { 779 dev_err(ap->dev, 780 "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n", 781 __func__, 782 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); 783 } 784 785 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; 786 sata_dwc_qc_complete(ap, qc, check_status); 787 ap->link.active_tag = ATA_TAG_POISON; 788 } else { 789 sata_dwc_qc_complete(ap, qc, check_status); 790 } 791 } 792 793 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, 794 u32 check_status) 795 { 796 u8 status = 0; 797 u32 mask = 0x0; 798 u8 tag = qc->hw_tag; 799 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 800 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 801 hsdev->sactive_queued = 0; 802 dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status); 803 804 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) 805 dev_err(ap->dev, "TX DMA PENDING\n"); 806 else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) 807 dev_err(ap->dev, "RX DMA PENDING\n"); 808 dev_dbg(ap->dev, 809 "QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n", 810 qc->tf.command, status, ap->print_id, qc->tf.protocol); 811 812 /* clear active bit */ 813 mask = (~(qcmd_tag_to_mask(tag))); 814 hsdev->sactive_queued = hsdev->sactive_queued & mask; 815 hsdev->sactive_issued = hsdev->sactive_issued & mask; 816 ata_qc_complete(qc); 817 return 0; 818 } 819 820 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) 821 { 822 /* Enable selective interrupts by setting the interrupt maskregister*/ 823 sata_dwc_writel(&hsdev->sata_dwc_regs->intmr, 824 SATA_DWC_INTMR_ERRM | 825 SATA_DWC_INTMR_NEWFPM | 826 SATA_DWC_INTMR_PMABRTM | 827 SATA_DWC_INTMR_DMATM); 828 /* 829 * Unmask the error bits that should trigger an error interrupt by 830 * setting the error mask register. 831 */ 832 sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); 833 834 dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", 835 __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr), 836 sata_dwc_readl(&hsdev->sata_dwc_regs->errmr)); 837 } 838 839 static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base) 840 { 841 port->cmd_addr = base + 0x00; 842 port->data_addr = base + 0x00; 843 844 port->error_addr = base + 0x04; 845 port->feature_addr = base + 0x04; 846 847 port->nsect_addr = base + 0x08; 848 849 port->lbal_addr = base + 0x0c; 850 port->lbam_addr = base + 0x10; 851 port->lbah_addr = base + 0x14; 852 853 port->device_addr = base + 0x18; 854 port->command_addr = base + 0x1c; 855 port->status_addr = base + 0x1c; 856 857 port->altstatus_addr = base + 0x20; 858 port->ctl_addr = base + 0x20; 859 } 860 861 static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp) 862 { 863 struct sata_dwc_device *hsdev = hsdevp->hsdev; 864 struct device *dev = hsdev->dev; 865 866 #ifdef CONFIG_SATA_DWC_OLD_DMA 867 if (!of_find_property(dev->of_node, "dmas", NULL)) 868 return sata_dwc_dma_get_channel_old(hsdevp); 869 #endif 870 871 hsdevp->chan = dma_request_chan(dev, "sata-dma"); 872 if (IS_ERR(hsdevp->chan)) { 873 dev_err(dev, "failed to allocate dma channel: %ld\n", 874 PTR_ERR(hsdevp->chan)); 875 return PTR_ERR(hsdevp->chan); 876 } 877 878 return 0; 879 } 880 881 /* 882 * Function : sata_dwc_port_start 883 * arguments : struct ata_ioports *port 884 * Return value : returns 0 if success, error code otherwise 885 * This function allocates the scatter gather LLI table for AHB DMA 886 */ 887 static int sata_dwc_port_start(struct ata_port *ap) 888 { 889 int err = 0; 890 struct sata_dwc_device *hsdev; 891 struct sata_dwc_device_port *hsdevp = NULL; 892 struct device *pdev; 893 int i; 894 895 hsdev = HSDEV_FROM_AP(ap); 896 897 dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); 898 899 hsdev->host = ap->host; 900 pdev = ap->host->dev; 901 if (!pdev) { 902 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); 903 err = -ENODEV; 904 goto CLEANUP; 905 } 906 907 /* Allocate Port Struct */ 908 hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL); 909 if (!hsdevp) { 910 err = -ENOMEM; 911 goto CLEANUP; 912 } 913 hsdevp->hsdev = hsdev; 914 915 err = sata_dwc_dma_get_channel(hsdevp); 916 if (err) 917 goto CLEANUP_ALLOC; 918 919 err = phy_power_on(hsdev->phy); 920 if (err) 921 goto CLEANUP_ALLOC; 922 923 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) 924 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; 925 926 ap->bmdma_prd = NULL; /* set these so libata doesn't use them */ 927 ap->bmdma_prd_dma = 0; 928 929 if (ap->port_no == 0) { 930 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", 931 __func__); 932 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 933 SATA_DWC_DMACR_TXRXCH_CLEAR); 934 935 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", 936 __func__); 937 sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr, 938 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | 939 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); 940 } 941 942 /* Clear any error bits before libata starts issuing commands */ 943 clear_serror(ap); 944 ap->private_data = hsdevp; 945 dev_dbg(ap->dev, "%s: done\n", __func__); 946 return 0; 947 948 CLEANUP_ALLOC: 949 kfree(hsdevp); 950 CLEANUP: 951 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id); 952 return err; 953 } 954 955 static void sata_dwc_port_stop(struct ata_port *ap) 956 { 957 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 958 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 959 960 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); 961 962 dmaengine_terminate_sync(hsdevp->chan); 963 dma_release_channel(hsdevp->chan); 964 phy_power_off(hsdev->phy); 965 966 kfree(hsdevp); 967 ap->private_data = NULL; 968 } 969 970 /* 971 * Function : sata_dwc_exec_command_by_tag 972 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued 973 * Return value : None 974 * This function keeps track of individual command tag ids and calls 975 * ata_exec_command in libata 976 */ 977 static void sata_dwc_exec_command_by_tag(struct ata_port *ap, 978 struct ata_taskfile *tf, 979 u8 tag, u32 cmd_issued) 980 { 981 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 982 983 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, 984 ata_get_cmd_descript(tf->command), tag); 985 986 hsdevp->cmd_issued[tag] = cmd_issued; 987 988 /* 989 * Clear SError before executing a new command. 990 * sata_dwc_scr_write and read can not be used here. Clearing the PM 991 * managed SError register for the disk needs to be done before the 992 * task file is loaded. 993 */ 994 clear_serror(ap); 995 ata_sff_exec_command(ap, tf); 996 } 997 998 static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) 999 { 1000 sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, 1001 SATA_DWC_CMD_ISSUED_PEND); 1002 } 1003 1004 static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) 1005 { 1006 u8 tag = qc->hw_tag; 1007 1008 if (ata_is_ncq(qc->tf.protocol)) { 1009 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", 1010 __func__, qc->ap->link.sactive, tag); 1011 } else { 1012 tag = 0; 1013 } 1014 sata_dwc_bmdma_setup_by_tag(qc, tag); 1015 } 1016 1017 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) 1018 { 1019 int start_dma; 1020 u32 reg; 1021 struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); 1022 struct ata_port *ap = qc->ap; 1023 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1024 struct dma_async_tx_descriptor *desc = hsdevp->desc[tag]; 1025 int dir = qc->dma_dir; 1026 1027 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { 1028 start_dma = 1; 1029 if (dir == DMA_TO_DEVICE) 1030 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX; 1031 else 1032 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; 1033 } else { 1034 dev_err(ap->dev, 1035 "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n", 1036 __func__, hsdevp->cmd_issued[tag], tag); 1037 start_dma = 0; 1038 } 1039 1040 dev_dbg(ap->dev, 1041 "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n", 1042 __func__, qc, tag, qc->tf.command, 1043 get_dma_dir_descript(qc->dma_dir), start_dma); 1044 sata_dwc_tf_dump(ap, &qc->tf); 1045 1046 if (start_dma) { 1047 sata_dwc_scr_read(&ap->link, SCR_ERROR, ®); 1048 if (reg & SATA_DWC_SERROR_ERR_BITS) { 1049 dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", 1050 __func__, reg); 1051 } 1052 1053 if (dir == DMA_TO_DEVICE) 1054 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 1055 SATA_DWC_DMACR_TXCHEN); 1056 else 1057 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 1058 SATA_DWC_DMACR_RXCHEN); 1059 1060 /* Enable AHB DMA transfer on the specified channel */ 1061 dmaengine_submit(desc); 1062 dma_async_issue_pending(hsdevp->chan); 1063 } 1064 } 1065 1066 static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) 1067 { 1068 u8 tag = qc->hw_tag; 1069 1070 if (ata_is_ncq(qc->tf.protocol)) { 1071 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", 1072 __func__, qc->ap->link.sactive, tag); 1073 } else { 1074 tag = 0; 1075 } 1076 dev_dbg(qc->ap->dev, "%s\n", __func__); 1077 sata_dwc_bmdma_start_by_tag(qc, tag); 1078 } 1079 1080 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) 1081 { 1082 u32 sactive; 1083 u8 tag = qc->hw_tag; 1084 struct ata_port *ap = qc->ap; 1085 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1086 1087 #ifdef DEBUG_NCQ 1088 if (qc->hw_tag > 0 || ap->link.sactive > 1) 1089 dev_info(ap->dev, 1090 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", 1091 __func__, ap->print_id, qc->tf.command, 1092 ata_get_cmd_descript(qc->tf.command), 1093 qc->hw_tag, get_prot_descript(qc->tf.protocol), 1094 ap->link.active_tag, ap->link.sactive); 1095 #endif 1096 1097 if (!ata_is_ncq(qc->tf.protocol)) 1098 tag = 0; 1099 1100 if (ata_is_dma(qc->tf.protocol)) { 1101 hsdevp->desc[tag] = dma_dwc_xfer_setup(qc); 1102 if (!hsdevp->desc[tag]) 1103 return AC_ERR_SYSTEM; 1104 } else { 1105 hsdevp->desc[tag] = NULL; 1106 } 1107 1108 if (ata_is_ncq(qc->tf.protocol)) { 1109 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); 1110 sactive |= (0x00000001 << tag); 1111 sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive); 1112 1113 dev_dbg(qc->ap->dev, 1114 "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n", 1115 __func__, tag, qc->ap->link.sactive, sactive); 1116 1117 ap->ops->sff_tf_load(ap, &qc->tf); 1118 sata_dwc_exec_command_by_tag(ap, &qc->tf, tag, 1119 SATA_DWC_CMD_ISSUED_PEND); 1120 } else { 1121 return ata_bmdma_qc_issue(qc); 1122 } 1123 return 0; 1124 } 1125 1126 static void sata_dwc_error_handler(struct ata_port *ap) 1127 { 1128 ata_sff_error_handler(ap); 1129 } 1130 1131 static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class, 1132 unsigned long deadline) 1133 { 1134 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); 1135 int ret; 1136 1137 ret = sata_sff_hardreset(link, class, deadline); 1138 1139 sata_dwc_enable_interrupts(hsdev); 1140 1141 /* Reconfigure the DMA control register */ 1142 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 1143 SATA_DWC_DMACR_TXRXCH_CLEAR); 1144 1145 /* Reconfigure the DMA Burst Transaction Size register */ 1146 sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr, 1147 SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | 1148 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)); 1149 1150 return ret; 1151 } 1152 1153 static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device) 1154 { 1155 /* SATA DWC is master only */ 1156 } 1157 1158 /* 1159 * scsi mid-layer and libata interface structures 1160 */ 1161 static struct scsi_host_template sata_dwc_sht = { 1162 ATA_NCQ_SHT(DRV_NAME), 1163 /* 1164 * test-only: Currently this driver doesn't handle NCQ 1165 * correctly. We enable NCQ but set the queue depth to a 1166 * max of 1. This will get fixed in in a future release. 1167 */ 1168 .sg_tablesize = LIBATA_MAX_PRD, 1169 /* .can_queue = ATA_MAX_QUEUE, */ 1170 /* 1171 * Make sure a LLI block is not created that will span 8K max FIS 1172 * boundary. If the block spans such a FIS boundary, there is a chance 1173 * that a DMA burst will cross that boundary -- this results in an 1174 * error in the host controller. 1175 */ 1176 .dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */, 1177 }; 1178 1179 static struct ata_port_operations sata_dwc_ops = { 1180 .inherits = &ata_sff_port_ops, 1181 1182 .error_handler = sata_dwc_error_handler, 1183 .hardreset = sata_dwc_hardreset, 1184 1185 .qc_issue = sata_dwc_qc_issue, 1186 1187 .scr_read = sata_dwc_scr_read, 1188 .scr_write = sata_dwc_scr_write, 1189 1190 .port_start = sata_dwc_port_start, 1191 .port_stop = sata_dwc_port_stop, 1192 1193 .sff_dev_select = sata_dwc_dev_select, 1194 1195 .bmdma_setup = sata_dwc_bmdma_setup, 1196 .bmdma_start = sata_dwc_bmdma_start, 1197 }; 1198 1199 static const struct ata_port_info sata_dwc_port_info[] = { 1200 { 1201 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, 1202 .pio_mask = ATA_PIO4, 1203 .udma_mask = ATA_UDMA6, 1204 .port_ops = &sata_dwc_ops, 1205 }, 1206 }; 1207 1208 static int sata_dwc_probe(struct platform_device *ofdev) 1209 { 1210 struct sata_dwc_device *hsdev; 1211 u32 idr, versionr; 1212 char *ver = (char *)&versionr; 1213 void __iomem *base; 1214 int err = 0; 1215 int irq; 1216 struct ata_host *host; 1217 struct ata_port_info pi = sata_dwc_port_info[0]; 1218 const struct ata_port_info *ppi[] = { &pi, NULL }; 1219 struct device_node *np = ofdev->dev.of_node; 1220 struct resource *res; 1221 1222 /* Allocate DWC SATA device */ 1223 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); 1224 hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL); 1225 if (!host || !hsdev) 1226 return -ENOMEM; 1227 1228 host->private_data = hsdev; 1229 1230 /* Ioremap SATA registers */ 1231 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 1232 base = devm_ioremap_resource(&ofdev->dev, res); 1233 if (IS_ERR(base)) 1234 return PTR_ERR(base); 1235 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); 1236 1237 /* Synopsys DWC SATA specific Registers */ 1238 hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET; 1239 hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr); 1240 1241 /* Setup port */ 1242 host->ports[0]->ioaddr.cmd_addr = base; 1243 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; 1244 sata_dwc_setup_port(&host->ports[0]->ioaddr, base); 1245 1246 /* Read the ID and Version Registers */ 1247 idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr); 1248 versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr); 1249 dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n", 1250 idr, ver[0], ver[1], ver[2]); 1251 1252 /* Save dev for later use in dev_xxx() routines */ 1253 hsdev->dev = &ofdev->dev; 1254 1255 /* Enable SATA Interrupts */ 1256 sata_dwc_enable_interrupts(hsdev); 1257 1258 /* Get SATA interrupt number */ 1259 irq = irq_of_parse_and_map(np, 0); 1260 if (irq == NO_IRQ) { 1261 dev_err(&ofdev->dev, "no SATA DMA irq\n"); 1262 return -ENODEV; 1263 } 1264 1265 #ifdef CONFIG_SATA_DWC_OLD_DMA 1266 if (!of_find_property(np, "dmas", NULL)) { 1267 err = sata_dwc_dma_init_old(ofdev, hsdev); 1268 if (err) 1269 return err; 1270 } 1271 #endif 1272 1273 hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy"); 1274 if (IS_ERR(hsdev->phy)) 1275 return PTR_ERR(hsdev->phy); 1276 1277 err = phy_init(hsdev->phy); 1278 if (err) 1279 goto error_out; 1280 1281 /* 1282 * Now, register with libATA core, this will also initiate the 1283 * device discovery process, invoking our port_start() handler & 1284 * error_handler() to execute a dummy Softreset EH session 1285 */ 1286 err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); 1287 if (err) 1288 dev_err(&ofdev->dev, "failed to activate host"); 1289 1290 return 0; 1291 1292 error_out: 1293 phy_exit(hsdev->phy); 1294 return err; 1295 } 1296 1297 static int sata_dwc_remove(struct platform_device *ofdev) 1298 { 1299 struct device *dev = &ofdev->dev; 1300 struct ata_host *host = dev_get_drvdata(dev); 1301 struct sata_dwc_device *hsdev = host->private_data; 1302 1303 ata_host_detach(host); 1304 1305 phy_exit(hsdev->phy); 1306 1307 #ifdef CONFIG_SATA_DWC_OLD_DMA 1308 /* Free SATA DMA resources */ 1309 sata_dwc_dma_exit_old(hsdev); 1310 #endif 1311 1312 dev_dbg(&ofdev->dev, "done\n"); 1313 return 0; 1314 } 1315 1316 static const struct of_device_id sata_dwc_match[] = { 1317 { .compatible = "amcc,sata-460ex", }, 1318 {} 1319 }; 1320 MODULE_DEVICE_TABLE(of, sata_dwc_match); 1321 1322 static struct platform_driver sata_dwc_driver = { 1323 .driver = { 1324 .name = DRV_NAME, 1325 .of_match_table = sata_dwc_match, 1326 }, 1327 .probe = sata_dwc_probe, 1328 .remove = sata_dwc_remove, 1329 }; 1330 1331 module_platform_driver(sata_dwc_driver); 1332 1333 MODULE_LICENSE("GPL"); 1334 MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); 1335 MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver"); 1336 MODULE_VERSION(DRV_VERSION); 1337