1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * drivers/ata/sata_dwc_460ex.c 4 * 5 * Synopsys DesignWare Cores (DWC) SATA host driver 6 * 7 * Author: Mark Miesfeld <mmiesfeld@amcc.com> 8 * 9 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de> 10 * Copyright 2008 DENX Software Engineering 11 * 12 * Based on versions provided by AMCC and Synopsys which are: 13 * Copyright 2006 Applied Micro Circuits Corporation 14 * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED 15 */ 16 17 #ifdef CONFIG_SATA_DWC_DEBUG 18 #define DEBUG 19 #endif 20 21 #ifdef CONFIG_SATA_DWC_VDEBUG 22 #define VERBOSE_DEBUG 23 #define DEBUG_NCQ 24 #endif 25 26 #include <linux/kernel.h> 27 #include <linux/module.h> 28 #include <linux/device.h> 29 #include <linux/dmaengine.h> 30 #include <linux/of_address.h> 31 #include <linux/of_irq.h> 32 #include <linux/of_platform.h> 33 #include <linux/platform_device.h> 34 #include <linux/phy/phy.h> 35 #include <linux/libata.h> 36 #include <linux/slab.h> 37 38 #include "libata.h" 39 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi_cmnd.h> 42 43 /* These two are defined in "libata.h" */ 44 #undef DRV_NAME 45 #undef DRV_VERSION 46 47 #define DRV_NAME "sata-dwc" 48 #define DRV_VERSION "1.3" 49 50 #define sata_dwc_writel(a, v) writel_relaxed(v, a) 51 #define sata_dwc_readl(a) readl_relaxed(a) 52 53 #ifndef NO_IRQ 54 #define NO_IRQ 0 55 #endif 56 57 #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */ 58 59 enum { 60 SATA_DWC_MAX_PORTS = 1, 61 62 SATA_DWC_SCR_OFFSET = 0x24, 63 SATA_DWC_REG_OFFSET = 0x64, 64 }; 65 66 /* DWC SATA Registers */ 67 struct sata_dwc_regs { 68 u32 fptagr; /* 1st party DMA tag */ 69 u32 fpbor; /* 1st party DMA buffer offset */ 70 u32 fptcr; /* 1st party DMA Xfr count */ 71 u32 dmacr; /* DMA Control */ 72 u32 dbtsr; /* DMA Burst Transac size */ 73 u32 intpr; /* Interrupt Pending */ 74 u32 intmr; /* Interrupt Mask */ 75 u32 errmr; /* Error Mask */ 76 u32 llcr; /* Link Layer Control */ 77 u32 phycr; /* PHY Control */ 78 u32 physr; /* PHY Status */ 79 u32 rxbistpd; /* Recvd BIST pattern def register */ 80 u32 rxbistpd1; /* Recvd BIST data dword1 */ 81 u32 rxbistpd2; /* Recvd BIST pattern data dword2 */ 82 u32 txbistpd; /* Trans BIST pattern def register */ 83 u32 txbistpd1; /* Trans BIST data dword1 */ 84 u32 txbistpd2; /* Trans BIST data dword2 */ 85 u32 bistcr; /* BIST Control Register */ 86 u32 bistfctr; /* BIST FIS Count Register */ 87 u32 bistsr; /* BIST Status Register */ 88 u32 bistdecr; /* BIST Dword Error count register */ 89 u32 res[15]; /* Reserved locations */ 90 u32 testr; /* Test Register */ 91 u32 versionr; /* Version Register */ 92 u32 idr; /* ID Register */ 93 u32 unimpl[192]; /* Unimplemented */ 94 u32 dmadr[256]; /* FIFO Locations in DMA Mode */ 95 }; 96 97 enum { 98 SCR_SCONTROL_DET_ENABLE = 0x00000001, 99 SCR_SSTATUS_DET_PRESENT = 0x00000001, 100 SCR_SERROR_DIAG_X = 0x04000000, 101 /* DWC SATA Register Operations */ 102 SATA_DWC_TXFIFO_DEPTH = 0x01FF, 103 SATA_DWC_RXFIFO_DEPTH = 0x01FF, 104 SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004, 105 SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN), 106 SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN), 107 SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN, 108 SATA_DWC_INTPR_DMAT = 0x00000001, 109 SATA_DWC_INTPR_NEWFP = 0x00000002, 110 SATA_DWC_INTPR_PMABRT = 0x00000004, 111 SATA_DWC_INTPR_ERR = 0x00000008, 112 SATA_DWC_INTPR_NEWBIST = 0x00000010, 113 SATA_DWC_INTPR_IPF = 0x10000000, 114 SATA_DWC_INTMR_DMATM = 0x00000001, 115 SATA_DWC_INTMR_NEWFPM = 0x00000002, 116 SATA_DWC_INTMR_PMABRTM = 0x00000004, 117 SATA_DWC_INTMR_ERRM = 0x00000008, 118 SATA_DWC_INTMR_NEWBISTM = 0x00000010, 119 SATA_DWC_LLCR_SCRAMEN = 0x00000001, 120 SATA_DWC_LLCR_DESCRAMEN = 0x00000002, 121 SATA_DWC_LLCR_RPDEN = 0x00000004, 122 /* This is all error bits, zero's are reserved fields. */ 123 SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03 124 }; 125 126 #define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F) 127 #define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\ 128 SATA_DWC_DMACR_TMOD_TXCHEN) 129 #define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\ 130 SATA_DWC_DMACR_TMOD_TXCHEN) 131 #define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH) 132 #define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\ 133 << 16) 134 struct sata_dwc_device { 135 struct device *dev; /* generic device struct */ 136 struct ata_probe_ent *pe; /* ptr to probe-ent */ 137 struct ata_host *host; 138 struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */ 139 u32 sactive_issued; 140 u32 sactive_queued; 141 struct phy *phy; 142 phys_addr_t dmadr; 143 #ifdef CONFIG_SATA_DWC_OLD_DMA 144 struct dw_dma_chip *dma; 145 #endif 146 }; 147 148 #define SATA_DWC_QCMD_MAX 32 149 150 struct sata_dwc_device_port { 151 struct sata_dwc_device *hsdev; 152 int cmd_issued[SATA_DWC_QCMD_MAX]; 153 int dma_pending[SATA_DWC_QCMD_MAX]; 154 155 /* DMA info */ 156 struct dma_chan *chan; 157 struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX]; 158 u32 dma_interrupt_count; 159 }; 160 161 /* 162 * Commonly used DWC SATA driver macros 163 */ 164 #define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data) 165 #define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data) 166 #define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data) 167 #define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data) 168 #define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev) 169 170 enum { 171 SATA_DWC_CMD_ISSUED_NOT = 0, 172 SATA_DWC_CMD_ISSUED_PEND = 1, 173 SATA_DWC_CMD_ISSUED_EXEC = 2, 174 SATA_DWC_CMD_ISSUED_NODATA = 3, 175 176 SATA_DWC_DMA_PENDING_NONE = 0, 177 SATA_DWC_DMA_PENDING_TX = 1, 178 SATA_DWC_DMA_PENDING_RX = 2, 179 }; 180 181 /* 182 * Prototypes 183 */ 184 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag); 185 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, 186 u32 check_status); 187 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status); 188 static void sata_dwc_port_stop(struct ata_port *ap); 189 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag); 190 191 #ifdef CONFIG_SATA_DWC_OLD_DMA 192 193 #include <linux/platform_data/dma-dw.h> 194 #include <linux/dma/dw.h> 195 196 static struct dw_dma_slave sata_dwc_dma_dws = { 197 .src_id = 0, 198 .dst_id = 0, 199 .m_master = 1, 200 .p_master = 0, 201 }; 202 203 static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param) 204 { 205 struct dw_dma_slave *dws = &sata_dwc_dma_dws; 206 207 if (dws->dma_dev != chan->device->dev) 208 return false; 209 210 chan->private = dws; 211 return true; 212 } 213 214 static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp) 215 { 216 struct sata_dwc_device *hsdev = hsdevp->hsdev; 217 struct dw_dma_slave *dws = &sata_dwc_dma_dws; 218 dma_cap_mask_t mask; 219 220 dws->dma_dev = hsdev->dev; 221 222 dma_cap_zero(mask); 223 dma_cap_set(DMA_SLAVE, mask); 224 225 /* Acquire DMA channel */ 226 hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp); 227 if (!hsdevp->chan) { 228 dev_err(hsdev->dev, "%s: dma channel unavailable\n", 229 __func__); 230 return -EAGAIN; 231 } 232 233 return 0; 234 } 235 236 static int sata_dwc_dma_init_old(struct platform_device *pdev, 237 struct sata_dwc_device *hsdev) 238 { 239 struct device_node *np = pdev->dev.of_node; 240 241 hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL); 242 if (!hsdev->dma) 243 return -ENOMEM; 244 245 hsdev->dma->dev = &pdev->dev; 246 hsdev->dma->id = pdev->id; 247 248 /* Get SATA DMA interrupt number */ 249 hsdev->dma->irq = irq_of_parse_and_map(np, 1); 250 if (hsdev->dma->irq == NO_IRQ) { 251 dev_err(&pdev->dev, "no SATA DMA irq\n"); 252 return -ENODEV; 253 } 254 255 /* Get physical SATA DMA register base address */ 256 hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1); 257 if (IS_ERR(hsdev->dma->regs)) 258 return PTR_ERR(hsdev->dma->regs); 259 260 /* Initialize AHB DMAC */ 261 return dw_dma_probe(hsdev->dma); 262 } 263 264 static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev) 265 { 266 if (!hsdev->dma) 267 return; 268 269 dw_dma_remove(hsdev->dma); 270 } 271 272 #endif 273 274 static const char *get_prot_descript(u8 protocol) 275 { 276 switch (protocol) { 277 case ATA_PROT_NODATA: 278 return "ATA no data"; 279 case ATA_PROT_PIO: 280 return "ATA PIO"; 281 case ATA_PROT_DMA: 282 return "ATA DMA"; 283 case ATA_PROT_NCQ: 284 return "ATA NCQ"; 285 case ATA_PROT_NCQ_NODATA: 286 return "ATA NCQ no data"; 287 case ATAPI_PROT_NODATA: 288 return "ATAPI no data"; 289 case ATAPI_PROT_PIO: 290 return "ATAPI PIO"; 291 case ATAPI_PROT_DMA: 292 return "ATAPI DMA"; 293 default: 294 return "unknown"; 295 } 296 } 297 298 static const char *get_dma_dir_descript(int dma_dir) 299 { 300 switch ((enum dma_data_direction)dma_dir) { 301 case DMA_BIDIRECTIONAL: 302 return "bidirectional"; 303 case DMA_TO_DEVICE: 304 return "to device"; 305 case DMA_FROM_DEVICE: 306 return "from device"; 307 default: 308 return "none"; 309 } 310 } 311 312 static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf) 313 { 314 dev_vdbg(ap->dev, 315 "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n", 316 tf->command, get_prot_descript(tf->protocol), tf->flags, 317 tf->device); 318 dev_vdbg(ap->dev, 319 "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n", 320 tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah); 321 dev_vdbg(ap->dev, 322 "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n", 323 tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam, 324 tf->hob_lbah); 325 } 326 327 static void dma_dwc_xfer_done(void *hsdev_instance) 328 { 329 unsigned long flags; 330 struct sata_dwc_device *hsdev = hsdev_instance; 331 struct ata_host *host = (struct ata_host *)hsdev->host; 332 struct ata_port *ap; 333 struct sata_dwc_device_port *hsdevp; 334 u8 tag = 0; 335 unsigned int port = 0; 336 337 spin_lock_irqsave(&host->lock, flags); 338 ap = host->ports[port]; 339 hsdevp = HSDEVP_FROM_AP(ap); 340 tag = ap->link.active_tag; 341 342 /* 343 * Each DMA command produces 2 interrupts. Only 344 * complete the command after both interrupts have been 345 * seen. (See sata_dwc_isr()) 346 */ 347 hsdevp->dma_interrupt_count++; 348 sata_dwc_clear_dmacr(hsdevp, tag); 349 350 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { 351 dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n", 352 tag, hsdevp->dma_pending[tag]); 353 } 354 355 if ((hsdevp->dma_interrupt_count % 2) == 0) 356 sata_dwc_dma_xfer_complete(ap, 1); 357 358 spin_unlock_irqrestore(&host->lock, flags); 359 } 360 361 static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc) 362 { 363 struct ata_port *ap = qc->ap; 364 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 365 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 366 struct dma_slave_config sconf; 367 struct dma_async_tx_descriptor *desc; 368 369 if (qc->dma_dir == DMA_DEV_TO_MEM) { 370 sconf.src_addr = hsdev->dmadr; 371 sconf.device_fc = false; 372 } else { /* DMA_MEM_TO_DEV */ 373 sconf.dst_addr = hsdev->dmadr; 374 sconf.device_fc = false; 375 } 376 377 sconf.direction = qc->dma_dir; 378 sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */ 379 sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */ 380 sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 381 sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 382 383 dmaengine_slave_config(hsdevp->chan, &sconf); 384 385 /* Convert SG list to linked list of items (LLIs) for AHB DMA */ 386 desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem, 387 qc->dma_dir, 388 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 389 390 if (!desc) 391 return NULL; 392 393 desc->callback = dma_dwc_xfer_done; 394 desc->callback_param = hsdev; 395 396 dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__, 397 qc->sg, qc->n_elem, &hsdev->dmadr); 398 399 return desc; 400 } 401 402 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) 403 { 404 if (scr > SCR_NOTIFICATION) { 405 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", 406 __func__, scr); 407 return -EINVAL; 408 } 409 410 *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4)); 411 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__, 412 link->ap->print_id, scr, *val); 413 414 return 0; 415 } 416 417 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val) 418 { 419 dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__, 420 link->ap->print_id, scr, val); 421 if (scr > SCR_NOTIFICATION) { 422 dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n", 423 __func__, scr); 424 return -EINVAL; 425 } 426 sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val); 427 428 return 0; 429 } 430 431 static void clear_serror(struct ata_port *ap) 432 { 433 u32 val; 434 sata_dwc_scr_read(&ap->link, SCR_ERROR, &val); 435 sata_dwc_scr_write(&ap->link, SCR_ERROR, val); 436 } 437 438 static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit) 439 { 440 sata_dwc_writel(&hsdev->sata_dwc_regs->intpr, 441 sata_dwc_readl(&hsdev->sata_dwc_regs->intpr)); 442 } 443 444 static u32 qcmd_tag_to_mask(u8 tag) 445 { 446 return 0x00000001 << (tag & 0x1f); 447 } 448 449 /* See ahci.c */ 450 static void sata_dwc_error_intr(struct ata_port *ap, 451 struct sata_dwc_device *hsdev, uint intpr) 452 { 453 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 454 struct ata_eh_info *ehi = &ap->link.eh_info; 455 unsigned int err_mask = 0, action = 0; 456 struct ata_queued_cmd *qc; 457 u32 serror; 458 u8 status, tag; 459 460 ata_ehi_clear_desc(ehi); 461 462 sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror); 463 status = ap->ops->sff_check_status(ap); 464 465 tag = ap->link.active_tag; 466 467 dev_err(ap->dev, 468 "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d", 469 __func__, serror, intpr, status, hsdevp->dma_interrupt_count, 470 hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]); 471 472 /* Clear error register and interrupt bit */ 473 clear_serror(ap); 474 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR); 475 476 /* This is the only error happening now. TODO check for exact error */ 477 478 err_mask |= AC_ERR_HOST_BUS; 479 action |= ATA_EH_RESET; 480 481 /* Pass this on to EH */ 482 ehi->serror |= serror; 483 ehi->action |= action; 484 485 qc = ata_qc_from_tag(ap, tag); 486 if (qc) 487 qc->err_mask |= err_mask; 488 else 489 ehi->err_mask |= err_mask; 490 491 ata_port_abort(ap); 492 } 493 494 /* 495 * Function : sata_dwc_isr 496 * arguments : irq, void *dev_instance, struct pt_regs *regs 497 * Return value : irqreturn_t - status of IRQ 498 * This Interrupt handler called via port ops registered function. 499 * .irq_handler = sata_dwc_isr 500 */ 501 static irqreturn_t sata_dwc_isr(int irq, void *dev_instance) 502 { 503 struct ata_host *host = (struct ata_host *)dev_instance; 504 struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host); 505 struct ata_port *ap; 506 struct ata_queued_cmd *qc; 507 unsigned long flags; 508 u8 status, tag; 509 int handled, num_processed, port = 0; 510 uint intpr, sactive, sactive2, tag_mask; 511 struct sata_dwc_device_port *hsdevp; 512 hsdev->sactive_issued = 0; 513 514 spin_lock_irqsave(&host->lock, flags); 515 516 /* Read the interrupt register */ 517 intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr); 518 519 ap = host->ports[port]; 520 hsdevp = HSDEVP_FROM_AP(ap); 521 522 dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, 523 ap->link.active_tag); 524 525 /* Check for error interrupt */ 526 if (intpr & SATA_DWC_INTPR_ERR) { 527 sata_dwc_error_intr(ap, hsdev, intpr); 528 handled = 1; 529 goto DONE; 530 } 531 532 /* Check for DMA SETUP FIS (FP DMA) interrupt */ 533 if (intpr & SATA_DWC_INTPR_NEWFP) { 534 clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP); 535 536 tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr)); 537 dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag); 538 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND) 539 dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag); 540 541 hsdev->sactive_issued |= qcmd_tag_to_mask(tag); 542 543 qc = ata_qc_from_tag(ap, tag); 544 if (unlikely(!qc)) { 545 dev_err(ap->dev, "failed to get qc"); 546 handled = 1; 547 goto DONE; 548 } 549 /* 550 * Start FP DMA for NCQ command. At this point the tag is the 551 * active tag. It is the tag that matches the command about to 552 * be completed. 553 */ 554 qc->ap->link.active_tag = tag; 555 sata_dwc_bmdma_start_by_tag(qc, tag); 556 557 handled = 1; 558 goto DONE; 559 } 560 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); 561 tag_mask = (hsdev->sactive_issued | sactive) ^ sactive; 562 563 /* If no sactive issued and tag_mask is zero then this is not NCQ */ 564 if (hsdev->sactive_issued == 0 && tag_mask == 0) { 565 if (ap->link.active_tag == ATA_TAG_POISON) 566 tag = 0; 567 else 568 tag = ap->link.active_tag; 569 qc = ata_qc_from_tag(ap, tag); 570 571 /* DEV interrupt w/ no active qc? */ 572 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { 573 dev_err(ap->dev, 574 "%s interrupt with no active qc qc=%p\n", 575 __func__, qc); 576 ap->ops->sff_check_status(ap); 577 handled = 1; 578 goto DONE; 579 } 580 status = ap->ops->sff_check_status(ap); 581 582 qc->ap->link.active_tag = tag; 583 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; 584 585 if (status & ATA_ERR) { 586 dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status); 587 sata_dwc_qc_complete(ap, qc, 1); 588 handled = 1; 589 goto DONE; 590 } 591 592 dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n", 593 __func__, get_prot_descript(qc->tf.protocol)); 594 DRVSTILLBUSY: 595 if (ata_is_dma(qc->tf.protocol)) { 596 /* 597 * Each DMA transaction produces 2 interrupts. The DMAC 598 * transfer complete interrupt and the SATA controller 599 * operation done interrupt. The command should be 600 * completed only after both interrupts are seen. 601 */ 602 hsdevp->dma_interrupt_count++; 603 if (hsdevp->dma_pending[tag] == \ 604 SATA_DWC_DMA_PENDING_NONE) { 605 dev_err(ap->dev, 606 "%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n", 607 __func__, intpr, status, 608 hsdevp->dma_pending[tag]); 609 } 610 611 if ((hsdevp->dma_interrupt_count % 2) == 0) 612 sata_dwc_dma_xfer_complete(ap, 1); 613 } else if (ata_is_pio(qc->tf.protocol)) { 614 ata_sff_hsm_move(ap, qc, status, 0); 615 handled = 1; 616 goto DONE; 617 } else { 618 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) 619 goto DRVSTILLBUSY; 620 } 621 622 handled = 1; 623 goto DONE; 624 } 625 626 /* 627 * This is a NCQ command. At this point we need to figure out for which 628 * tags we have gotten a completion interrupt. One interrupt may serve 629 * as completion for more than one operation when commands are queued 630 * (NCQ). We need to process each completed command. 631 */ 632 633 /* process completed commands */ 634 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); 635 tag_mask = (hsdev->sactive_issued | sactive) ^ sactive; 636 637 if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) { 638 dev_dbg(ap->dev, 639 "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", 640 __func__, sactive, hsdev->sactive_issued, tag_mask); 641 } 642 643 if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) { 644 dev_warn(ap->dev, 645 "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n", 646 sactive, hsdev->sactive_issued, tag_mask); 647 } 648 649 /* read just to clear ... not bad if currently still busy */ 650 status = ap->ops->sff_check_status(ap); 651 dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status); 652 653 tag = 0; 654 num_processed = 0; 655 while (tag_mask) { 656 num_processed++; 657 while (!(tag_mask & 0x00000001)) { 658 tag++; 659 tag_mask <<= 1; 660 } 661 662 tag_mask &= (~0x00000001); 663 qc = ata_qc_from_tag(ap, tag); 664 if (unlikely(!qc)) { 665 dev_err(ap->dev, "failed to get qc"); 666 handled = 1; 667 goto DONE; 668 } 669 670 /* To be picked up by completion functions */ 671 qc->ap->link.active_tag = tag; 672 hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT; 673 674 /* Let libata/scsi layers handle error */ 675 if (status & ATA_ERR) { 676 dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__, 677 status); 678 sata_dwc_qc_complete(ap, qc, 1); 679 handled = 1; 680 goto DONE; 681 } 682 683 /* Process completed command */ 684 dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__, 685 get_prot_descript(qc->tf.protocol)); 686 if (ata_is_dma(qc->tf.protocol)) { 687 hsdevp->dma_interrupt_count++; 688 if (hsdevp->dma_pending[tag] == \ 689 SATA_DWC_DMA_PENDING_NONE) 690 dev_warn(ap->dev, "%s: DMA not pending?\n", 691 __func__); 692 if ((hsdevp->dma_interrupt_count % 2) == 0) 693 sata_dwc_dma_xfer_complete(ap, 1); 694 } else { 695 if (unlikely(sata_dwc_qc_complete(ap, qc, 1))) 696 goto STILLBUSY; 697 } 698 continue; 699 700 STILLBUSY: 701 ap->stats.idle_irq++; 702 dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n", 703 ap->print_id); 704 } /* while tag_mask */ 705 706 /* 707 * Check to see if any commands completed while we were processing our 708 * initial set of completed commands (read status clears interrupts, 709 * so we might miss a completed command interrupt if one came in while 710 * we were processing --we read status as part of processing a completed 711 * command). 712 */ 713 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2); 714 if (sactive2 != sactive) { 715 dev_dbg(ap->dev, 716 "More completed - sactive=0x%x sactive2=0x%x\n", 717 sactive, sactive2); 718 } 719 handled = 1; 720 721 DONE: 722 spin_unlock_irqrestore(&host->lock, flags); 723 return IRQ_RETVAL(handled); 724 } 725 726 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag) 727 { 728 struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp); 729 u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr); 730 731 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) { 732 dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr); 733 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr); 734 } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) { 735 dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr); 736 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr); 737 } else { 738 /* 739 * This should not happen, it indicates the driver is out of 740 * sync. If it does happen, clear dmacr anyway. 741 */ 742 dev_err(hsdev->dev, 743 "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n", 744 __func__, tag, hsdevp->dma_pending[tag], dmacr); 745 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 746 SATA_DWC_DMACR_TXRXCH_CLEAR); 747 } 748 } 749 750 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) 751 { 752 struct ata_queued_cmd *qc; 753 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 754 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 755 u8 tag = 0; 756 757 tag = ap->link.active_tag; 758 qc = ata_qc_from_tag(ap, tag); 759 if (!qc) { 760 dev_err(ap->dev, "failed to get qc"); 761 return; 762 } 763 764 #ifdef DEBUG_NCQ 765 if (tag > 0) { 766 dev_info(ap->dev, 767 "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n", 768 __func__, qc->hw_tag, qc->tf.command, 769 get_dma_dir_descript(qc->dma_dir), 770 get_prot_descript(qc->tf.protocol), 771 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); 772 } 773 #endif 774 775 if (ata_is_dma(qc->tf.protocol)) { 776 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) { 777 dev_err(ap->dev, 778 "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n", 779 __func__, 780 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); 781 } 782 783 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE; 784 sata_dwc_qc_complete(ap, qc, check_status); 785 ap->link.active_tag = ATA_TAG_POISON; 786 } else { 787 sata_dwc_qc_complete(ap, qc, check_status); 788 } 789 } 790 791 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, 792 u32 check_status) 793 { 794 u8 status = 0; 795 u32 mask = 0x0; 796 u8 tag = qc->hw_tag; 797 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 798 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 799 hsdev->sactive_queued = 0; 800 dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status); 801 802 if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) 803 dev_err(ap->dev, "TX DMA PENDING\n"); 804 else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) 805 dev_err(ap->dev, "RX DMA PENDING\n"); 806 dev_dbg(ap->dev, 807 "QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n", 808 qc->tf.command, status, ap->print_id, qc->tf.protocol); 809 810 /* clear active bit */ 811 mask = (~(qcmd_tag_to_mask(tag))); 812 hsdev->sactive_queued = hsdev->sactive_queued & mask; 813 hsdev->sactive_issued = hsdev->sactive_issued & mask; 814 ata_qc_complete(qc); 815 return 0; 816 } 817 818 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev) 819 { 820 /* Enable selective interrupts by setting the interrupt maskregister*/ 821 sata_dwc_writel(&hsdev->sata_dwc_regs->intmr, 822 SATA_DWC_INTMR_ERRM | 823 SATA_DWC_INTMR_NEWFPM | 824 SATA_DWC_INTMR_PMABRTM | 825 SATA_DWC_INTMR_DMATM); 826 /* 827 * Unmask the error bits that should trigger an error interrupt by 828 * setting the error mask register. 829 */ 830 sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS); 831 832 dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", 833 __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr), 834 sata_dwc_readl(&hsdev->sata_dwc_regs->errmr)); 835 } 836 837 static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base) 838 { 839 port->cmd_addr = base + 0x00; 840 port->data_addr = base + 0x00; 841 842 port->error_addr = base + 0x04; 843 port->feature_addr = base + 0x04; 844 845 port->nsect_addr = base + 0x08; 846 847 port->lbal_addr = base + 0x0c; 848 port->lbam_addr = base + 0x10; 849 port->lbah_addr = base + 0x14; 850 851 port->device_addr = base + 0x18; 852 port->command_addr = base + 0x1c; 853 port->status_addr = base + 0x1c; 854 855 port->altstatus_addr = base + 0x20; 856 port->ctl_addr = base + 0x20; 857 } 858 859 static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp) 860 { 861 struct sata_dwc_device *hsdev = hsdevp->hsdev; 862 struct device *dev = hsdev->dev; 863 864 #ifdef CONFIG_SATA_DWC_OLD_DMA 865 if (!of_find_property(dev->of_node, "dmas", NULL)) 866 return sata_dwc_dma_get_channel_old(hsdevp); 867 #endif 868 869 hsdevp->chan = dma_request_chan(dev, "sata-dma"); 870 if (IS_ERR(hsdevp->chan)) { 871 dev_err(dev, "failed to allocate dma channel: %ld\n", 872 PTR_ERR(hsdevp->chan)); 873 return PTR_ERR(hsdevp->chan); 874 } 875 876 return 0; 877 } 878 879 /* 880 * Function : sata_dwc_port_start 881 * arguments : struct ata_ioports *port 882 * Return value : returns 0 if success, error code otherwise 883 * This function allocates the scatter gather LLI table for AHB DMA 884 */ 885 static int sata_dwc_port_start(struct ata_port *ap) 886 { 887 int err = 0; 888 struct sata_dwc_device *hsdev; 889 struct sata_dwc_device_port *hsdevp = NULL; 890 struct device *pdev; 891 int i; 892 893 hsdev = HSDEV_FROM_AP(ap); 894 895 dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no); 896 897 hsdev->host = ap->host; 898 pdev = ap->host->dev; 899 if (!pdev) { 900 dev_err(ap->dev, "%s: no ap->host->dev\n", __func__); 901 err = -ENODEV; 902 goto CLEANUP; 903 } 904 905 /* Allocate Port Struct */ 906 hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL); 907 if (!hsdevp) { 908 err = -ENOMEM; 909 goto CLEANUP; 910 } 911 hsdevp->hsdev = hsdev; 912 913 err = sata_dwc_dma_get_channel(hsdevp); 914 if (err) 915 goto CLEANUP_ALLOC; 916 917 err = phy_power_on(hsdev->phy); 918 if (err) 919 goto CLEANUP_ALLOC; 920 921 for (i = 0; i < SATA_DWC_QCMD_MAX; i++) 922 hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT; 923 924 ap->bmdma_prd = NULL; /* set these so libata doesn't use them */ 925 ap->bmdma_prd_dma = 0; 926 927 if (ap->port_no == 0) { 928 dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n", 929 __func__); 930 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 931 SATA_DWC_DMACR_TXRXCH_CLEAR); 932 933 dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", 934 __func__); 935 sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr, 936 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | 937 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT))); 938 } 939 940 /* Clear any error bits before libata starts issuing commands */ 941 clear_serror(ap); 942 ap->private_data = hsdevp; 943 dev_dbg(ap->dev, "%s: done\n", __func__); 944 return 0; 945 946 CLEANUP_ALLOC: 947 kfree(hsdevp); 948 CLEANUP: 949 dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id); 950 return err; 951 } 952 953 static void sata_dwc_port_stop(struct ata_port *ap) 954 { 955 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 956 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); 957 958 dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id); 959 960 dmaengine_terminate_sync(hsdevp->chan); 961 dma_release_channel(hsdevp->chan); 962 phy_power_off(hsdev->phy); 963 964 kfree(hsdevp); 965 ap->private_data = NULL; 966 } 967 968 /* 969 * Function : sata_dwc_exec_command_by_tag 970 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued 971 * Return value : None 972 * This function keeps track of individual command tag ids and calls 973 * ata_exec_command in libata 974 */ 975 static void sata_dwc_exec_command_by_tag(struct ata_port *ap, 976 struct ata_taskfile *tf, 977 u8 tag, u32 cmd_issued) 978 { 979 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 980 981 dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command, 982 ata_get_cmd_descript(tf->command), tag); 983 984 hsdevp->cmd_issued[tag] = cmd_issued; 985 986 /* 987 * Clear SError before executing a new command. 988 * sata_dwc_scr_write and read can not be used here. Clearing the PM 989 * managed SError register for the disk needs to be done before the 990 * task file is loaded. 991 */ 992 clear_serror(ap); 993 ata_sff_exec_command(ap, tf); 994 } 995 996 static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) 997 { 998 sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, 999 SATA_DWC_CMD_ISSUED_PEND); 1000 } 1001 1002 static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) 1003 { 1004 u8 tag = qc->hw_tag; 1005 1006 if (ata_is_ncq(qc->tf.protocol)) { 1007 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", 1008 __func__, qc->ap->link.sactive, tag); 1009 } else { 1010 tag = 0; 1011 } 1012 sata_dwc_bmdma_setup_by_tag(qc, tag); 1013 } 1014 1015 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) 1016 { 1017 int start_dma; 1018 u32 reg; 1019 struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc); 1020 struct ata_port *ap = qc->ap; 1021 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1022 struct dma_async_tx_descriptor *desc = hsdevp->desc[tag]; 1023 int dir = qc->dma_dir; 1024 1025 if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) { 1026 start_dma = 1; 1027 if (dir == DMA_TO_DEVICE) 1028 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX; 1029 else 1030 hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX; 1031 } else { 1032 dev_err(ap->dev, 1033 "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n", 1034 __func__, hsdevp->cmd_issued[tag], tag); 1035 start_dma = 0; 1036 } 1037 1038 dev_dbg(ap->dev, 1039 "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n", 1040 __func__, qc, tag, qc->tf.command, 1041 get_dma_dir_descript(qc->dma_dir), start_dma); 1042 sata_dwc_tf_dump(ap, &qc->tf); 1043 1044 if (start_dma) { 1045 sata_dwc_scr_read(&ap->link, SCR_ERROR, ®); 1046 if (reg & SATA_DWC_SERROR_ERR_BITS) { 1047 dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n", 1048 __func__, reg); 1049 } 1050 1051 if (dir == DMA_TO_DEVICE) 1052 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 1053 SATA_DWC_DMACR_TXCHEN); 1054 else 1055 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 1056 SATA_DWC_DMACR_RXCHEN); 1057 1058 /* Enable AHB DMA transfer on the specified channel */ 1059 dmaengine_submit(desc); 1060 dma_async_issue_pending(hsdevp->chan); 1061 } 1062 } 1063 1064 static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) 1065 { 1066 u8 tag = qc->hw_tag; 1067 1068 if (ata_is_ncq(qc->tf.protocol)) { 1069 dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", 1070 __func__, qc->ap->link.sactive, tag); 1071 } else { 1072 tag = 0; 1073 } 1074 dev_dbg(qc->ap->dev, "%s\n", __func__); 1075 sata_dwc_bmdma_start_by_tag(qc, tag); 1076 } 1077 1078 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) 1079 { 1080 u32 sactive; 1081 u8 tag = qc->hw_tag; 1082 struct ata_port *ap = qc->ap; 1083 struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); 1084 1085 #ifdef DEBUG_NCQ 1086 if (qc->hw_tag > 0 || ap->link.sactive > 1) 1087 dev_info(ap->dev, 1088 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", 1089 __func__, ap->print_id, qc->tf.command, 1090 ata_get_cmd_descript(qc->tf.command), 1091 qc->hw_tag, get_prot_descript(qc->tf.protocol), 1092 ap->link.active_tag, ap->link.sactive); 1093 #endif 1094 1095 if (!ata_is_ncq(qc->tf.protocol)) 1096 tag = 0; 1097 1098 if (ata_is_dma(qc->tf.protocol)) { 1099 hsdevp->desc[tag] = dma_dwc_xfer_setup(qc); 1100 if (!hsdevp->desc[tag]) 1101 return AC_ERR_SYSTEM; 1102 } else { 1103 hsdevp->desc[tag] = NULL; 1104 } 1105 1106 if (ata_is_ncq(qc->tf.protocol)) { 1107 sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive); 1108 sactive |= (0x00000001 << tag); 1109 sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive); 1110 1111 dev_dbg(qc->ap->dev, 1112 "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n", 1113 __func__, tag, qc->ap->link.sactive, sactive); 1114 1115 ap->ops->sff_tf_load(ap, &qc->tf); 1116 sata_dwc_exec_command_by_tag(ap, &qc->tf, tag, 1117 SATA_DWC_CMD_ISSUED_PEND); 1118 } else { 1119 return ata_bmdma_qc_issue(qc); 1120 } 1121 return 0; 1122 } 1123 1124 static void sata_dwc_error_handler(struct ata_port *ap) 1125 { 1126 ata_sff_error_handler(ap); 1127 } 1128 1129 static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class, 1130 unsigned long deadline) 1131 { 1132 struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap); 1133 int ret; 1134 1135 ret = sata_sff_hardreset(link, class, deadline); 1136 1137 sata_dwc_enable_interrupts(hsdev); 1138 1139 /* Reconfigure the DMA control register */ 1140 sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, 1141 SATA_DWC_DMACR_TXRXCH_CLEAR); 1142 1143 /* Reconfigure the DMA Burst Transaction Size register */ 1144 sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr, 1145 SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) | 1146 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)); 1147 1148 return ret; 1149 } 1150 1151 static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device) 1152 { 1153 /* SATA DWC is master only */ 1154 } 1155 1156 /* 1157 * scsi mid-layer and libata interface structures 1158 */ 1159 static struct scsi_host_template sata_dwc_sht = { 1160 ATA_NCQ_SHT(DRV_NAME), 1161 /* 1162 * test-only: Currently this driver doesn't handle NCQ 1163 * correctly. We enable NCQ but set the queue depth to a 1164 * max of 1. This will get fixed in in a future release. 1165 */ 1166 .sg_tablesize = LIBATA_MAX_PRD, 1167 /* .can_queue = ATA_MAX_QUEUE, */ 1168 /* 1169 * Make sure a LLI block is not created that will span 8K max FIS 1170 * boundary. If the block spans such a FIS boundary, there is a chance 1171 * that a DMA burst will cross that boundary -- this results in an 1172 * error in the host controller. 1173 */ 1174 .dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */, 1175 }; 1176 1177 static struct ata_port_operations sata_dwc_ops = { 1178 .inherits = &ata_sff_port_ops, 1179 1180 .error_handler = sata_dwc_error_handler, 1181 .hardreset = sata_dwc_hardreset, 1182 1183 .qc_issue = sata_dwc_qc_issue, 1184 1185 .scr_read = sata_dwc_scr_read, 1186 .scr_write = sata_dwc_scr_write, 1187 1188 .port_start = sata_dwc_port_start, 1189 .port_stop = sata_dwc_port_stop, 1190 1191 .sff_dev_select = sata_dwc_dev_select, 1192 1193 .bmdma_setup = sata_dwc_bmdma_setup, 1194 .bmdma_start = sata_dwc_bmdma_start, 1195 }; 1196 1197 static const struct ata_port_info sata_dwc_port_info[] = { 1198 { 1199 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ, 1200 .pio_mask = ATA_PIO4, 1201 .udma_mask = ATA_UDMA6, 1202 .port_ops = &sata_dwc_ops, 1203 }, 1204 }; 1205 1206 static int sata_dwc_probe(struct platform_device *ofdev) 1207 { 1208 struct sata_dwc_device *hsdev; 1209 u32 idr, versionr; 1210 char *ver = (char *)&versionr; 1211 void __iomem *base; 1212 int err = 0; 1213 int irq; 1214 struct ata_host *host; 1215 struct ata_port_info pi = sata_dwc_port_info[0]; 1216 const struct ata_port_info *ppi[] = { &pi, NULL }; 1217 struct device_node *np = ofdev->dev.of_node; 1218 struct resource *res; 1219 1220 /* Allocate DWC SATA device */ 1221 host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS); 1222 hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL); 1223 if (!host || !hsdev) 1224 return -ENOMEM; 1225 1226 host->private_data = hsdev; 1227 1228 /* Ioremap SATA registers */ 1229 base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res); 1230 if (IS_ERR(base)) 1231 return PTR_ERR(base); 1232 dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n"); 1233 1234 /* Synopsys DWC SATA specific Registers */ 1235 hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET; 1236 hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr); 1237 1238 /* Setup port */ 1239 host->ports[0]->ioaddr.cmd_addr = base; 1240 host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET; 1241 sata_dwc_setup_port(&host->ports[0]->ioaddr, base); 1242 1243 /* Read the ID and Version Registers */ 1244 idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr); 1245 versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr); 1246 dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n", 1247 idr, ver[0], ver[1], ver[2]); 1248 1249 /* Save dev for later use in dev_xxx() routines */ 1250 hsdev->dev = &ofdev->dev; 1251 1252 /* Enable SATA Interrupts */ 1253 sata_dwc_enable_interrupts(hsdev); 1254 1255 /* Get SATA interrupt number */ 1256 irq = irq_of_parse_and_map(np, 0); 1257 if (irq == NO_IRQ) { 1258 dev_err(&ofdev->dev, "no SATA DMA irq\n"); 1259 return -ENODEV; 1260 } 1261 1262 #ifdef CONFIG_SATA_DWC_OLD_DMA 1263 if (!of_find_property(np, "dmas", NULL)) { 1264 err = sata_dwc_dma_init_old(ofdev, hsdev); 1265 if (err) 1266 return err; 1267 } 1268 #endif 1269 1270 hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy"); 1271 if (IS_ERR(hsdev->phy)) 1272 return PTR_ERR(hsdev->phy); 1273 1274 err = phy_init(hsdev->phy); 1275 if (err) 1276 goto error_out; 1277 1278 /* 1279 * Now, register with libATA core, this will also initiate the 1280 * device discovery process, invoking our port_start() handler & 1281 * error_handler() to execute a dummy Softreset EH session 1282 */ 1283 err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); 1284 if (err) 1285 dev_err(&ofdev->dev, "failed to activate host"); 1286 1287 return 0; 1288 1289 error_out: 1290 phy_exit(hsdev->phy); 1291 return err; 1292 } 1293 1294 static int sata_dwc_remove(struct platform_device *ofdev) 1295 { 1296 struct device *dev = &ofdev->dev; 1297 struct ata_host *host = dev_get_drvdata(dev); 1298 struct sata_dwc_device *hsdev = host->private_data; 1299 1300 ata_host_detach(host); 1301 1302 phy_exit(hsdev->phy); 1303 1304 #ifdef CONFIG_SATA_DWC_OLD_DMA 1305 /* Free SATA DMA resources */ 1306 sata_dwc_dma_exit_old(hsdev); 1307 #endif 1308 1309 dev_dbg(&ofdev->dev, "done\n"); 1310 return 0; 1311 } 1312 1313 static const struct of_device_id sata_dwc_match[] = { 1314 { .compatible = "amcc,sata-460ex", }, 1315 {} 1316 }; 1317 MODULE_DEVICE_TABLE(of, sata_dwc_match); 1318 1319 static struct platform_driver sata_dwc_driver = { 1320 .driver = { 1321 .name = DRV_NAME, 1322 .of_match_table = sata_dwc_match, 1323 }, 1324 .probe = sata_dwc_probe, 1325 .remove = sata_dwc_remove, 1326 }; 1327 1328 module_platform_driver(sata_dwc_driver); 1329 1330 MODULE_LICENSE("GPL"); 1331 MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>"); 1332 MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver"); 1333 MODULE_VERSION(DRV_VERSION); 1334