// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *   Copyright 2006 Applied Micro Circuits Corporation
 *   COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
 */

#ifdef CONFIG_SATA_DWC_DEBUG
#define DEBUG
#endif

#ifdef CONFIG_SATA_DWC_VDEBUG
#define VERBOSE_DEBUG
#define DEBUG_NCQ
#endif

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION

#define DRV_NAME        "sata-dwc"
#define DRV_VERSION     "1.3"

#define sata_dwc_writel(a, v)   writel_relaxed(v, a)
#define sata_dwc_readl(a)       readl_relaxed(a)

#ifndef NO_IRQ
#define NO_IRQ          0
#endif

#define AHB_DMA_BRST_DFLT       64      /* 16 data items burst length */

enum {
        SATA_DWC_MAX_PORTS = 1,

        SATA_DWC_SCR_OFFSET = 0x24,
        SATA_DWC_REG_OFFSET = 0x64,
};

/* DWC SATA Registers */
struct sata_dwc_regs {
        u32 fptagr;             /* 1st party DMA tag */
        u32 fpbor;              /* 1st party DMA buffer offset */
        u32 fptcr;              /* 1st party DMA Xfr count */
        u32 dmacr;              /* DMA Control */
        u32 dbtsr;              /* DMA Burst Transac size */
        u32 intpr;              /* Interrupt Pending */
        u32 intmr;              /* Interrupt Mask */
        u32 errmr;              /* Error Mask */
        u32 llcr;               /* Link Layer Control */
        u32 phycr;              /* PHY Control */
        u32 physr;              /* PHY Status */
        u32 rxbistpd;           /* Recvd BIST pattern def register */
        u32 rxbistpd1;          /* Recvd BIST data dword1 */
        u32 rxbistpd2;          /* Recvd BIST pattern data dword2 */
        u32 txbistpd;           /* Trans BIST pattern def register */
        u32 txbistpd1;          /* Trans BIST data dword1 */
        u32 txbistpd2;          /* Trans BIST data dword2 */
        u32 bistcr;             /* BIST Control Register */
        u32 bistfctr;           /* BIST FIS Count Register */
        u32 bistsr;             /* BIST Status Register */
        u32 bistdecr;           /* BIST Dword Error count register */
        u32 res[15];            /* Reserved locations */
        u32 testr;              /* Test Register */
        u32 versionr;           /* Version Register */
        u32 idr;                /* ID Register */
        u32 unimpl[192];        /* Unimplemented */
        u32 dmadr[256];         /* FIFO Locations in DMA Mode */
};

enum {
        SCR_SCONTROL_DET_ENABLE = 0x00000001,
        SCR_SSTATUS_DET_PRESENT = 0x00000001,
        SCR_SERROR_DIAG_X       = 0x04000000,
        /* DWC SATA Register Operations */
        SATA_DWC_TXFIFO_DEPTH   = 0x01FF,
        SATA_DWC_RXFIFO_DEPTH   = 0x01FF,
        SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
        SATA_DWC_DMACR_TXCHEN   = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
        SATA_DWC_DMACR_RXCHEN   = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
        SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
        SATA_DWC_INTPR_DMAT     = 0x00000001,
        SATA_DWC_INTPR_NEWFP    = 0x00000002,
        SATA_DWC_INTPR_PMABRT   = 0x00000004,
        SATA_DWC_INTPR_ERR      = 0x00000008,
        SATA_DWC_INTPR_NEWBIST  = 0x00000010,
        SATA_DWC_INTPR_IPF      = 0x10000000,
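        /*
         * Interrupt mask register bits. Each INTMR bit unmasks the
         * matching INTPR pending source above: DMATM pairs with DMAT,
         * NEWFPM with NEWFP, and so on.
         */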
        SATA_DWC_INTMR_DMATM    = 0x00000001,
        SATA_DWC_INTMR_NEWFPM   = 0x00000002,
        SATA_DWC_INTMR_PMABRTM  = 0x00000004,
        SATA_DWC_INTMR_ERRM     = 0x00000008,
        SATA_DWC_INTMR_NEWBISTM = 0x00000010,
        SATA_DWC_LLCR_SCRAMEN   = 0x00000001,
        SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
        SATA_DWC_LLCR_RPDEN     = 0x00000004,
        /* These are all error bits; zeros are reserved fields. */
        SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
};

#define SATA_DWC_SCR0_SPD_GET(v)        (((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)      (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
                                         SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)      (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
                                         SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)        (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)        ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
                                         << 16)

struct sata_dwc_device {
        struct device           *dev;           /* generic device struct */
        struct ata_probe_ent    *pe;            /* ptr to probe-ent */
        struct ata_host         *host;
        struct sata_dwc_regs __iomem *sata_dwc_regs;    /* DW SATA specific */
        u32                     sactive_issued;
        u32                     sactive_queued;
        struct phy              *phy;
        phys_addr_t             dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
        struct dw_dma_chip      *dma;
#endif
};

#define SATA_DWC_QCMD_MAX       32

struct sata_dwc_device_port {
        struct sata_dwc_device  *hsdev;
        int                     cmd_issued[SATA_DWC_QCMD_MAX];
        int                     dma_pending[SATA_DWC_QCMD_MAX];

        /* DMA info */
        struct dma_chan         *chan;
        struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
        u32                     dma_interrupt_count;
};

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)   ((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)       ((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)      ((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)       ((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)    ((struct sata_dwc_device *)(p)->hsdev)

enum {
        SATA_DWC_CMD_ISSUED_NOT         = 0,
        SATA_DWC_CMD_ISSUED_PEND        = 1,
        SATA_DWC_CMD_ISSUED_EXEC        = 2,
        SATA_DWC_CMD_ISSUED_NODATA      = 3,

        SATA_DWC_DMA_PENDING_NONE       = 0,
        SATA_DWC_DMA_PENDING_TX         = 1,
        SATA_DWC_DMA_PENDING_RX         = 2,
};

/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
                                u32 check_status);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
static void sata_dwc_port_stop(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
        .src_id = 0,
        .dst_id = 0,
        .m_master = 1,
        .p_master = 0,
};

static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
        struct dw_dma_slave *dws = &sata_dwc_dma_dws;

        if (dws->dma_dev != chan->device->dev)
                return false;

        chan->private = dws;
        return true;
}
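/*
 * Legacy path for platforms whose device tree has no "dmas" property:
 * request any DMA_SLAVE channel and let sata_dwc_dma_filter() pick one
 * belonging to the dw_dma controller set up in sata_dwc_dma_init_old().
 */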
static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
        struct sata_dwc_device *hsdev = hsdevp->hsdev;
        struct dw_dma_slave *dws = &sata_dwc_dma_dws;
        dma_cap_mask_t mask;

        dws->dma_dev = hsdev->dev;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Acquire DMA channel */
        hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
        if (!hsdevp->chan) {
                dev_err(hsdev->dev, "%s: dma channel unavailable\n",
                        __func__);
                return -EAGAIN;
        }

        return 0;
}

static int sata_dwc_dma_init_old(struct platform_device *pdev,
                                 struct sata_dwc_device *hsdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct resource *res;

        hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
        if (!hsdev->dma)
                return -ENOMEM;

        hsdev->dma->dev = &pdev->dev;
        hsdev->dma->id = pdev->id;

        /* Get SATA DMA interrupt number */
        hsdev->dma->irq = irq_of_parse_and_map(np, 1);
        if (hsdev->dma->irq == NO_IRQ) {
                dev_err(&pdev->dev, "no SATA DMA irq\n");
                return -ENODEV;
        }

        /* Get physical SATA DMA register base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(hsdev->dma->regs))
                return PTR_ERR(hsdev->dma->regs);

        /* Initialize AHB DMAC */
        return dw_dma_probe(hsdev->dma);
}

static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
        if (!hsdev->dma)
                return;

        dw_dma_remove(hsdev->dma);
}

#endif

static const char *get_prot_descript(u8 protocol)
{
        switch (protocol) {
        case ATA_PROT_NODATA:
                return "ATA no data";
        case ATA_PROT_PIO:
                return "ATA PIO";
        case ATA_PROT_DMA:
                return "ATA DMA";
        case ATA_PROT_NCQ:
                return "ATA NCQ";
        case ATA_PROT_NCQ_NODATA:
                return "ATA NCQ no data";
        case ATAPI_PROT_NODATA:
                return "ATAPI no data";
        case ATAPI_PROT_PIO:
                return "ATAPI PIO";
        case ATAPI_PROT_DMA:
                return "ATAPI DMA";
        default:
                return "unknown";
        }
}

static const char *get_dma_dir_descript(int dma_dir)
{
        switch ((enum dma_data_direction)dma_dir) {
        case DMA_BIDIRECTIONAL:
                return "bidirectional";
        case DMA_TO_DEVICE:
                return "to device";
        case DMA_FROM_DEVICE:
                return "from device";
        default:
                return "none";
        }
}

static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
{
        dev_vdbg(ap->dev,
                "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
                tf->command, get_prot_descript(tf->protocol), tf->flags,
                tf->device);
        dev_vdbg(ap->dev,
                "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
                tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
        dev_vdbg(ap->dev,
                "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
                tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
                tf->hob_lbah);
}
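/*
 * dmaengine completion callback. This counts the DMAC-side half of the
 * two interrupts (DMAC transfer complete plus SATA controller operation
 * done) that every DMA command produces; sata_dwc_isr() counts the
 * controller-side half.
 */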
static void dma_dwc_xfer_done(void *hsdev_instance)
{
        unsigned long flags;
        struct sata_dwc_device *hsdev = hsdev_instance;
        struct ata_host *host = (struct ata_host *)hsdev->host;
        struct ata_port *ap;
        struct sata_dwc_device_port *hsdevp;
        u8 tag = 0;
        unsigned int port = 0;

        spin_lock_irqsave(&host->lock, flags);
        ap = host->ports[port];
        hsdevp = HSDEVP_FROM_AP(ap);
        tag = ap->link.active_tag;

        /*
         * Each DMA command produces 2 interrupts. Only
         * complete the command after both interrupts have been
         * seen. (See sata_dwc_isr())
         */
        hsdevp->dma_interrupt_count++;
        sata_dwc_clear_dmacr(hsdevp, tag);

        if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
                dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
                        tag, hsdevp->dma_pending[tag]);
        }

        if ((hsdevp->dma_interrupt_count % 2) == 0)
                sata_dwc_dma_xfer_complete(ap, 1);

        spin_unlock_irqrestore(&host->lock, flags);
}

static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
        struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
        struct dma_slave_config sconf;
        struct dma_async_tx_descriptor *desc;

        if (qc->dma_dir == DMA_DEV_TO_MEM) {
                sconf.src_addr = hsdev->dmadr;
                sconf.device_fc = false;
        } else {        /* DMA_MEM_TO_DEV */
                sconf.dst_addr = hsdev->dmadr;
                sconf.device_fc = false;
        }

        sconf.direction = qc->dma_dir;
        sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;     /* in items */
        sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;     /* in items */
        sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

        dmaengine_slave_config(hsdevp->chan, &sconf);

        /* Convert SG list to linked list of items (LLIs) for AHB DMA */
        desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
                                       qc->dma_dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return NULL;

        desc->callback = dma_dwc_xfer_done;
        desc->callback_param = hsdev;

        dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
                qc->sg, qc->n_elem, &hsdev->dmadr);

        return desc;
}

static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
        if (scr > SCR_NOTIFICATION) {
                dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
                        __func__, scr);
                return -EINVAL;
        }

        *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
        dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
                link->ap->print_id, scr, *val);

        return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
        dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
                link->ap->print_id, scr, val);
        if (scr > SCR_NOTIFICATION) {
                dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
                        __func__, scr);
                return -EINVAL;
        }
        sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

        return 0;
}

static void clear_serror(struct ata_port *ap)
{
        u32 val;

        sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
        sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}

static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
        /*
         * The pending register is cleared by writing back the value just
         * read; note that this acknowledges every asserted source, not
         * only @bit.
         */
        sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
                        sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}

static u32 qcmd_tag_to_mask(u8 tag)
{
        return 0x00000001 << (tag & 0x1f);
}
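/*
 * Example: qcmd_tag_to_mask(5) == 0x00000020. These per-tag masks are
 * OR-ed into sactive_issued when a queued command is started and are
 * cleared again in sata_dwc_qc_complete().
 */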
/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
                                struct sata_dwc_device *hsdev, uint intpr)
{
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
        struct ata_eh_info *ehi = &ap->link.eh_info;
        unsigned int err_mask = 0, action = 0;
        struct ata_queued_cmd *qc;
        u32 serror;
        u8 status, tag;

        ata_ehi_clear_desc(ehi);

        sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
        status = ap->ops->sff_check_status(ap);

        tag = ap->link.active_tag;

        dev_err(ap->dev,
                "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d\n",
                __func__, serror, intpr, status, hsdevp->dma_interrupt_count,
                hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

        /* Clear error register and interrupt bit */
        clear_serror(ap);
        clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

        /* This is the only error handled now. TODO: check for exact error */
        err_mask |= AC_ERR_HOST_BUS;
        action |= ATA_EH_RESET;

        /* Pass this on to EH */
        ehi->serror |= serror;
        ehi->action |= action;

        qc = ata_qc_from_tag(ap, tag);
        if (qc)
                qc->err_mask |= err_mask;
        else
                ehi->err_mask |= err_mask;

        ata_port_abort(ap);
}

/*
 * Function : sata_dwc_isr
 * arguments : irq, void *dev_instance, struct pt_regs *regs
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is called via the port ops registered function:
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
        struct ata_host *host = (struct ata_host *)dev_instance;
        struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
        struct ata_port *ap;
        struct ata_queued_cmd *qc;
        unsigned long flags;
        u8 status, tag;
        int handled, num_processed, port = 0;
        uint intpr, sactive, sactive2, tag_mask;
        struct sata_dwc_device_port *hsdevp;

        hsdev->sactive_issued = 0;

        spin_lock_irqsave(&host->lock, flags);

        /* Read the interrupt register */
        intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

        ap = host->ports[port];
        hsdevp = HSDEVP_FROM_AP(ap);

        dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
                ap->link.active_tag);

        /* Check for error interrupt */
        if (intpr & SATA_DWC_INTPR_ERR) {
                sata_dwc_error_intr(ap, hsdev, intpr);
                handled = 1;
                goto DONE;
        }

        /* Check for DMA SETUP FIS (FP DMA) interrupt */
        if (intpr & SATA_DWC_INTPR_NEWFP) {
                clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

                tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
                dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
                if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
                        dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

                hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

                qc = ata_qc_from_tag(ap, tag);
                /*
                 * Start FP DMA for NCQ command. At this point the tag is the
                 * active tag. It is the tag that matches the command about to
                 * be completed.
                 */
                qc->ap->link.active_tag = tag;
                sata_dwc_bmdma_start_by_tag(qc, tag);

                handled = 1;
                goto DONE;
        }
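        /*
         * tag_mask below isolates tags that were issued but are no
         * longer active, i.e. completions. Worked example: with
         * sactive_issued == 0x5 (tags 0 and 2 issued) and SCR_ACTIVE
         * == 0x4 (tag 2 still active), (0x5 | 0x4) ^ 0x4 == 0x1, so
         * only tag 0 has completed.
         */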
        sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
        tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

        /* If no sactive issued and tag_mask is zero then this is not NCQ */
        if (hsdev->sactive_issued == 0 && tag_mask == 0) {
                if (ap->link.active_tag == ATA_TAG_POISON)
                        tag = 0;
                else
                        tag = ap->link.active_tag;
                qc = ata_qc_from_tag(ap, tag);

                /* DEV interrupt w/ no active qc? */
                if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                        dev_err(ap->dev,
                                "%s interrupt with no active qc qc=%p\n",
                                __func__, qc);
                        ap->ops->sff_check_status(ap);
                        handled = 1;
                        goto DONE;
                }
                status = ap->ops->sff_check_status(ap);

                qc->ap->link.active_tag = tag;
                hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

                if (status & ATA_ERR) {
                        dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
                        sata_dwc_qc_complete(ap, qc, 1);
                        handled = 1;
                        goto DONE;
                }

                dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
                        __func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
                if (ata_is_dma(qc->tf.protocol)) {
                        /*
                         * Each DMA transaction produces 2 interrupts. The DMAC
                         * transfer complete interrupt and the SATA controller
                         * operation done interrupt. The command should be
                         * completed only after both interrupts are seen.
                         */
                        hsdevp->dma_interrupt_count++;
                        if (hsdevp->dma_pending[tag] ==
                                        SATA_DWC_DMA_PENDING_NONE) {
                                dev_err(ap->dev,
                                        "%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
                                        __func__, intpr, status,
                                        hsdevp->dma_pending[tag]);
                        }

                        if ((hsdevp->dma_interrupt_count % 2) == 0)
                                sata_dwc_dma_xfer_complete(ap, 1);
                } else if (ata_is_pio(qc->tf.protocol)) {
                        ata_sff_hsm_move(ap, qc, status, 0);
                        handled = 1;
                        goto DONE;
                } else {
                        if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
                                goto DRVSTILLBUSY;
                }

                handled = 1;
                goto DONE;
        }

        /*
         * This is a NCQ command. At this point we need to figure out for which
         * tags we have gotten a completion interrupt. One interrupt may serve
         * as completion for more than one operation when commands are queued
         * (NCQ). We need to process each completed command.
         */

        /* process completed commands */
        sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
        tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

        if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
                dev_dbg(ap->dev,
                        "%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
                        __func__, sactive, hsdev->sactive_issued, tag_mask);
        }

        if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
                dev_warn(ap->dev,
                         "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
                         sactive, hsdev->sactive_issued, tag_mask);
        }
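        /*
         * The loop below walks tag_mask from the lowest set bit upward:
         * the inner while shifts the mask right until bit 0 is set,
         * with 'tag' counting how far it shifted; each completed tag is
         * then handled in turn.
         */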
        /* read just to clear ... not bad if currently still busy */
        status = ap->ops->sff_check_status(ap);
        dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

        tag = 0;
        num_processed = 0;
        while (tag_mask) {
                num_processed++;
                while (!(tag_mask & 0x00000001)) {
                        tag++;
                        /* shift right to reach the lowest set bit */
                        tag_mask >>= 1;
                }

                tag_mask &= (~0x00000001);
                qc = ata_qc_from_tag(ap, tag);

                /* To be picked up by completion functions */
                qc->ap->link.active_tag = tag;
                hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

                /* Let libata/scsi layers handle error */
                if (status & ATA_ERR) {
                        dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
                                status);
                        sata_dwc_qc_complete(ap, qc, 1);
                        handled = 1;
                        goto DONE;
                }

                /* Process completed command */
                dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
                        get_prot_descript(qc->tf.protocol));
                if (ata_is_dma(qc->tf.protocol)) {
                        hsdevp->dma_interrupt_count++;
                        if (hsdevp->dma_pending[tag] ==
                                        SATA_DWC_DMA_PENDING_NONE)
                                dev_warn(ap->dev, "%s: DMA not pending?\n",
                                         __func__);
                        if ((hsdevp->dma_interrupt_count % 2) == 0)
                                sata_dwc_dma_xfer_complete(ap, 1);
                } else {
                        if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
                                goto STILLBUSY;
                }
                continue;

STILLBUSY:
                ap->stats.idle_irq++;
                dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
                         ap->print_id);
        } /* while tag_mask */

        /*
         * Check to see if any commands completed while we were processing our
         * initial set of completed commands (reading status clears interrupts,
         * so we might miss a completed command interrupt if one came in while
         * we were processing; we read status as part of processing a completed
         * command).
         */
        sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
        if (sactive2 != sactive) {
                dev_dbg(ap->dev,
                        "More completed - sactive=0x%x sactive2=0x%x\n",
                        sactive, sactive2);
        }
        handled = 1;

DONE:
        spin_unlock_irqrestore(&host->lock, flags);
        return IRQ_RETVAL(handled);
}
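/*
 * Drop the finished direction's channel-enable bit in DMACR while
 * keeping TMOD_TXCHEN set, e.g. SATA_DWC_DMACR_RX_CLEAR(0x07) == 0x05
 * (RX enable cleared, TMOD_TXCHEN still set).
 */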
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
        struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
        u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

        if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
                dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
                sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
        } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
                dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
                sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
        } else {
                /*
                 * This should not happen, it indicates the driver is out of
                 * sync. If it does happen, clear dmacr anyway.
                 */
                dev_err(hsdev->dev,
                        "%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
                        __func__, tag, hsdevp->dma_pending[tag], dmacr);
                sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
                                SATA_DWC_DMACR_TXRXCH_CLEAR);
        }
}

static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
{
        struct ata_queued_cmd *qc;
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
        struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
        u8 tag = 0;

        tag = ap->link.active_tag;
        qc = ata_qc_from_tag(ap, tag);
        if (!qc) {
                dev_err(ap->dev, "failed to get qc\n");
                return;
        }

#ifdef DEBUG_NCQ
        if (tag > 0) {
                dev_info(ap->dev,
                         "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
                         __func__, qc->hw_tag, qc->tf.command,
                         get_dma_dir_descript(qc->dma_dir),
                         get_prot_descript(qc->tf.protocol),
                         sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
        }
#endif

        if (ata_is_dma(qc->tf.protocol)) {
                if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
                        dev_err(ap->dev,
                                "%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
                                __func__,
                                sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
                }

                hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
                sata_dwc_qc_complete(ap, qc, check_status);
                ap->link.active_tag = ATA_TAG_POISON;
        } else {
                sata_dwc_qc_complete(ap, qc, check_status);
        }
}

static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
                                u32 check_status)
{
        u8 status = 0;
        u32 mask = 0x0;
        u8 tag = qc->hw_tag;
        struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

        hsdev->sactive_queued = 0;

        dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);

        if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
                dev_err(ap->dev, "TX DMA PENDING\n");
        else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
                dev_err(ap->dev, "RX DMA PENDING\n");
        dev_dbg(ap->dev,
                "QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
                qc->tf.command, status, ap->print_id, qc->tf.protocol);

        /* clear active bit */
        mask = (~(qcmd_tag_to_mask(tag)));
        hsdev->sactive_queued = hsdev->sactive_queued & mask;
        hsdev->sactive_issued = hsdev->sactive_issued & mask;
        ata_qc_complete(qc);
        return 0;
}
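/*
 * Only the ERRM, NEWFPM, PMABRTM and DMATM sources are unmasked here;
 * NEWBISTM stays masked (no BIST handling is wired up in this driver).
 */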
static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
        /* Enable selective interrupts by setting the interrupt mask register */
        sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
                        SATA_DWC_INTMR_ERRM |
                        SATA_DWC_INTMR_NEWFPM |
                        SATA_DWC_INTMR_PMABRTM |
                        SATA_DWC_INTMR_DMATM);
        /*
         * Unmask the error bits that should trigger an error interrupt by
         * setting the error mask register.
         */
        sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

        dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
                __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
                sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}

static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
        port->cmd_addr          = base + 0x00;
        port->data_addr         = base + 0x00;

        port->error_addr        = base + 0x04;
        port->feature_addr      = base + 0x04;

        port->nsect_addr        = base + 0x08;

        port->lbal_addr         = base + 0x0c;
        port->lbam_addr         = base + 0x10;
        port->lbah_addr         = base + 0x14;

        port->device_addr       = base + 0x18;
        port->command_addr      = base + 0x1c;
        port->status_addr       = base + 0x1c;

        port->altstatus_addr    = base + 0x20;
        port->ctl_addr          = base + 0x20;
}

static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
        struct sata_dwc_device *hsdev = hsdevp->hsdev;
        struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
        if (!of_find_property(dev->of_node, "dmas", NULL))
                return sata_dwc_dma_get_channel_old(hsdevp);
#endif

        hsdevp->chan = dma_request_chan(dev, "sata-dma");
        if (IS_ERR(hsdevp->chan)) {
                dev_err(dev, "failed to allocate dma channel: %ld\n",
                        PTR_ERR(hsdevp->chan));
                return PTR_ERR(hsdevp->chan);
        }

        return 0;
}
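/*
 * Example of the burst-size programming done in sata_dwc_port_start()
 * below: with AHB_DMA_BRST_DFLT == 64 bytes, SATA_DWC_DBTSR_MWR(64) ==
 * 16 and SATA_DWC_DBTSR_MRD(64) == 16 << 16, i.e. 16 words per burst
 * in each direction.
 */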
/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 if success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
        int err = 0;
        struct sata_dwc_device *hsdev;
        struct sata_dwc_device_port *hsdevp = NULL;
        struct device *pdev;
        int i;

        hsdev = HSDEV_FROM_AP(ap);

        dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

        hsdev->host = ap->host;
        pdev = ap->host->dev;
        if (!pdev) {
                dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
                err = -ENODEV;
                goto CLEANUP;
        }

        /* Allocate Port Struct */
        hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
        if (!hsdevp) {
                err = -ENOMEM;
                goto CLEANUP;
        }
        hsdevp->hsdev = hsdev;

        err = sata_dwc_dma_get_channel(hsdevp);
        if (err)
                goto CLEANUP_ALLOC;

        err = phy_power_on(hsdev->phy);
        if (err)
                goto CLEANUP_ALLOC;

        for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
                hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

        ap->bmdma_prd = NULL;   /* set these so libata doesn't use them */
        ap->bmdma_prd_dma = 0;

        if (ap->port_no == 0) {
                dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
                        __func__);
                sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
                                SATA_DWC_DMACR_TXRXCH_CLEAR);

                dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
                        __func__);
                sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
                                (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
                                 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
        }

        /* Clear any error bits before libata starts issuing commands */
        clear_serror(ap);
        ap->private_data = hsdevp;
        dev_dbg(ap->dev, "%s: done\n", __func__);
        return 0;

CLEANUP_ALLOC:
        kfree(hsdevp);
CLEANUP:
        dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
        return err;
}

static void sata_dwc_port_stop(struct ata_port *ap)
{
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
        struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

        dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

        dmaengine_terminate_sync(hsdevp->chan);
        dma_release_channel(hsdevp->chan);
        phy_power_off(hsdev->phy);

        kfree(hsdevp);
        ap->private_data = NULL;
}

/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
                                         struct ata_taskfile *tf,
                                         u8 tag, u32 cmd_issued)
{
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

        dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
                ata_get_cmd_descript(tf->command), tag);

        hsdevp->cmd_issued[tag] = cmd_issued;

        /*
         * Clear SError before executing a new command.
         * sata_dwc_scr_write and read can not be used here. Clearing the PM
         * managed SError register for the disk needs to be done before the
         * task file is loaded.
         */
        clear_serror(ap);
        ata_sff_exec_command(ap, tf);
}

static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
        sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
                                     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
        u8 tag = qc->hw_tag;

        if (ata_is_ncq(qc->tf.protocol)) {
                dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
                        __func__, qc->ap->link.sactive, tag);
        } else {
                tag = 0;
        }

        sata_dwc_bmdma_setup_by_tag(qc, tag);
}
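/*
 * For NCQ commands this is reached from the NEWFP (DMA Setup FIS) branch
 * of sata_dwc_isr(); for non-NCQ DMA it is called via
 * sata_dwc_bmdma_start(). DMA is started only while the command is still
 * marked as issued in cmd_issued[tag].
 */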
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
        int start_dma;
        u32 reg;
        struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
        struct ata_port *ap = qc->ap;
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
        struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
        int dir = qc->dma_dir;

        if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
                start_dma = 1;
                if (dir == DMA_TO_DEVICE)
                        hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
                else
                        hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
        } else {
                dev_err(ap->dev,
                        "%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
                        __func__, hsdevp->cmd_issued[tag], tag);
                start_dma = 0;
        }

        dev_dbg(ap->dev,
                "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
                __func__, qc, tag, qc->tf.command,
                get_dma_dir_descript(qc->dma_dir), start_dma);
        sata_dwc_tf_dump(ap, &qc->tf);

        if (start_dma) {
                sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
                if (reg & SATA_DWC_SERROR_ERR_BITS) {
                        dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
                                __func__, reg);
                }

                if (dir == DMA_TO_DEVICE)
                        sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
                                        SATA_DWC_DMACR_TXCHEN);
                else
                        sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
                                        SATA_DWC_DMACR_RXCHEN);

                /* Enable AHB DMA transfer on the specified channel */
                dmaengine_submit(desc);
                dma_async_issue_pending(hsdevp->chan);
        }
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
        u8 tag = qc->hw_tag;

        if (ata_is_ncq(qc->tf.protocol)) {
                dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
                        __func__, qc->ap->link.sactive, tag);
        } else {
                tag = 0;
        }

        dev_dbg(qc->ap->dev, "%s\n", __func__);
        sata_dwc_bmdma_start_by_tag(qc, tag);
}

static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
        u32 sactive;
        u8 tag = qc->hw_tag;
        struct ata_port *ap = qc->ap;
        struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

#ifdef DEBUG_NCQ
        if (qc->hw_tag > 0 || ap->link.sactive > 1)
                dev_info(ap->dev,
                         "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
                         __func__, ap->print_id, qc->tf.command,
                         ata_get_cmd_descript(qc->tf.command),
                         qc->hw_tag, get_prot_descript(qc->tf.protocol),
                         ap->link.active_tag, ap->link.sactive);
#endif

        if (!ata_is_ncq(qc->tf.protocol))
                tag = 0;

        if (ata_is_dma(qc->tf.protocol)) {
                hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
                if (!hsdevp->desc[tag])
                        return AC_ERR_SYSTEM;
        } else {
                hsdevp->desc[tag] = NULL;
        }

        if (ata_is_ncq(qc->tf.protocol)) {
                sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
                sactive |= (0x00000001 << tag);
                sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

                dev_dbg(qc->ap->dev,
                        "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
                        __func__, tag, qc->ap->link.sactive, sactive);

                ap->ops->sff_tf_load(ap, &qc->tf);
                sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
                                             SATA_DWC_CMD_ISSUED_PEND);
        } else {
                return ata_bmdma_qc_issue(qc);
        }

        return 0;
}

static void sata_dwc_error_handler(struct ata_port *ap)
{
        ata_sff_error_handler(ap);
}

static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
                              unsigned long deadline)
{
        struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
        int ret;

        ret = sata_sff_hardreset(link, class, deadline);

        sata_dwc_enable_interrupts(hsdev);

        /* Reconfigure the DMA control register */
        sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
                        SATA_DWC_DMACR_TXRXCH_CLEAR);

        /* Reconfigure the DMA Burst Transaction Size register */
        sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
                        SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
                        SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

        return ret;
}

static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
        /* SATA DWC is master only */
}

/*
 * scsi mid-layer and libata interface structures
 */
static struct scsi_host_template sata_dwc_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        /*
         * test-only: Currently this driver doesn't handle NCQ
         * correctly. We enable NCQ but set the queue depth to a
         * max of 1. This will get fixed in a future release.
         */
        .sg_tablesize           = LIBATA_MAX_PRD,
        /* .can_queue           = ATA_MAX_QUEUE, */
        /*
         * Make sure a LLI block is not created that will span an 8K max FIS
         * boundary. If the block spans such a FIS boundary, there is a chance
         * that a DMA burst will cross that boundary -- this results in an
         * error in the host controller.
         */
        .dma_boundary           = 0x1fff /* ATA_DMA_BOUNDARY */,
};

static struct ata_port_operations sata_dwc_ops = {
        .inherits               = &ata_sff_port_ops,

        .error_handler          = sata_dwc_error_handler,
        .hardreset              = sata_dwc_hardreset,

        .qc_issue               = sata_dwc_qc_issue,

        .scr_read               = sata_dwc_scr_read,
        .scr_write              = sata_dwc_scr_write,

        .port_start             = sata_dwc_port_start,
        .port_stop              = sata_dwc_port_stop,

        .sff_dev_select         = sata_dwc_dev_select,

        .bmdma_setup            = sata_dwc_bmdma_setup,
        .bmdma_start            = sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
        {
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
                .pio_mask       = ATA_PIO4,
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &sata_dwc_ops,
        },
};

static int sata_dwc_probe(struct platform_device *ofdev)
{
        struct sata_dwc_device *hsdev;
        u32 idr, versionr;
        char *ver = (char *)&versionr;
        void __iomem *base;
        int err = 0;
        int irq;
        struct ata_host *host;
        struct ata_port_info pi = sata_dwc_port_info[0];
        const struct ata_port_info *ppi[] = { &pi, NULL };
        struct device_node *np = ofdev->dev.of_node;
        struct resource *res;

        /* Allocate DWC SATA device */
        host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
        hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
        if (!host || !hsdev)
                return -ENOMEM;

        host->private_data = hsdev;

        /* Ioremap SATA registers */
        res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&ofdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
        dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");

        /* Synopsys DWC SATA specific Registers */
        hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
        hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);

        /* Setup port */
        host->ports[0]->ioaddr.cmd_addr = base;
        host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
        sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

        /* Read the ID and Version Registers */
        idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
        versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
        dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
                   idr, ver[0], ver[1], ver[2]);

        /* Save dev for later use in dev_xxx() routines */
        hsdev->dev = &ofdev->dev;

        /* Enable SATA Interrupts */
        sata_dwc_enable_interrupts(hsdev);

        /* Get SATA interrupt number */
        irq = irq_of_parse_and_map(np, 0);
        if (irq == NO_IRQ) {
                dev_err(&ofdev->dev, "no SATA DMA irq\n");
                err = -ENODEV;
                goto error_out;
        }
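        /*
         * Like sata_dwc_dma_get_channel(), pick the DMA setup path from
         * the device tree: without a "dmas" property, fall back to the
         * legacy embedded dw_dma controller.
         */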
#ifdef CONFIG_SATA_DWC_OLD_DMA
        if (!of_find_property(np, "dmas", NULL)) {
                err = sata_dwc_dma_init_old(ofdev, hsdev);
                if (err)
                        goto error_out;
        }
#endif

        hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
        if (IS_ERR(hsdev->phy)) {
                err = PTR_ERR(hsdev->phy);
                hsdev->phy = NULL;
                goto error_out;
        }

        err = phy_init(hsdev->phy);
        if (err)
                goto error_out;

        /*
         * Now, register with libATA core, this will also initiate the
         * device discovery process, invoking our port_start() handler &
         * error_handler() to execute a dummy Softreset EH session
         */
        err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
        if (err)
                dev_err(&ofdev->dev, "failed to activate host\n");

        return 0;

error_out:
        phy_exit(hsdev->phy);
        return err;
}

static int sata_dwc_remove(struct platform_device *ofdev)
{
        struct device *dev = &ofdev->dev;
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_dwc_device *hsdev = host->private_data;

        ata_host_detach(host);

        phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
        /* Free SATA DMA resources */
        sata_dwc_dma_exit_old(hsdev);
#endif

        dev_dbg(&ofdev->dev, "done\n");
        return 0;
}

static const struct of_device_id sata_dwc_match[] = {
        { .compatible = "amcc,sata-460ex", },
        {}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
        .driver = {
                .name = DRV_NAME,
                .of_match_table = sata_dwc_match,
        },
        .probe = sata_dwc_probe,
        .remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);