xref: /openbmc/linux/drivers/ata/ahci_xgene.c (revision bc5aa3a0)
1 /*
2  * AppliedMicro X-Gene SoC SATA Host Controller Driver
3  *
4  * Copyright (c) 2014, Applied Micro Circuits Corporation
5  * Author: Loc Ho <lho@apm.com>
6  *         Tuan Phan <tphan@apm.com>
7  *         Suman Tripathi <stripathi@apm.com>
8  *
9  * This program is free software; you can redistribute  it and/or modify it
10  * under  the terms of  the GNU General  Public License as published by the
11  * Free Software Foundation;  either version 2 of the  License, or (at your
12  * option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
21  *
22  * NOTE: PM support is not currently available.
23  *
24  */
25 #include <linux/acpi.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/ahci_platform.h>
29 #include <linux/of_address.h>
30 #include <linux/of_device.h>
31 #include <linux/of_irq.h>
32 #include <linux/phy/phy.h>
33 #include "ahci.h"
34 
35 #define DRV_NAME "xgene-ahci"
36 
37 /* Max # of disks per controller */
38 #define MAX_AHCI_CHN_PERCTR		2
39 
40 /* MUX CSR */
41 #define SATA_ENET_CONFIG_REG		0x00000000
42 #define  CFG_SATA_ENET_SELECT_MASK	0x00000001
43 
44 /* SATA core host controller CSR */
45 #define SLVRDERRATTRIBUTES		0x00000000
46 #define SLVWRERRATTRIBUTES		0x00000004
47 #define MSTRDERRATTRIBUTES		0x00000008
48 #define MSTWRERRATTRIBUTES		0x0000000c
49 #define BUSCTLREG			0x00000014
50 #define IOFMSTRWAUX			0x00000018
51 #define INTSTATUSMASK			0x0000002c
52 #define ERRINTSTATUS			0x00000030
53 #define ERRINTSTATUSMASK		0x00000034
54 
55 /* SATA host AHCI CSR */
56 #define PORTCFG				0x000000a4
57 #define  PORTADDR_SET(dst, src) \
58 		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
59 #define PORTPHY1CFG		0x000000a8
60 #define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
61 		(((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
62 #define PORTPHY2CFG			0x000000ac
63 #define PORTPHY3CFG			0x000000b0
64 #define PORTPHY4CFG			0x000000b4
65 #define PORTPHY5CFG			0x000000b8
66 #define SCTL0				0x0000012C
67 #define PORTPHY5CFG_RTCHG_SET(dst, src) \
68 		(((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
69 #define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
70 		(((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
71 #define PORTAXICFG			0x000000bc
72 #define PORTAXICFG_OUTTRANS_SET(dst, src) \
73 		(((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
74 #define PORTRANSCFG			0x000000c8
75 #define PORTRANSCFG_RXWM_SET(dst, src)		\
76 		(((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))
77 
78 /* SATA host controller AXI CSR */
79 #define INT_SLV_TMOMASK			0x00000010
80 
81 /* SATA diagnostic CSR */
82 #define CFG_MEM_RAM_SHUTDOWN		0x00000070
83 #define BLOCK_MEM_RDY			0x00000074
84 
85 /* Max retry for link down */
86 #define MAX_LINK_DOWN_RETRY 3
87 
88 enum xgene_ahci_version {
89 	XGENE_AHCI_V1 = 1,
90 	XGENE_AHCI_V2,
91 };
92 
93 struct xgene_ahci_context {
94 	struct ahci_host_priv *hpriv;
95 	struct device *dev;
96 	u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
97 	u32 class[MAX_AHCI_CHN_PERCTR]; /* tracking the class of device */
98 	void __iomem *csr_core;		/* Core CSR address of IP */
99 	void __iomem *csr_diag;		/* Diag CSR address of IP */
100 	void __iomem *csr_axi;		/* AXI CSR address of IP */
101 	void __iomem *csr_mux;		/* MUX CSR address of IP */
102 };
103 
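/**
 * xgene_ahci_init_memram - Release the IP RAM blocks from shutdown.
 * @ctx: X-Gene AHCI driver context.
 *
 * Clears CFG_MEM_RAM_SHUTDOWN, waits ~1ms for the reset to complete and
 * then checks BLOCK_MEM_RDY to confirm all memory blocks report ready.
 */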
104 static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
105 {
106 	dev_dbg(ctx->dev, "Release memory from shutdown\n");
107 	writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
108 	readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
109 	msleep(1);	/* reset may take up to 1ms */
110 	if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
111 		dev_err(ctx->dev, "failed to release memory from shutdown\n");
112 		return -ENODEV;
113 	}
114 	return 0;
115 }
116 
117 /**
118  * xgene_ahci_poll_reg_val - Poll a register until it reads a specific value.
119  * @ap: ATA port of interest.
120  * @reg: Register of interest.
121  * @val: Value to be attained.
122  * @interval: polling interval, in milliseconds.
123  * @timeout: timeout, in milliseconds, for reaching the value.
124  */
125 static int xgene_ahci_poll_reg_val(struct ata_port *ap,
126 				   void __iomem *reg, unsigned int val,
127 				   unsigned long interval,
128 				   unsigned long timeout)
129 {
130 	unsigned long deadline;
131 	unsigned int tmp;
132 
133 	tmp = ioread32(reg);
134 	deadline = ata_deadline(jiffies, timeout);
135 
136 	while (tmp != val && time_before(jiffies, deadline)) {
137 		ata_msleep(ap, interval);
138 		tmp = ioread32(reg);
139 	}
140 
141 	return tmp;
142 }
143 
144 /**
145  * xgene_ahci_restart_engine - Restart the DMA engine.
146  * @ap: ATA port of interest
147  *
148  * Waits for completion of multiple commands and restarts
149  * the DMA engine inside the controller.
150  */
151 static int xgene_ahci_restart_engine(struct ata_port *ap)
152 {
153 	struct ahci_host_priv *hpriv = ap->host->private_data;
154 	struct ahci_port_priv *pp = ap->private_data;
155 	void __iomem *port_mmio = ahci_port_base(ap);
156 	u32 fbs;
157 
158 	/*
159 	 * In the case of a PMP, multiple IDENTIFY DEVICE commands can be
160 	 * outstanding in PxCI, so poll PxCI for completion of the
161 	 * outstanding IDENTIFY DEVICE commands before restarting
162 	 * the DMA engine.
163 	 */
164 	if (xgene_ahci_poll_reg_val(ap, port_mmio +
165 				    PORT_CMD_ISSUE, 0x0, 1, 100))
166 		return -EBUSY;
167 
168 	ahci_stop_engine(ap);
169 	ahci_start_fis_rx(ap);
170 
171 	/*
172 	 * Enable the PxFBS.FBS_EN bit as it
173 	 * gets cleared due to stopping the engine.
174 	 */
175 	if (pp->fbs_supported) {
176 		fbs = readl(port_mmio + PORT_FBS);
177 		writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
178 		fbs = readl(port_mmio + PORT_FBS);
179 	}
180 
181 	hpriv->start_engine(ap);
182 
183 	return 0;
184 }
185 
186 /**
187  * xgene_ahci_qc_issue - Issue commands to the device
188  * @qc: Command to issue
189  *
190  * Due to a hardware erratum with the IDENTIFY DEVICE command, the controller
191  * cannot clear the BSY bit after receiving the PIO setup FIS. This causes the
192  * DMA state machine to go into the CMFatalErrorUpdate state and lock up.
193  * Restarting the DMA engine brings the controller out of the lock-up state.
194  *
195  * Due to H/W errata, the controller is unable to save the PMP
196  * field fetched from command header before sending the H2D FIS.
197  * When the device returns the PMP port field in the D2H FIS, there is
198  * a mismatch and results in command completion failure. The
199  * a mismatch, which results in command completion failure. The
200  * any command to PMP.
201  */
202 static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
203 {
204 	struct ata_port *ap = qc->ap;
205 	struct ahci_host_priv *hpriv = ap->host->private_data;
206 	struct xgene_ahci_context *ctx = hpriv->plat_data;
207 	int rc = 0;
208 	u32 port_fbs;
209 	void __iomem *port_mmio = ahci_port_base(ap);
210 
211 	/*
212 	 * Write the pmp value to PxFBS.DEV
213 	 * for the case of a Port Multiplier.
214 	 */
215 	if (ctx->class[ap->port_no] == ATA_DEV_PMP) {
216 		port_fbs = readl(port_mmio + PORT_FBS);
217 		port_fbs &= ~PORT_FBS_DEV_MASK;
218 		port_fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
219 		writel(port_fbs, port_mmio + PORT_FBS);
220 	}
221 
222 	if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
223 	    (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET) ||
224 	    (ctx->last_cmd[ap->port_no] == ATA_CMD_SMART)))
225 		xgene_ahci_restart_engine(ap);
226 
227 	rc = ahci_qc_issue(qc);
228 
229 	/* Save the last command issued */
230 	ctx->last_cmd[ap->port_no] = qc->tf.command;
231 
232 	return rc;
233 }
234 
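/**
 * xgene_ahci_is_memram_inited - Check if the IP RAM is already initialized.
 * @ctx: X-Gene AHCI driver context.
 *
 * Returns true if the memory blocks have already been released from
 * shutdown (for example by the boot firmware), in which case the probe
 * path skips the clock and PHY initialization.
 */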
235 static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
236 {
237 	void __iomem *diagcsr = ctx->csr_diag;
238 
239 	return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 &&
240 	        readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF);
241 }
242 
243 /**
244  * xgene_ahci_read_id - Read ID data from the specified device
245  * @dev: device
246  * @tf: proposed taskfile
247  * @id: data buffer
248  *
249  * This custom read ID function is required because the HW does not
250  * support DEVSLP.
251  */
252 static unsigned int xgene_ahci_read_id(struct ata_device *dev,
253 				       struct ata_taskfile *tf, u16 *id)
254 {
255 	u32 err_mask;
256 
257 	err_mask = ata_do_dev_read_id(dev, tf, id);
258 	if (err_mask)
259 		return err_mask;
260 
261 	/*
262 	 * Mask reserved area. Word 78 (Serial ATA features supported):
263 	 * bit15-8: reserved
264 	 * bit7: NCQ autosense
265 	 * bit6: Software settings preservation supported
266 	 * bit5: reserved
267 	 * bit4: In-order SATA delivery supported
268 	 * bit3: DIPM requests supported
269 	 * bit2: DMA Setup FIS Auto-Activate optimization supported
270 	 * bit1: DMA Setup FIS non-zero buffer offsets supported
271 	 * bit0: Reserved
272 	 *
273 	 * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
274 	 */
275 	id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
276 
277 	return 0;
278 }
279 
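/**
 * xgene_ahci_set_phy_cfg - Configure the PHY-related CSRs for a channel.
 * @ctx: X-Gene AHCI driver context.
 * @channel: Channel number (0 or 1).
 *
 * Selects the channel via PORTCFG and programs the PORTPHY1-5CFG,
 * PORTAXICFG and PORTRANSCFG registers: fixed rate disabled, context
 * management enabled, AXI outstanding transactions and the receive FIFO
 * watermark threshold set.
 */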
280 static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
281 {
282 	void __iomem *mmio = ctx->hpriv->mmio;
283 	u32 val;
284 
285 	dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
286 		mmio, channel);
287 	val = readl(mmio + PORTCFG);
288 	val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
289 	writel(val, mmio + PORTCFG);
290 	readl(mmio + PORTCFG);  /* Force a barrier */
291 	/* Disable fixed rate */
292 	writel(0x0001fffe, mmio + PORTPHY1CFG);
293 	readl(mmio + PORTPHY1CFG); /* Force a barrier */
294 	writel(0x28183219, mmio + PORTPHY2CFG);
295 	readl(mmio + PORTPHY2CFG); /* Force a barrier */
296 	writel(0x13081008, mmio + PORTPHY3CFG);
297 	readl(mmio + PORTPHY3CFG); /* Force a barrier */
298 	writel(0x00480815, mmio + PORTPHY4CFG);
299 	readl(mmio + PORTPHY4CFG); /* Force a barrier */
300 	/* Set window negotiation */
301 	val = readl(mmio + PORTPHY5CFG);
302 	val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
303 	writel(val, mmio + PORTPHY5CFG);
304 	readl(mmio + PORTPHY5CFG); /* Force a barrier */
305 	val = readl(mmio + PORTAXICFG);
306 	val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
307 	val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
308 	writel(val, mmio + PORTAXICFG);
309 	readl(mmio + PORTAXICFG); /* Force a barrier */
310 	/* Set the watermark threshold of the receive FIFO */
311 	val = readl(mmio + PORTRANSCFG);
312 	val = PORTRANSCFG_RXWM_SET(val, 0x30);
313 	writel(val, mmio + PORTRANSCFG);
314 }
315 
316 /**
317  * xgene_ahci_do_hardreset - Issue the actual COMRESET
318  * @link: link to reset
319  * @deadline: deadline jiffies for the operation
320  * @online: Return value to indicate if device online
321  *
322  * Due to a limitation of the hardware PHY, a different set of settings is
323  * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
324  * and Gen1 (1.5Gbps). Otherwise, during long IO stress tests, the PHY will
325  * report disparity and other errors. In addition, during COMRESET, errors
326  * can be reported in the PORT_SCR_ERR register. For SERR_DISPARITY and
327  * SERR_10B_8B_ERR, the PHY receiver line must be reset. Also, during long
328  * reboot cycle regression tests, the PHY sometimes reports link down even
329  * though the device is present, because of speed negotiation failure, so
330  * the COMRESET needs to be retried to get the link up. The following
331  * algorithm is used to properly configure the hardware PHY during COMRESET:
332  *
333  * Alg Part 1:
334  * 1. Start the PHY at Gen3 speed (default setting)
335  * 2. Issue the COMRESET
336  * 3. If no link, go to Alg Part 3
337  * 4. If link up, determine if the negotiated speed matches the PHY
338  *    configured speed
339  * 5. If they match, go to Alg Part 2
340  * 6. If they do not match and this is the first attempt, configure the PHY
341  *    for the negotiated disk speed and repeat step 2
342  * 7. Go to Alg Part 2
343  *
344  * Alg Part 2:
345  * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error
346  *    reported in the register PORT_SCR_ERR, then reset the PHY receiver line
347  * 2. Go to Alg Part 4
348  *
349  * Alg Part 3:
350  * 1. Check PORT_SCR_STAT to see whether device presence was detected but
351  *    PHY communication establishment failed; if so, and the number of link
352  *    down attempts is less than the maximum (3), go to Alg Part 1.
353  * 2. Go to Alg Part 4.
354  *
355  * Alg Part 4:
356  * 1. Clear any pending errors from the PORT_SCR_ERR register.
357  *
358  * NOTE: For the initial version, Gen1/Gen2 will NOT be supported. In
359  *       addition, until the underlying PHY supports a method to reset the
360  *       receiver line, only a warning message will be printed on detection
361  *       of SERR_DISPARITY or SERR_10B_8B_ERR errors.
362  */
363 static int xgene_ahci_do_hardreset(struct ata_link *link,
364 				   unsigned long deadline, bool *online)
365 {
366 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
367 	struct ata_port *ap = link->ap;
368 	struct ahci_host_priv *hpriv = ap->host->private_data;
369 	struct xgene_ahci_context *ctx = hpriv->plat_data;
370 	struct ahci_port_priv *pp = ap->private_data;
371 	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
372 	void __iomem *port_mmio = ahci_port_base(ap);
373 	struct ata_taskfile tf;
374 	int link_down_retry = 0;
375 	int rc;
376 	u32 val, sstatus;
377 
378 	do {
379 		/* clear D2H reception area to properly wait for D2H FIS */
380 		ata_tf_init(link->device, &tf);
381 		tf.command = ATA_BUSY;
382 		ata_tf_to_fis(&tf, 0, 0, d2h_fis);
383 		rc = sata_link_hardreset(link, timing, deadline, online,
384 				 ahci_check_ready);
385 		if (*online) {
386 			val = readl(port_mmio + PORT_SCR_ERR);
387 			if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
388 				dev_warn(ctx->dev, "link has error\n");
389 			break;
390 		}
391 
392 		sata_scr_read(link, SCR_STATUS, &sstatus);
393 	} while (link_down_retry++ < MAX_LINK_DOWN_RETRY &&
394 		 (sstatus & 0xff) == 0x1);
395 
396 	/* clear any pending errors */
397 	val = readl(port_mmio + PORT_SCR_ERR);
398 	writel(val, port_mmio + PORT_SCR_ERR);
399 
400 	return rc;
401 }
402 
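/**
 * xgene_ahci_hardreset - Issue a hardreset on the port.
 * @link: link to reset
 * @class: Return value to indicate class of device
 * @deadline: deadline jiffies for the operation
 *
 * The controller hardreset clears several port CSRs (PORT_CMD, the command
 * list and FIS receive area addresses), so they are saved before the
 * COMRESET and restored afterwards, before restarting the engine.
 */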
403 static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
404 				unsigned long deadline)
405 {
406 	struct ata_port *ap = link->ap;
407 	struct ahci_host_priv *hpriv = ap->host->private_data;
408 	void __iomem *port_mmio = ahci_port_base(ap);
409 	bool online;
410 	int rc;
411 	u32 portcmd_saved;
412 	u32 portclb_saved;
413 	u32 portclbhi_saved;
414 	u32 portrxfis_saved;
415 	u32 portrxfishi_saved;
416 
417 	/* As hardreset resets these CSRs, save them to restore later */
418 	portcmd_saved = readl(port_mmio + PORT_CMD);
419 	portclb_saved = readl(port_mmio + PORT_LST_ADDR);
420 	portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
421 	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
422 	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
423 
424 	ahci_stop_engine(ap);
425 
426 	rc = xgene_ahci_do_hardreset(link, deadline, &online);
427 
428 	/* As controller hardreset clears them, restore them */
429 	writel(portcmd_saved, port_mmio + PORT_CMD);
430 	writel(portclb_saved, port_mmio + PORT_LST_ADDR);
431 	writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
432 	writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
433 	writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
434 
435 	hpriv->start_engine(ap);
436 
437 	if (online)
438 		*class = ahci_dev_classify(ap);
439 
440 	return rc;
441 }
442 
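/**
 * xgene_ahci_host_stop - Stop the SATA host.
 * @host: ATA host being stopped.
 *
 * Disables the resources (PHYs, regulators and clocks) that were enabled
 * by the AHCI platform layer.
 */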
443 static void xgene_ahci_host_stop(struct ata_host *host)
444 {
445 	struct ahci_host_priv *hpriv = host->private_data;
446 
447 	ahci_platform_disable_resources(hpriv);
448 }
449 
450 /**
451  * xgene_ahci_pmp_softreset - Issue the softreset to the drives connected
452  *                            to a Port Multiplier.
453  * @link: link to reset
454  * @class: Return value to indicate class of device
455  * @deadline: deadline jiffies for the operation
456  *
457  * Due to H/W errata, the controller is unable to save the PMP
458  * field fetched from command header before sending the H2D FIS.
459  * When the device returns the PMP port field in the D2H FIS, there is
460  * a mismatch, which results in command completion failure. The workaround
461  * is to write the pmp value to PxFBS.DEV field before issuing any command
462  * to PMP.
463  */
464 static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
465 			  unsigned long deadline)
466 {
467 	int pmp = sata_srst_pmp(link);
468 	struct ata_port *ap = link->ap;
469 	u32 rc;
470 	void __iomem *port_mmio = ahci_port_base(ap);
471 	u32 port_fbs;
472 
473 	/*
474 	 * Set PxFBS.DEV field with pmp
475 	 * value.
476 	 */
477 	port_fbs = readl(port_mmio + PORT_FBS);
478 	port_fbs &= ~PORT_FBS_DEV_MASK;
479 	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
480 	writel(port_fbs, port_mmio + PORT_FBS);
481 
482 	rc = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
483 
484 	return rc;
485 }
486 
487 /**
488  * xgene_ahci_softreset - Issue the softreset to the drive.
489  * @link: link to reset
490  * @class: Return value to indicate class of device
491  * @deadline: deadline jiffies for the operation
492  *
493  * Due to H/W errata, the controller is unable to save the PMP
494  * field fetched from command header before sending the H2D FIS.
495  * When the device returns the PMP port field in the D2H FIS, there is
496  * a mismatch, which results in command completion failure. The workaround
497  * is to write the pmp value to PxFBS.DEV field before issuing any command
498  * to PMP. Here is the algorithm used to detect a PMP:
499  *
500  * 1. Save the PxFBS value
501  * 2. Program PxFBS.DEV with the pmp value sent by the framework. The
502  *    framework sends 0xF for both PMP/NON-PMP initially
503  * 3. Issue softreset
504  * 4. If the signature class is PMP, go to step 6
505  * 5. Restore the original PxFBS and go to step 3
506  * 6. Return
507  */
508 static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
509 			  unsigned long deadline)
510 {
511 	int pmp = sata_srst_pmp(link);
512 	struct ata_port *ap = link->ap;
513 	struct ahci_host_priv *hpriv = ap->host->private_data;
514 	struct xgene_ahci_context *ctx = hpriv->plat_data;
515 	void __iomem *port_mmio = ahci_port_base(ap);
516 	u32 port_fbs;
517 	u32 port_fbs_save;
518 	u32 retry = 1;
519 	u32 rc;
520 
521 	port_fbs_save = readl(port_mmio + PORT_FBS);
522 
523 	/*
524 	 * Set PxFBS.DEV field with pmp
525 	 * value.
526 	 */
527 	port_fbs = readl(port_mmio + PORT_FBS);
528 	port_fbs &= ~PORT_FBS_DEV_MASK;
529 	port_fbs |= pmp << PORT_FBS_DEV_OFFSET;
530 	writel(port_fbs, port_mmio + PORT_FBS);
531 
532 softreset_retry:
533 	rc = ahci_do_softreset(link, class, pmp,
534 			       deadline, ahci_check_ready);
535 
536 	ctx->class[ap->port_no] = *class;
537 	if (*class != ATA_DEV_PMP) {
538 		/*
539 		 * Retry for normal drives without
540 		 * setting PxFBS.DEV field with pmp value.
541 		 */
542 		if (retry--) {
543 			writel(port_fbs_save, port_mmio + PORT_FBS);
544 			goto softreset_retry;
545 		}
546 	}
547 
548 	return rc;
549 }
550 
551 /**
552  * xgene_ahci_handle_broken_edge_irq - Handle the broken edge-trigger irq.
553  * @host: Host that received the irq
554  * @irq_masked: HOST_IRQ_STAT value
555  *
556  * On hardware with a broken edge-trigger latch, the
557  * HOST_IRQ_STAT register misses an edge interrupt when
558  * the clearing of the HOST_IRQ_STAT register and the
559  * hardware's update of the PORT_IRQ_STAT register happen
560  * in the same clock cycle.
561  * The algorithm below outlines the workaround.
562  *
563  * 1. Read HOST_IRQ_STAT register and save the state.
564  * 2. Clear the HOST_IRQ_STAT register.
565  * 3. Read back the HOST_IRQ_STAT register.
566  * 4. If the HOST_IRQ_STAT register now reads zero, traverse the
567  *    PORT_IRQ_STAT registers of the remaining ports to check
568  *    whether an interrupt triggered in the meantime; otherwise
569  *    go to step 6.
570  * 5. If a remaining port's PORT_IRQ_STAT register is non-zero,
571  *    update the HOST_IRQ_STAT state saved in step 1.
572  * 6. Handle port interrupts.
573  * 7. Exit
574  */
575 static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
576 					     u32 irq_masked)
577 {
578 	struct ahci_host_priv *hpriv = host->private_data;
579 	void __iomem *port_mmio;
580 	int i;
581 
582 	if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
583 		for (i = 0; i < host->n_ports; i++) {
584 			if (irq_masked & (1 << i))
585 				continue;
586 
587 			port_mmio = ahci_port_base(host->ports[i]);
588 			if (readl(port_mmio + PORT_IRQ_STAT))
589 				irq_masked |= (1 << i);
590 		}
591 	}
592 
593 	return ahci_handle_port_intr(host, irq_masked);
594 }
595 
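/**
 * xgene_ahci_irq_intr - Interrupt handler used by the V2 controller.
 * @irq: Interrupt number.
 * @dev_instance: ATA host the interrupt belongs to.
 *
 * Same flow as the stock AHCI interrupt handler, except that the port
 * interrupts are dispatched through xgene_ahci_handle_broken_edge_irq()
 * to work around the edge-trigger latch erratum described above.
 */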
596 static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
597 {
598 	struct ata_host *host = dev_instance;
599 	struct ahci_host_priv *hpriv;
600 	unsigned int rc = 0;
601 	void __iomem *mmio;
602 	u32 irq_stat, irq_masked;
603 
604 	VPRINTK("ENTER\n");
605 
606 	hpriv = host->private_data;
607 	mmio = hpriv->mmio;
608 
609 	/* sigh.  0xffffffff is a valid return from h/w */
610 	irq_stat = readl(mmio + HOST_IRQ_STAT);
611 	if (!irq_stat)
612 		return IRQ_NONE;
613 
614 	irq_masked = irq_stat & hpriv->port_map;
615 
616 	spin_lock(&host->lock);
617 
618 	/*
619 	 * HOST_IRQ_STAT behaves as an edge-triggered latch, meaning that
620 	 * it should be cleared before all the port events are cleared.
621 	 */
622 	writel(irq_stat, mmio + HOST_IRQ_STAT);
623 
624 	rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
625 
626 	spin_unlock(&host->lock);
627 
628 	VPRINTK("EXIT\n");
629 
630 	return IRQ_RETVAL(rc);
631 }
632 
633 static struct ata_port_operations xgene_ahci_v1_ops = {
634 	.inherits = &ahci_ops,
635 	.host_stop = xgene_ahci_host_stop,
636 	.hardreset = xgene_ahci_hardreset,
637 	.read_id = xgene_ahci_read_id,
638 	.qc_issue = xgene_ahci_qc_issue,
639 	.softreset = xgene_ahci_softreset,
640 	.pmp_softreset = xgene_ahci_pmp_softreset
641 };
642 
643 static const struct ata_port_info xgene_ahci_v1_port_info = {
644 	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
645 	.pio_mask = ATA_PIO4,
646 	.udma_mask = ATA_UDMA6,
647 	.port_ops = &xgene_ahci_v1_ops,
648 };
649 
650 static struct ata_port_operations xgene_ahci_v2_ops = {
651 	.inherits = &ahci_ops,
652 	.host_stop = xgene_ahci_host_stop,
653 	.hardreset = xgene_ahci_hardreset,
654 	.read_id = xgene_ahci_read_id,
655 };
656 
657 static const struct ata_port_info xgene_ahci_v2_port_info = {
658 	.flags = AHCI_FLAG_COMMON | ATA_FLAG_PMP,
659 	.pio_mask = ATA_PIO4,
660 	.udma_mask = ATA_UDMA6,
661 	.port_ops = &xgene_ahci_v2_ops,
662 };
663 
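/**
 * xgene_ahci_hw_init - One-time host controller hardware initialization.
 * @hpriv: AHCI host private data.
 *
 * Releases the IP RAM from shutdown, applies the per-channel PHY/port
 * configuration, clears and unmasks the controller, error and AXI
 * interrupts, and enables read/write coherency on the bus interfaces.
 */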
664 static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
665 {
666 	struct xgene_ahci_context *ctx = hpriv->plat_data;
667 	int i;
668 	int rc;
669 	u32 val;
670 
671 	/* Take the IP RAM out of shutdown */
672 	rc = xgene_ahci_init_memram(ctx);
673 	if (rc)
674 		return rc;
675 
676 	for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
677 		xgene_ahci_set_phy_cfg(ctx, i);
678 
679 	/* Clear pending interrupts and clear the top-level interrupt mask */
680 	writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
681 	readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
682 	writel(0, ctx->csr_core + INTSTATUSMASK);
683 	val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
684 	dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
685 		INTSTATUSMASK, val);
686 
687 	writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
688 	readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
689 	writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
690 	readl(ctx->csr_axi + INT_SLV_TMOMASK); /* Force a barrier */
691 
692 	/* Enable AXI Interrupt */
693 	writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
694 	writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
695 	writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
696 	writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
697 
698 	/* Enable coherency */
699 	val = readl(ctx->csr_core + BUSCTLREG);
700 	val &= ~0x00000002;     /* Enable write coherency */
701 	val &= ~0x00000001;     /* Enable read coherency */
702 	writel(val, ctx->csr_core + BUSCTLREG);
703 
704 	val = readl(ctx->csr_core + IOFMSTRWAUX);
705 	val |= (1 << 3);        /* Enable read coherency */
706 	val |= (1 << 9);        /* Enable write coherency */
707 	writel(val, ctx->csr_core + IOFMSTRWAUX);
708 	val = readl(ctx->csr_core + IOFMSTRWAUX);
709 	dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
710 		IOFMSTRWAUX, val);
711 
712 	return rc;
713 }
714 
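/**
 * xgene_ahci_mux_select - Select SATA on the shared SATA/Ethernet mux.
 * @ctx: X-Gene AHCI driver context.
 *
 * Only applicable when the optional MUX CSR resource was provided; clears
 * the ENET select bit and reads it back to verify the switch took effect.
 */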
715 static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
716 {
717 	u32 val;
718 
719 	/* Check for optional MUX resource */
720 	if (!ctx->csr_mux)
721 		return 0;
722 
723 	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
724 	val &= ~CFG_SATA_ENET_SELECT_MASK;
725 	writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
726 	val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
727 	return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
728 }
729 
730 static struct scsi_host_template ahci_platform_sht = {
731 	AHCI_SHT(DRV_NAME),
732 };
733 
734 #ifdef CONFIG_ACPI
735 static const struct acpi_device_id xgene_ahci_acpi_match[] = {
736 	{ "APMC0D0D", XGENE_AHCI_V1},
737 	{ "APMC0D32", XGENE_AHCI_V2},
738 	{},
739 };
740 MODULE_DEVICE_TABLE(acpi, xgene_ahci_acpi_match);
741 #endif
742 
743 static const struct of_device_id xgene_ahci_of_match[] = {
744 	{.compatible = "apm,xgene-ahci", .data = (void *) XGENE_AHCI_V1},
745 	{.compatible = "apm,xgene-ahci-v2", .data = (void *) XGENE_AHCI_V2},
746 	{},
747 };
748 MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
749 
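/**
 * xgene_ahci_probe - Probe the X-Gene AHCI SATA host controller.
 * @pdev: Platform device. The MEM resources are expected in the order:
 *        AHCI MMIO (index 0, mapped by ahci_platform_get_resources()),
 *        core CSR, diagnostic CSR, AXI CSR and the optional SATA/ENET
 *        MUX CSR.
 */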
750 static int xgene_ahci_probe(struct platform_device *pdev)
751 {
752 	struct device *dev = &pdev->dev;
753 	struct ahci_host_priv *hpriv;
754 	struct xgene_ahci_context *ctx;
755 	struct resource *res;
756 	const struct of_device_id *of_devid;
757 	enum xgene_ahci_version version = XGENE_AHCI_V1;
758 	const struct ata_port_info *ppi[] = { &xgene_ahci_v1_port_info,
759 					      &xgene_ahci_v2_port_info };
760 	int rc;
761 
762 	hpriv = ahci_platform_get_resources(pdev);
763 	if (IS_ERR(hpriv))
764 		return PTR_ERR(hpriv);
765 
766 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
767 	if (!ctx)
768 		return -ENOMEM;
769 
770 	hpriv->plat_data = ctx;
771 	ctx->hpriv = hpriv;
772 	ctx->dev = dev;
773 
774 	/* Retrieve the IP core resource */
775 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
776 	ctx->csr_core = devm_ioremap_resource(dev, res);
777 	if (IS_ERR(ctx->csr_core))
778 		return PTR_ERR(ctx->csr_core);
779 
780 	/* Retrieve the IP diagnostic resource */
781 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
782 	ctx->csr_diag = devm_ioremap_resource(dev, res);
783 	if (IS_ERR(ctx->csr_diag))
784 		return PTR_ERR(ctx->csr_diag);
785 
786 	/* Retrieve the IP AXI resource */
787 	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
788 	ctx->csr_axi = devm_ioremap_resource(dev, res);
789 	if (IS_ERR(ctx->csr_axi))
790 		return PTR_ERR(ctx->csr_axi);
791 
792 	/* Retrieve the optional IP mux resource */
793 	res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
794 	if (res) {
795 		void __iomem *csr = devm_ioremap_resource(dev, res);
796 		if (IS_ERR(csr))
797 			return PTR_ERR(csr);
798 
799 		ctx->csr_mux = csr;
800 	}
801 
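	/*
	 * Determine the controller version: prefer the OF match data; on
	 * ACPI systems fall back to the ACPI ID driver data, and treat a
	 * device that also reports a valid _CID as a V2 controller.
	 */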
802 	of_devid = of_match_device(xgene_ahci_of_match, dev);
803 	if (of_devid) {
804 		if (of_devid->data)
805 			version = (enum xgene_ahci_version) of_devid->data;
806 	}
807 #ifdef CONFIG_ACPI
808 	else {
809 		const struct acpi_device_id *acpi_id;
810 		struct acpi_device_info *info;
811 		acpi_status status;
812 
813 		acpi_id = acpi_match_device(xgene_ahci_acpi_match, &pdev->dev);
814 		if (!acpi_id) {
815 			dev_warn(&pdev->dev, "No node entry in ACPI table. Assume version1\n");
816 			version = XGENE_AHCI_V1;
817 		} else if (acpi_id->driver_data) {
818 			version = (enum xgene_ahci_version) acpi_id->driver_data;
819 			status = acpi_get_object_info(ACPI_HANDLE(&pdev->dev), &info);
820 			if (ACPI_FAILURE(status)) {
821 				dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
822 					__func__);
823 				version = XGENE_AHCI_V1;
824 			} else if (info->valid & ACPI_VALID_CID) {
825 				version = XGENE_AHCI_V2;
826 			}
827 		}
828 	}
829 #endif
830 
831 	dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
832 		hpriv->mmio);
833 
834 	/* Select ATA */
835 	if ((rc = xgene_ahci_mux_select(ctx))) {
836 		dev_err(dev, "SATA mux selection failed error %d\n", rc);
837 		return -ENODEV;
838 	}
839 
840 	if (xgene_ahci_is_memram_inited(ctx)) {
841 		dev_info(dev, "skip clock and PHY initialization\n");
842 		goto skip_clk_phy;
843 	}
844 
845 	/* Due to errata, HW requires full toggle transition */
846 	rc = ahci_platform_enable_clks(hpriv);
847 	if (rc)
848 		goto disable_resources;
849 	ahci_platform_disable_clks(hpriv);
850 
851 	rc = ahci_platform_enable_resources(hpriv);
852 	if (rc)
853 		goto disable_resources;
854 
855 	/* Configure the host controller */
856 	xgene_ahci_hw_init(hpriv);
857 skip_clk_phy:
858 
859 	switch (version) {
860 	case XGENE_AHCI_V1:
861 		hpriv->flags = AHCI_HFLAG_NO_NCQ;
862 		break;
863 	case XGENE_AHCI_V2:
864 		hpriv->flags |= AHCI_HFLAG_YES_FBS;
865 		hpriv->irq_handler = xgene_ahci_irq_intr;
866 		break;
867 	default:
868 		break;
869 	}
870 
871 	rc = ahci_platform_init_host(pdev, hpriv, ppi[version - 1],
872 				     &ahci_platform_sht);
873 	if (rc)
874 		goto disable_resources;
875 
876 	dev_dbg(dev, "X-Gene SATA host controller initialized\n");
877 	return 0;
878 
879 disable_resources:
880 	ahci_platform_disable_resources(hpriv);
881 	return rc;
882 }
883 
884 static struct platform_driver xgene_ahci_driver = {
885 	.probe = xgene_ahci_probe,
886 	.remove = ata_platform_remove_one,
887 	.driver = {
888 		.name = DRV_NAME,
889 		.of_match_table = xgene_ahci_of_match,
890 		.acpi_match_table = ACPI_PTR(xgene_ahci_acpi_match),
891 	},
892 };
893 
894 module_platform_driver(xgene_ahci_driver);
895 
896 MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
897 MODULE_AUTHOR("Loc Ho <lho@apm.com>");
898 MODULE_LICENSE("GPL");
899 MODULE_VERSION("0.4");
900