// SPDX-License-Identifier: GPL-2.0-or-later

/*
 *  acard-ahci.c - ACard AHCI SATA support
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2010 Red Hat, Inc.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include "ahci.h"

#define DRV_NAME	"acard-ahci"
#define DRV_VERSION	"1.0"

/* The received-FIS structure on this controller is limited to 80h (128) bytes. */

#define ACARD_AHCI_RX_FIS_SZ 128

enum {
	AHCI_PCI_BAR		= 5,
};

enum board_ids {
	board_acard_ahci,
};

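/*
 * ACard-specific scatter/gather entry used in place of the standard AHCI
 * PRD: 'size' carries the segment byte count (at most 64k) and bit 31 of
 * it marks the last entry in the table.
 */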
struct acard_sg {
	__le32			addr;
	__le32			addr_hi;
	__le32			reserved;
	__le32			size;	 /* bit 31 (EOT) max==0x10000 (64k) */
};

static void acard_ahci_qc_prep(struct ata_queued_cmd *qc);
static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

#ifdef CONFIG_PM_SLEEP
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int acard_ahci_pci_device_resume(struct pci_dev *pdev);
#endif

static struct scsi_host_template acard_ahci_sht = {
	AHCI_SHT("acard-ahci"),
};

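/*
 * The ACard ops inherit the generic AHCI operations and override only
 * command table preparation, result taskfile readback and port start,
 * so the driver can use its own S/G entry format and the smaller
 * received-FIS area.
 */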
static struct ata_port_operations acard_ops = {
	.inherits		= &ahci_ops,
	.qc_prep		= acard_ahci_qc_prep,
	.qc_fill_rtf		= acard_ahci_qc_fill_rtf,
	.port_start             = acard_ahci_port_start,
};

#define AHCI_HFLAGS(flags)	.private_data	= (void *)(flags)

static const struct ata_port_info acard_ahci_port_info[] = {
	[board_acard_ahci] =
	{
		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &acard_ops,
	},
};

static const struct pci_device_id acard_ahci_pci_tbl[] = {
	/* ACard */
	{ PCI_VDEVICE(ARTOP, 0x000d), board_acard_ahci }, /* ATP8620 */

	{ }    /* terminate list */
};

static struct pci_driver acard_ahci_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= acard_ahci_pci_tbl,
	.probe			= acard_ahci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= acard_ahci_pci_device_suspend,
	.resume			= acard_ahci_pci_device_resume,
#endif
};

#ifdef CONFIG_PM_SLEEP
static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->mmio;
	u32 ctl;

	if (mesg.event & PM_EVENT_SUSPEND &&
	    hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
		dev_err(&pdev->dev,
			"BIOS update required for suspend/resume\n");
		return -EIO;
	}

	if (mesg.event & PM_EVENT_SLEEP) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int acard_ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

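	/*
	 * Coming back from a full system suspend the HBA needs a reset and
	 * re-initialization before the ports are resumed.
	 */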
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

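/*
 * Pick the widest DMA mask the device and platform support: try 64-bit
 * addressing when the HBA advertises it (CAP.S64A), otherwise fall back
 * to 32-bit streaming and coherent masks.
 */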
static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
	int rc;

	if (using_dac &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (rc) {
			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}

static void acard_ahci_pci_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u16 cc;
	const char *scc_s;

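	/* report whether the controller presents itself as IDE, SATA (AHCI) or RAID class */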
	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	ahci_print_info(host, scc_s);
}

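/*
 * Build the ACard-format S/G table that follows the command FIS inside
 * the command table.  Returns the number of entries used.
 */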
static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si, last_si = 0;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/*
		 * ACard note:
		 * We must set an end-of-table (EOT) bit,
		 * and the segment cannot exceed 64k (0x10000)
		 */
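		/*
		 * The split 16-bit shifts keep the high-address expression
		 * well defined when dma_addr_t is only 32 bits wide.
		 */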
		acard_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		acard_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		acard_sg[si].size = cpu_to_le32(sg_len);
		last_si = si;
	}

	acard_sg[last_si].size |= cpu_to_le32(1 << 31);	/* set EOT */

	return si;
}

static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = acard_ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 *
	 * ACard note: prd table length not filled in
	 */
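	/*
	 * opts becomes DW0 of the command header: the FIS length in dwords
	 * sits in bits 4:0 and the PMP port number in bits 15:12; the write,
	 * ATAPI and prefetch flags are OR'd in below.
	 */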
	opts = cmd_fis_len | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
}

static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *rx_fis = pp->rx_fis;

	if (pp->fbs_enabled)
		rx_fis += qc->dev->link->pmp * ACARD_AHCI_RX_FIS_SZ;

	/*
	 * After a successful execution of an ATA PIO data-in command,
	 * the device doesn't send D2H Reg FIS to update the TF and
	 * the host should take TF and E_Status from the preceding PIO
	 * Setup FIS.
	 */
	if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
	    !(qc->flags & ATA_QCFLAG_FAILED)) {
		ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
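		/* byte 15 of the PIO Setup FIS is the E_Status field */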
		qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15];
	} else
		ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);

	return true;
}

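/*
 * Per-port setup: allocate the port private data plus one DMA region that
 * holds the command slot list, the received-FIS area and the command
 * tables, then bring the port back up.
 */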
static int acard_ahci_port_start(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;
	size_t dma_sz, rx_fis_sz;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* check FBS capability */
	if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
		void __iomem *port_mmio = ahci_port_base(ap);
		u32 cmd = readl(port_mmio + PORT_CMD);
		if (cmd & PORT_CMD_FBSCP)
			pp->fbs_supported = true;
		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
			dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
				 ap->port_no);
			pp->fbs_supported = true;
		} else
			dev_warn(dev, "port %d is not capable of FBS\n",
				 ap->port_no);
	}

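	/* with FBS enabled each of the 16 PMP device positions gets its own RX FIS area */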
	if (pp->fbs_supported) {
		dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ * 16;
	} else {
		dma_sz = AHCI_PORT_PRIV_DMA_SZ;
		rx_fis_sz = ACARD_AHCI_RX_FIS_SZ;
	}

	mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += rx_fis_sz;
	mem_dma += rx_fis_sz;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}

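/*
 * Standard libata PCI probe: enable the device, map the AHCI BAR, set up
 * ahci_host_priv from the port info and the controller's CAP/port_map
 * registers, allocate the host, then reset the HBA and hand it to libata.
 */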
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned int board_id = ent->driver_data;
	struct ata_port_info pi = acard_ahci_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement an SFF-compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	hpriv->irq = pdev->irq;
	hpriv->flags |= (unsigned long)pi.private_data;

	if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
		pci_enable_msi(pdev);

	hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];

	/* save initial config */
	ahci_save_initial_config(&pdev->dev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	ahci_set_em_messages(hpriv, &pi);

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port and at other times that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->private_data = hpriv;

	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
		host->flags |= ATA_HOST_PARALLEL_SCAN;
	else
		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		/*
		ap->pm_policy = NOT_AVAILABLE;
		*/
		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	/* initialize adapter */
	rc = acard_ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
	if (rc)
		return rc;

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	acard_ahci_pci_print_info(host);

	pci_set_master(pdev);
	return ahci_host_activate(host, &acard_ahci_sht);
}

module_pci_driver(acard_ahci_pci_driver);

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("ACard AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, acard_ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);