/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	iowrite8(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}

/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}
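
/*
 * Example (illustrative, not part of the original file): non-DMA command
 * issue combines the two hooks above, loading the shadow registers and
 * then writing the command register, much as libata core's
 * ata_tf_to_host() helper does.  A minimal sketch:
 */
static inline void example_tf_to_host(struct ata_port *ap,
				      const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);	/* program taskfile registers */
	ap->ops->exec_command(ap, tf);	/* then issue the command */
}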

/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_chk_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = ioread8(ioaddr->error_addr);
		tf->hob_nsect = ioread8(ioaddr->nsect_addr);
		tf->hob_lbal = ioread8(ioaddr->lbal_addr);
		tf->hob_lbam = ioread8(ioaddr->lbam_addr);
		tf->hob_lbah = ioread8(ioaddr->lbah_addr);
		iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
	}
}

/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}

/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best to not add a readb()
	 * without first auditing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO
	 */
}
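
/*
 * Example (illustrative, not part of the original file): for an
 * ATA_PROT_DMA command the issue path calls the two hooks in order.
 * ata_bmdma_setup() above programs the PRD pointer and direction and
 * already issues the ATA command via ->exec_command(); only then is
 * the engine started.  A hedged sketch of that sequence:
 */
static void example_issue_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ap->ops->bmdma_setup(qc);	/* PRD table, direction, command */
	ap->ops->bmdma_start(qc);	/* set ATA_DMA_START; DMA is live */
}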

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);        /* dummy read */
}
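
/*
 * Example (illustrative, not part of the original file): on completion,
 * interrupt handlers generally sample bmdma_status() before calling
 * bmdma_stop(), so that error bits are captured before the engine is
 * torn down.  A hedged sketch of that ordering:
 */
static u8 example_dma_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 host_stat;

	host_stat = ap->ops->bmdma_status(ap);	/* read status first */
	ap->ops->bmdma_stop(qc);		/* then clear ATA_DMA_START */

	return host_stat;
}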

/**
 *	ata_bmdma_freeze - Freeze BMDMA controller port
 *	@ap: port to freeze
 *
 *	Freeze BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ata_chk_status(ap);

	ap->ops->irq_clear(ap);
}

/**
 *	ata_bmdma_thaw - Thaw BMDMA controller port
 *	@ap: port to thaw
 *
 *	Thaw BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	ap->ops->irq_on(ap);
}

/**
 *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 *	@ap: port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Handle error for ATA BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	This function is intended to be used for constructing
 *	->error_handler callback by low level drivers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}

/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (sata_scr_valid(&ap->link))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
			   ata_std_postreset);
}

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				      BMDMA controller
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	if (qc->ap->ioaddr.bmdma_addr)
		ata_bmdma_stop(qc);
}

/**
 *	ata_sff_port_start - Set port up for DMA.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for the PRD table if the
 *	device is a DMA-capable SFF device.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

int ata_sff_port_start(struct ata_port *ap)
{
	if (ap->ioaddr.bmdma_addr)
		return ata_port_start(ap);
	return 0;
}
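
/*
 * Example (illustrative, not part of the original file): a typical BMDMA
 * driver points its ata_port_operations at the helpers in this file.
 * A hedged sketch for a hypothetical driver "pata_foo", showing only the
 * fields covered above (tf_load, data transfer hooks, etc. omitted):
 */
static const struct ata_port_operations pata_foo_port_ops_example = {
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.port_start		= ata_sff_port_start,
};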

#ifdef CONFIG_PCI

static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check the PCI resources for this channel are enabled */
	port = port * 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return 0;
	}
	return 1;
}

/**
 *	ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_init_bmdma(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0)
		return 0;

	/* TODO: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
	if (rc) {
		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
		return -ENOMEM;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}

	return 0;
}

/**
 *	ata_pci_init_sff_host - acquire native PCI ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	Note that native PCI resources are valid even for legacy hosts
 *	as we fix up pdev resources array early in boot, so this
 *	function can be used for both native and legacy SFF hosts.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_init_sff_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base, DRV_NAME);
		if (rc) {
			dev_printk(KERN_WARNING, gdev,
				   "failed to request/iomap BARs for port %d "
				   "(errno=%d)\n", i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_std_ports(&ap->ioaddr);

		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
			(unsigned long long)pci_resource_start(pdev, base),
			(unsigned long long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}

/**
 *	ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate ATA host for @pdev, acquire all native PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_prepare_sff_host(struct pci_dev *pdev,
			     const struct ata_port_info * const * ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_init_sff_host(host);
	if (rc)
		goto err_out;

	/* init DMA related stuff */
	rc = ata_pci_init_bmdma(host);
	if (rc)
		goto err_bmdma;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

 err_bmdma:
	/* This is necessary because PCI and iomap resources are
	 * merged and releasing the top group won't release the
	 * acquired resources if some of those have been acquired
	 * before entering this function.
	 */
	pcim_iounmap_regions(pdev, 0xf);
 err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}
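
/*
 * Example (illustrative, not part of the original file): a driver that
 * needs its own interrupt handling can call ata_pci_prepare_sff_host()
 * directly and finish with ata_host_activate(), rather than going through
 * ata_pci_init_one() below.  A hedged sketch, with the "pata_foo" name
 * as a hypothetical placeholder:
 */
static int pata_foo_prepare_example(struct pci_dev *pdev,
				    const struct ata_port_info * const *ppi,
				    struct scsi_host_template *sht)
{
	struct ata_host *host;
	int rc;

	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, sht);
}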

/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and calls
 *	ata_device_add()
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */
int ata_pci_init_one(struct pci_dev *pdev,
		     const struct ata_port_info * const * ppi)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi = NULL;
	struct ata_host *host = NULL;
	u8 mask;
	int legacy_mode = 0;
	int i, rc;

	DPRINTK("ENTER\n");

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++) {
		if (ppi[i]->port_ops != &ata_dummy_port_ops) {
			pi = ppi[i];
			break;
		}
	}

	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled. Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	  */

	rc = pcim_enable_device(pdev);
	if (rc)
		goto err_out;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8;

		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space. In that case we punt if their firmware has
		   left a device in compatibility mode */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			rc = -EOPNOTSUPP;
			goto err_out;
		}
#endif
	}

	/* prepare host */
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		goto err_out;

	pci_set_master(pdev);

	/* start host and request IRQ */
	rc = ata_host_start(host);
	if (rc)
		goto err_out;

	if (!legacy_mode) {
		rc = devm_request_irq(dev, pdev->irq, pi->port_ops->irq_handler,
				      IRQF_SHARED, DRV_NAME, host);
		if (rc)
			goto err_out;

		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
	} else {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      pi->port_ops->irq_handler,
					      IRQF_SHARED, DRV_NAME, host);
			if (rc)
				goto err_out;

			ata_port_desc(host->ports[0], "irq %d",
				      ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      pi->port_ops->irq_handler,
					      IRQF_SHARED, DRV_NAME, host);
			if (rc)
				goto err_out;

			ata_port_desc(host->ports[1], "irq %d",
				      ATA_SECONDARY_IRQ(pdev));
		}
	}

	/* register */
	rc = ata_host_register(host, pi->sht);
	if (rc)
		goto err_out;

	devres_remove_group(dev, NULL);
	return 0;

err_out:
	devres_release_group(dev, NULL);
	return rc;
}
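
/*
 * Example (illustrative, not part of the original file): a minimal
 * xxx_init_one() probe built on ata_pci_init_one().  The "pata_foo"
 * names, transfer masks and scsi_host_template are hypothetical
 * placeholders, not taken from any real driver:
 */
static int pata_foo_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.sht		= &pata_foo_sht,	/* assumed template */
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,			/* PIO0-4 */
		.mwdma_mask	= 0x07,			/* MWDMA0-2 */
		.udma_mask	= ATA_UDMA4,		/* up to UDMA/66 */
		.port_ops	= &pata_foo_port_ops_example,
	};
	const struct ata_port_info *ppi[] = { &info, &info };

	return ata_pci_init_one(pdev, ppi);
}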

/**
 *	ata_pci_clear_simplex	-	attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non-simplex mode. This implements the necessary logic to
 *	perform the task on such devices. Calling it on other devices will
 *	have -undefined- behaviour.
 */

int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
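
/*
 * Example (illustrative, not part of the original file): a host driver
 * that knows its controller merely reports simplex operation might try
 * to clear it at probe time, dropping ATA_HOST_SIMPLEX on success.  A
 * hedged sketch with a hypothetical "pata_foo" helper:
 */
static void pata_foo_try_clear_simplex(struct pci_dev *pdev,
				       struct ata_host *host)
{
	if (ata_pci_clear_simplex(pdev) == 0)
		host->flags &= ~ATA_HOST_SIMPLEX;	/* full duplex DMA */
}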

unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
{
	/* Filter out DMA modes if the device has been configured by
	   the BIOS as PIO only */

	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}
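
/*
 * Example (illustrative, not part of the original file): drivers opt in
 * through the mode_filter hook, either directly or by chaining to this
 * helper after applying their own restrictions.  A hedged sketch:
 */
static unsigned long pata_foo_mode_filter(struct ata_device *adev,
					  unsigned long xfer_mask)
{
	/* a hypothetical device-specific restriction would go here */
	return ata_pci_default_filter(adev, xfer_mask);
}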

#endif /* CONFIG_PCI */