/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ioaddr->ctl_addr)
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}

/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
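
/*
 * Illustrative sketch, not part of this file: controllers that need extra
 * work around taskfile writes typically wrap this helper in their own
 * ->tf_load method rather than duplicating it.  The xyz_* names below are
 * hypothetical.
 *
 *	static void xyz_tf_load(struct ata_port *ap,
 *				const struct ata_taskfile *tf)
 *	{
 *		xyz_select_bank(ap);	(hypothetical controller quirk)
 *		ata_tf_load(ap, tf);
 *	}
 */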

/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.  Assumes the device has a fully SFF-compliant taskfile
 *	layout and behaviour.  If your device does not (e.g. it has a
 *	different status method), you will need to provide a replacement
 *	tf_read.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON(1);
	}
}
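
/*
 * Illustrative sketch, not part of this file: as noted above, a device with
 * a non-standard status register needs a replacement ->tf_read.  One common
 * shape is to reuse this helper and then patch up the fields that differ;
 * the xyz_* names are hypothetical.
 *
 *	static void xyz_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
 *	{
 *		ata_tf_read(ap, tf);
 *		tf->command = xyz_check_status(ap);	(custom status read)
 *	}
 */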

/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}

/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear.
	 * Note: ATA_DMA_WR selects bus-master writes to system memory,
	 * so it is set for ATA reads (device-to-memory transfers).
	 */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* Strictly, one may wish to issue an ioread8() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best not to add a readb()
	 * without first checking all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 *
	 * FIXME: The posting of this write means I/O starts are
	 * unnecessarily delayed for MMIO
	 */
}

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	if (!mmio)
		return;

	iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
}

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the DMA control register.
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);        /* dummy read */
}
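
/*
 * Illustrative sketch, not part of this file: the SFF/BMDMA helpers above
 * (and the freeze/thaw/port_start helpers below) are normally wired
 * directly into a driver's ata_port_operations.  A minimal wiring for a
 * PATA BMDMA driver of this vintage might look like the following
 * (xyz_port_ops is a hypothetical name; unrelated members are omitted):
 *
 *	static struct ata_port_operations xyz_port_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.exec_command	= ata_exec_command,
 *		.check_status	= ata_check_status,
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.freeze		= ata_bmdma_freeze,
 *		.thaw		= ata_bmdma_thaw,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.irq_on		= ata_irq_on,
 *		.port_start	= ata_sff_port_start,
 *	};
 */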

/**
 *	ata_bmdma_freeze - Freeze BMDMA controller port
 *	@ap: port to freeze
 *
 *	Freeze BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ioaddr->ctl_addr)
		iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation.  Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion.  Clear it.
	 */
	ata_chk_status(ap);

	ap->ops->irq_clear(ap);
}

/**
 *	ata_bmdma_thaw - Thaw BMDMA controller port
 *	@ap: port to thaw
 *
 *	Thaw BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	ap->ops->irq_on(ap);
}

/**
 *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 *	@ap: port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Handle error for ATA BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	This function is intended to be used by low-level drivers to
 *	construct their ->error_handler callback.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATAPI_PROT_DMA)) {
		u8 host_stat;

		host_stat = ap->ops->bmdma_status(ap);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}
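
/*
 * Illustrative sketch, not part of this file: a driver that needs only a
 * custom reset method can still build its ->error_handler on top of
 * ata_bmdma_drive_eh(); the xyz_* names are hypothetical.
 *
 *	static void xyz_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, xyz_softreset,
 *				   NULL, ata_std_postreset);
 *	}
 */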

/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t softreset = NULL, hardreset = NULL;

	if (ap->ioaddr.ctl_addr)
		softreset = ata_std_softreset;
	if (sata_scr_valid(&ap->link))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, softreset, hardreset,
			   ata_std_postreset);
}

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				      BMDMA controller
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	if (qc->ap->ioaddr.bmdma_addr)
		ata_bmdma_stop(qc);
}

/**
 *	ata_sff_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for the PRD table if the
 *	device is a DMA-capable SFF device.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
int ata_sff_port_start(struct ata_port *ap)
{
	if (ap->ioaddr.bmdma_addr)
		return ata_port_start(ap);
	return 0;
}
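
/*
 * Illustrative sketch, not part of this file: a driver that needs extra
 * per-port setup can delegate PRD allocation to ata_sff_port_start() and
 * layer its own work on top; the xyz_* names are hypothetical.
 *
 *	static int xyz_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_sff_port_start(ap);
 *
 *		if (rc == 0)
 *			xyz_init_port_regs(ap);	(hypothetical chip setup)
 *		return rc;
 *	}
 */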

#ifdef CONFIG_PCI

static int ata_resources_present(struct pci_dev *pdev, int port)
{
	int i;

	/* Check that the PCI resources for this channel are enabled */
	port = port * 2;
	for (i = 0; i < 2; i++) {
		if (pci_resource_start(pdev, port + i) == 0 ||
		    pci_resource_len(pdev, port + i) == 0)
			return 0;
	}
	return 1;
}

/**
 *	ata_pci_init_bmdma - acquire PCI BMDMA resources and init ATA host
 *	@host: target ATA host
 *
 *	Acquire PCI BMDMA resources and initialize @host accordingly.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_init_bmdma(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	int i, rc;

	/* No BAR4 allocation: No DMA */
	if (pci_resource_start(pdev, 4) == 0)
		return 0;

	/* TODO: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* request and iomap DMA region */
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		dev_printk(KERN_ERR, gdev, "failed to request/iomap BAR4\n");
		return -ENOMEM;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *bmdma = host->iomap[4] + 8 * i;

		if (ata_port_is_dummy(ap))
			continue;

		ap->ioaddr.bmdma_addr = bmdma;
		if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
		    (ioread8(bmdma + 2) & 0x80))
			host->flags |= ATA_HOST_SIMPLEX;

		ata_port_desc(ap, "bmdma 0x%llx",
			(unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
	}

	return 0;
}

/**
 *	ata_pci_init_sff_host - acquire native PCI ATA resources and init host
 *	@host: target ATA host
 *
 *	Acquire native PCI ATA resources for @host and initialize the
 *	first two ports of @host accordingly.  Ports marked dummy are
 *	skipped and allocation failure makes the port dummy.
 *
 *	Note that native PCI resources are valid even for legacy hosts
 *	as we fix up pdev resources array early in boot, so this
 *	function can be used for both native and legacy SFF hosts.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if at least one port is initialized, -ENODEV if no port is
 *	available.
 */
int ata_pci_init_sff_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/* request, iomap BARs and init port addresses accordingly */
	for (i = 0; i < 2; i++) {
		struct ata_port *ap = host->ports[i];
		int base = i * 2;
		void __iomem * const *iomap;

		if (ata_port_is_dummy(ap))
			continue;

		/* Discard disabled ports.  Some controllers show
		 * their unused channels this way.  Disabled ports are
		 * made dummy.
		 */
		if (!ata_resources_present(pdev, i)) {
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		rc = pcim_iomap_regions(pdev, 0x3 << base,
					dev_driver_string(gdev));
		if (rc) {
			dev_printk(KERN_WARNING, gdev,
				   "failed to request/iomap BARs for port %d "
				   "(errno=%d)\n", i, rc);
			if (rc == -EBUSY)
				pcim_pin_device(pdev);
			ap->ops = &ata_dummy_port_ops;
			continue;
		}
		host->iomap = iomap = pcim_iomap_table(pdev);

		ap->ioaddr.cmd_addr = iomap[base];
		ap->ioaddr.altstatus_addr =
		ap->ioaddr.ctl_addr = (void __iomem *)
			((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
		ata_std_ports(&ap->ioaddr);

		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
			(unsigned long long)pci_resource_start(pdev, base),
			(unsigned long long)pci_resource_start(pdev, base + 1));

		mask |= 1 << i;
	}

	if (!mask) {
		dev_printk(KERN_ERR, gdev, "no available native port\n");
		return -ENODEV;
	}

	return 0;
}

/**
 *	ata_pci_prepare_sff_host - helper to prepare native PCI ATA host
 *	@pdev: target PCI device
 *	@ppi: array of port_info, must be enough for two ports
 *	@r_host: out argument for the initialized ATA host
 *
 *	Helper to allocate ATA host for @pdev, acquire all native PCI
 *	resources and initialize it accordingly in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_prepare_sff_host(struct pci_dev *pdev,
			     const struct ata_port_info * const * ppi,
			     struct ata_host **r_host)
{
	struct ata_host *host;
	int rc;

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate ATA host\n");
		rc = -ENOMEM;
		goto err_out;
	}

	rc = ata_pci_init_sff_host(host);
	if (rc)
		goto err_out;

	/* init DMA related stuff */
	rc = ata_pci_init_bmdma(host);
	if (rc)
		goto err_bmdma;

	devres_remove_group(&pdev->dev, NULL);
	*r_host = host;
	return 0;

 err_bmdma:
	/* This is necessary because PCI and iomap resources are
	 * merged and releasing the top group won't release the
	 * acquired resources if some of those have been acquired
	 * before entering this function.
	 */
	pcim_iounmap_regions(pdev, 0xf);
 err_out:
	devres_release_group(&pdev->dev, NULL);
	return rc;
}

/**
 *	ata_pci_activate_sff_host - start SFF host, request IRQ and register it
 *	@host: target SFF ATA host
 *	@irq_handler: irq_handler used when requesting IRQ(s)
 *	@sht: scsi_host_template to use when registering the host
 *
 *	This is the counterpart of ata_host_activate() for SFF ATA
 *	hosts.  This separate helper is necessary because SFF hosts
 *	use two separate interrupts in legacy mode.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_pci_activate_sff_host(struct ata_host *host,
			      irq_handler_t irq_handler,
			      struct scsi_host_template *sht)
{
	struct device *dev = host->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	const char *drv_name = dev_driver_string(host->dev);
	int legacy_mode = 0, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		u8 tmp8, mask;

		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = 1;
#if defined(CONFIG_NO_ATA_LEGACY)
		/* Some platforms with PCI limits cannot address compat
		   port space.  In that case we punt if their firmware has
		   left a device in compatibility mode. */
		if (legacy_mode) {
			printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
			return -EOPNOTSUPP;
		}
#endif
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	if (!legacy_mode && pdev->irq) {
		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		ata_port_desc(host->ports[0], "irq %d", pdev->irq);
		ata_port_desc(host->ports[1], "irq %d", pdev->irq);
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[0], "irq %d",
				      ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc(host->ports[1], "irq %d",
				      ATA_SECONDARY_IRQ(pdev));
		}
	}

	rc = ata_host_register(host, sht);
 out:
	if (rc == 0)
		devres_remove_group(dev, NULL);
	else
		devres_release_group(dev, NULL);

	return rc;
}
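
/*
 * Illustrative sketch, not part of this file: a driver that must adjust the
 * host between resource setup and registration uses the prepare/activate
 * pair instead of ata_pci_init_one().  ata_interrupt is the stock SFF
 * handler; xyz_apply_fixups and xyz_sht are hypothetical.
 *
 *	struct ata_host *host;
 *	int rc;
 *
 *	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
 *	if (rc)
 *		return rc;
 *	xyz_apply_fixups(host);
 *	pci_set_master(pdev);
 *	return ata_pci_activate_sff_host(host, ata_interrupt, &xyz_sht);
 */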

/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@ppi: array of port_info, must be enough for two ports
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pcim_enable_device(), reserves its register
 *	regions, sets the DMA mask, enables bus master mode, and
 *	registers the resulting ATA host.
 *
 *	ASSUMPTION:
 *	Nobody makes a single channel controller that appears solely as
 *	the secondary legacy port on PCI.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */
int ata_pci_init_one(struct pci_dev *pdev,
		     const struct ata_port_info * const * ppi)
{
	struct device *dev = &pdev->dev;
	const struct ata_port_info *pi = NULL;
	struct ata_host *host = NULL;
	int i, rc;

	DPRINTK("ENTER\n");

	/* look up the first valid port_info */
	for (i = 0; i < 2 && ppi[i]; i++) {
		if (ppi[i]->port_ops != &ata_dummy_port_ops) {
			pi = ppi[i];
			break;
		}
	}

	if (!pi) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "no valid port_info specified\n");
		return -EINVAL;
	}

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		goto out;

	/* prepare and activate SFF host */
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		goto out;

	pci_set_master(pdev);
	rc = ata_pci_activate_sff_host(host, pi->port_ops->irq_handler,
				       pi->sht);
 out:
	if (rc == 0)
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);

	return rc;
}
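
/*
 * Illustrative sketch, not part of this file: a typical PATA driver probe
 * of this vintage builds a port_info array and hands everything to
 * ata_pci_init_one(); the xyz_* names are hypothetical.
 *
 *	static int xyz_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.sht		= &xyz_sht,
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= 0x1f,
 *			.mwdma_mask	= 0x07,
 *			.udma_mask	= ATA_UDMA5,
 *			.port_ops	= &xyz_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_init_one(pdev, ppi);
 *	}
 */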

/**
 *	ata_pci_clear_simplex	-	attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non-simplex mode.  This implements the necessary logic to
 *	perform the task on such devices.  Calling it on other devices will
 *	have -undefined- behaviour.
 */
int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
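
/*
 * Illustrative sketch, not part of this file: drivers for chips known to
 * misreport simplex would typically call this once at probe time and
 * honour the simplex flag only if the clear fails:
 *
 *	if (ata_pci_clear_simplex(pdev) != 0)
 *		dev_printk(KERN_INFO, &pdev->dev,
 *			   "simplex mode could not be cleared\n");
 */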

/**
 *	ata_pci_default_filter - filter out DMA modes on BMDMA-less ports
 *	@adev: ATA device to compute the mode filter for
 *	@xfer_mask: proposed transfer mask
 *
 *	Filter out the DMA modes if the device has been configured by
 *	the BIOS as PIO only.  Intended for use as (or from) a driver's
 *	->mode_filter hook.
 */
unsigned long ata_pci_default_filter(struct ata_device *adev, unsigned long xfer_mask)
{
	if (adev->link->ap->ioaddr.bmdma_addr == NULL)
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}
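
/*
 * Illustrative sketch, not part of this file: this helper is meant to serve
 * as the ->mode_filter hook, either directly or chained from a driver's own
 * filter; xyz_is_quirky is hypothetical.
 *
 *	static unsigned long xyz_mode_filter(struct ata_device *adev,
 *					     unsigned long xfer_mask)
 *	{
 *		if (xyz_is_quirky(adev))
 *			xfer_mask &= ~ATA_MASK_UDMA;
 *		return ata_pci_default_filter(adev, xfer_mask);
 *	}
 */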

#endif /* CONFIG_PCI */