xref: /openbmc/linux/drivers/ata/libata-core.c (revision 72ad6ec4)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
3392c52c52SAlan Cox  *  Standards documents from:
3492c52c52SAlan Cox  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
3592c52c52SAlan Cox  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
3692c52c52SAlan Cox  *	http://www.sata-io.org (SATA)
3792c52c52SAlan Cox  *	http://www.compactflash.org (CF)
3892c52c52SAlan Cox  *	http://www.qic.org (QIC157 - Tape and DSC)
3992c52c52SAlan Cox  *	http://www.ce-ata.org (CE-ATA: not supported)
4092c52c52SAlan Cox  *
41c6fd2807SJeff Garzik  */
42c6fd2807SJeff Garzik 
43c6fd2807SJeff Garzik #include <linux/kernel.h>
44c6fd2807SJeff Garzik #include <linux/module.h>
45c6fd2807SJeff Garzik #include <linux/pci.h>
46c6fd2807SJeff Garzik #include <linux/init.h>
47c6fd2807SJeff Garzik #include <linux/list.h>
48c6fd2807SJeff Garzik #include <linux/mm.h>
49c6fd2807SJeff Garzik #include <linux/highmem.h>
50c6fd2807SJeff Garzik #include <linux/spinlock.h>
51c6fd2807SJeff Garzik #include <linux/blkdev.h>
52c6fd2807SJeff Garzik #include <linux/delay.h>
53c6fd2807SJeff Garzik #include <linux/timer.h>
54c6fd2807SJeff Garzik #include <linux/interrupt.h>
55c6fd2807SJeff Garzik #include <linux/completion.h>
56c6fd2807SJeff Garzik #include <linux/suspend.h>
57c6fd2807SJeff Garzik #include <linux/workqueue.h>
58c6fd2807SJeff Garzik #include <linux/jiffies.h>
59c6fd2807SJeff Garzik #include <linux/scatterlist.h>
602dcb407eSJeff Garzik #include <linux/io.h>
61c6fd2807SJeff Garzik #include <scsi/scsi.h>
62c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
63c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
64c6fd2807SJeff Garzik #include <linux/libata.h>
65c6fd2807SJeff Garzik #include <asm/semaphore.h>
66c6fd2807SJeff Garzik #include <asm/byteorder.h>
67140b5e59STejun Heo #include <linux/cdrom.h>
68c6fd2807SJeff Garzik 
69c6fd2807SJeff Garzik #include "libata.h"
70c6fd2807SJeff Garzik 
71fda0efc5SJeff Garzik 
72c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
73c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
74c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
75c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
76c6fd2807SJeff Garzik 
77c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
78c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
79c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
80218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev,
81218f3d30SJeff Garzik 					u8 enable, u8 feature);
82c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
8375683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
84c6fd2807SJeff Garzik 
85f3187195STejun Heo unsigned int ata_print_id = 1;
86c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
87c6fd2807SJeff Garzik 
88c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
89c6fd2807SJeff Garzik 
9033267325STejun Heo struct ata_force_param {
9133267325STejun Heo 	const char	*name;
9233267325STejun Heo 	unsigned int	cbl;
9333267325STejun Heo 	int		spd_limit;
9433267325STejun Heo 	unsigned long	xfer_mask;
9533267325STejun Heo 	unsigned int	horkage_on;
9633267325STejun Heo 	unsigned int	horkage_off;
9733267325STejun Heo };
9833267325STejun Heo 
9933267325STejun Heo struct ata_force_ent {
10033267325STejun Heo 	int			port;
10133267325STejun Heo 	int			device;
10233267325STejun Heo 	struct ata_force_param	param;
10333267325STejun Heo };
10433267325STejun Heo 
10533267325STejun Heo static struct ata_force_ent *ata_force_tbl;
10633267325STejun Heo static int ata_force_tbl_size;
10733267325STejun Heo 
10833267325STejun Heo static char ata_force_param_buf[PAGE_SIZE] __initdata;
10933267325STejun Heo module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444);
11033267325STejun Heo MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
11133267325STejun Heo 
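/*
 * Usage sketch (based on the example strings in the ata_force_cbl()
 * comment below): booting with
 *
 *	libata.force=1.00:40c,udma4
 *
 * forces a 40-wire cable type on port 1 and caps device 1.00 at UDMA/66.
 */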
112c6fd2807SJeff Garzik int atapi_enabled = 1;
113c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
114c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
115c6fd2807SJeff Garzik 
116c5c61bdaSAdrian Bunk static int atapi_dmadir = 0;
117c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
118c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
119c6fd2807SJeff Garzik 
120baf4fdfaSMark Lord int atapi_passthru16 = 1;
121baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444);
122baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
123baf4fdfaSMark Lord 
124c6fd2807SJeff Garzik int libata_fua = 0;
125c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
126c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
127c6fd2807SJeff Garzik 
1282dcb407eSJeff Garzik static int ata_ignore_hpa;
1291e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
1301e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
1311e999736SAlan Cox 
132b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
133b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444);
134b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
135b3a70601SAlan Cox 
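/*
 * Example: "libata.dma=3" (0x1 | 0x2) keeps DMA enabled for ATA disks and
 * ATAPI devices but disables it for CF cards.
 */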
136c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
137c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
138c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
139c6fd2807SJeff Garzik 
1406ebe9d86SJeff Garzik int libata_noacpi = 0;
141d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
1426ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
14311ef697bSKristen Carlson Accardi 
144ae8d4ee7SAlan Cox int libata_allow_tpm = 0;
145ae8d4ee7SAlan Cox module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
146ae8d4ee7SAlan Cox MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
147ae8d4ee7SAlan Cox 
148c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
149c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
150c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
151c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
152c6fd2807SJeff Garzik 
153c6fd2807SJeff Garzik 
154c6fd2807SJeff Garzik /**
15533267325STejun Heo  *	ata_force_cbl - force cable type according to libata.force
1564cdfa1b3SRandy Dunlap  *	@ap: ATA port of interest
15733267325STejun Heo  *
15833267325STejun Heo  *	Force cable type according to libata.force and whine about it.
15933267325STejun Heo  *	The last entry with a matching port number is used, so it
16033267325STejun Heo  *	can be specified as part of device force parameters.  For
16133267325STejun Heo  *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
16233267325STejun Heo  *	same effect.
16333267325STejun Heo  *
16433267325STejun Heo  *	LOCKING:
16533267325STejun Heo  *	EH context.
16633267325STejun Heo  */
16733267325STejun Heo void ata_force_cbl(struct ata_port *ap)
16833267325STejun Heo {
16933267325STejun Heo 	int i;
17033267325STejun Heo 
17133267325STejun Heo 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
17233267325STejun Heo 		const struct ata_force_ent *fe = &ata_force_tbl[i];
17333267325STejun Heo 
17433267325STejun Heo 		if (fe->port != -1 && fe->port != ap->print_id)
17533267325STejun Heo 			continue;
17633267325STejun Heo 
17733267325STejun Heo 		if (fe->param.cbl == ATA_CBL_NONE)
17833267325STejun Heo 			continue;
17933267325STejun Heo 
18033267325STejun Heo 		ap->cbl = fe->param.cbl;
18133267325STejun Heo 		ata_port_printk(ap, KERN_NOTICE,
18233267325STejun Heo 				"FORCE: cable set to %s\n", fe->param.name);
18333267325STejun Heo 		return;
18433267325STejun Heo 	}
18533267325STejun Heo }
18633267325STejun Heo 
18733267325STejun Heo /**
18833267325STejun Heo  *	ata_force_spd_limit - force SATA spd limit according to libata.force
18933267325STejun Heo  *	@link: ATA link of interest
19033267325STejun Heo  *
19133267325STejun Heo  *	Force SATA spd limit according to libata.force and whine about
19233267325STejun Heo  *	it.  When only the port part is specified (e.g. 1:), the limit
19333267325STejun Heo  *	applies to all links connected to both the host link and all
19433267325STejun Heo  *	fan-out ports connected via PMP.  If the device part is
19533267325STejun Heo  *	specified as 0 (e.g. 1.00:), it specifies the first fan-out
19633267325STejun Heo  *	link not the host link.  Device number 15 always points to the
19733267325STejun Heo  *	host link whether PMP is attached or not.
19833267325STejun Heo  *
19933267325STejun Heo  *	LOCKING:
20033267325STejun Heo  *	EH context.
20133267325STejun Heo  */
20233267325STejun Heo static void ata_force_spd_limit(struct ata_link *link)
20333267325STejun Heo {
20433267325STejun Heo 	int linkno, i;
20533267325STejun Heo 
20633267325STejun Heo 	if (ata_is_host_link(link))
20733267325STejun Heo 		linkno = 15;
20833267325STejun Heo 	else
20933267325STejun Heo 		linkno = link->pmp;
21033267325STejun Heo 
21133267325STejun Heo 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
21233267325STejun Heo 		const struct ata_force_ent *fe = &ata_force_tbl[i];
21333267325STejun Heo 
21433267325STejun Heo 		if (fe->port != -1 && fe->port != link->ap->print_id)
21533267325STejun Heo 			continue;
21633267325STejun Heo 
21733267325STejun Heo 		if (fe->device != -1 && fe->device != linkno)
21833267325STejun Heo 			continue;
21933267325STejun Heo 
22033267325STejun Heo 		if (!fe->param.spd_limit)
22133267325STejun Heo 			continue;
22233267325STejun Heo 
22333267325STejun Heo 		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
22433267325STejun Heo 		ata_link_printk(link, KERN_NOTICE,
22533267325STejun Heo 			"FORCE: PHY spd limit set to %s\n", fe->param.name);
22633267325STejun Heo 		return;
22733267325STejun Heo 	}
22833267325STejun Heo }
22933267325STejun Heo 
23033267325STejun Heo /**
23133267325STejun Heo  *	ata_force_xfermask - force xfermask according to libata.force
23233267325STejun Heo  *	@dev: ATA device of interest
23333267325STejun Heo  *
23433267325STejun Heo  *	Force xfer_mask according to libata.force and whine about it.
23533267325STejun Heo  *	For consistency with link selection, device number 15 selects
23633267325STejun Heo  *	the first device connected to the host link.
23733267325STejun Heo  *
23833267325STejun Heo  *	LOCKING:
23933267325STejun Heo  *	EH context.
24033267325STejun Heo  */
24133267325STejun Heo static void ata_force_xfermask(struct ata_device *dev)
24233267325STejun Heo {
24333267325STejun Heo 	int devno = dev->link->pmp + dev->devno;
24433267325STejun Heo 	int alt_devno = devno;
24533267325STejun Heo 	int i;
24633267325STejun Heo 
24733267325STejun Heo 	/* allow n.15 for the first device attached to host port */
24833267325STejun Heo 	if (ata_is_host_link(dev->link) && devno == 0)
24933267325STejun Heo 		alt_devno = 15;
25033267325STejun Heo 
25133267325STejun Heo 	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
25233267325STejun Heo 		const struct ata_force_ent *fe = &ata_force_tbl[i];
25333267325STejun Heo 		unsigned long pio_mask, mwdma_mask, udma_mask;
25433267325STejun Heo 
25533267325STejun Heo 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
25633267325STejun Heo 			continue;
25733267325STejun Heo 
25833267325STejun Heo 		if (fe->device != -1 && fe->device != devno &&
25933267325STejun Heo 		    fe->device != alt_devno)
26033267325STejun Heo 			continue;
26133267325STejun Heo 
26233267325STejun Heo 		if (!fe->param.xfer_mask)
26333267325STejun Heo 			continue;
26433267325STejun Heo 
26533267325STejun Heo 		ata_unpack_xfermask(fe->param.xfer_mask,
26633267325STejun Heo 				    &pio_mask, &mwdma_mask, &udma_mask);
26733267325STejun Heo 		if (udma_mask)
26833267325STejun Heo 			dev->udma_mask = udma_mask;
26933267325STejun Heo 		else if (mwdma_mask) {
27033267325STejun Heo 			dev->udma_mask = 0;
27133267325STejun Heo 			dev->mwdma_mask = mwdma_mask;
27233267325STejun Heo 		} else {
27333267325STejun Heo 			dev->udma_mask = 0;
27433267325STejun Heo 			dev->mwdma_mask = 0;
27533267325STejun Heo 			dev->pio_mask = pio_mask;
27633267325STejun Heo 		}
27733267325STejun Heo 
27833267325STejun Heo 		ata_dev_printk(dev, KERN_NOTICE,
27933267325STejun Heo 			"FORCE: xfer_mask set to %s\n", fe->param.name);
28033267325STejun Heo 		return;
28133267325STejun Heo 	}
28233267325STejun Heo }
28333267325STejun Heo 
28433267325STejun Heo /**
28533267325STejun Heo  *	ata_force_horkage - force horkage according to libata.force
28633267325STejun Heo  *	@dev: ATA device of interest
28733267325STejun Heo  *
28833267325STejun Heo  *	Force horkage according to libata.force and whine about it.
28933267325STejun Heo  *	For consistency with link selection, device number 15 selects
29033267325STejun Heo  *	the first device connected to the host link.
29133267325STejun Heo  *
29233267325STejun Heo  *	LOCKING:
29333267325STejun Heo  *	EH context.
29433267325STejun Heo  */
29533267325STejun Heo static void ata_force_horkage(struct ata_device *dev)
29633267325STejun Heo {
29733267325STejun Heo 	int devno = dev->link->pmp + dev->devno;
29833267325STejun Heo 	int alt_devno = devno;
29933267325STejun Heo 	int i;
30033267325STejun Heo 
30133267325STejun Heo 	/* allow n.15 for the first device attached to host port */
30233267325STejun Heo 	if (ata_is_host_link(dev->link) && devno == 0)
30333267325STejun Heo 		alt_devno = 15;
30433267325STejun Heo 
30533267325STejun Heo 	for (i = 0; i < ata_force_tbl_size; i++) {
30633267325STejun Heo 		const struct ata_force_ent *fe = &ata_force_tbl[i];
30733267325STejun Heo 
30833267325STejun Heo 		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
30933267325STejun Heo 			continue;
31033267325STejun Heo 
31133267325STejun Heo 		if (fe->device != -1 && fe->device != devno &&
31233267325STejun Heo 		    fe->device != alt_devno)
31333267325STejun Heo 			continue;
31433267325STejun Heo 
31533267325STejun Heo 		if (!(~dev->horkage & fe->param.horkage_on) &&
31633267325STejun Heo 		    !(dev->horkage & fe->param.horkage_off))
31733267325STejun Heo 			continue;
31833267325STejun Heo 
31933267325STejun Heo 		dev->horkage |= fe->param.horkage_on;
32033267325STejun Heo 		dev->horkage &= ~fe->param.horkage_off;
32133267325STejun Heo 
32233267325STejun Heo 		ata_dev_printk(dev, KERN_NOTICE,
32333267325STejun Heo 			"FORCE: horkage modified (%s)\n", fe->param.name);
32433267325STejun Heo 	}
32533267325STejun Heo }
32633267325STejun Heo 
32733267325STejun Heo /**
328c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
329c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
330c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
3319977126cSTejun Heo  *	@is_cmd: This FIS is for a command
3329977126cSTejun Heo  *	@fis: Buffer into which data will be output
333c6fd2807SJeff Garzik  *
334c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
335c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
336c6fd2807SJeff Garzik  *
337c6fd2807SJeff Garzik  *	LOCKING:
338c6fd2807SJeff Garzik  *	Inherited from caller.
339c6fd2807SJeff Garzik  */
3409977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
341c6fd2807SJeff Garzik {
342c6fd2807SJeff Garzik 	fis[0] = 0x27;			/* Register - Host to Device FIS */
3439977126cSTejun Heo 	fis[1] = pmp & 0xf;		/* Port multiplier number */
3449977126cSTejun Heo 	if (is_cmd)
3459977126cSTejun Heo 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
3469977126cSTejun Heo 
347c6fd2807SJeff Garzik 	fis[2] = tf->command;
348c6fd2807SJeff Garzik 	fis[3] = tf->feature;
349c6fd2807SJeff Garzik 
350c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
351c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
352c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
353c6fd2807SJeff Garzik 	fis[7] = tf->device;
354c6fd2807SJeff Garzik 
355c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
356c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
357c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
358c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
359c6fd2807SJeff Garzik 
360c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
361c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
362c6fd2807SJeff Garzik 	fis[14] = 0;
363c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
364c6fd2807SJeff Garzik 
365c6fd2807SJeff Garzik 	fis[16] = 0;
366c6fd2807SJeff Garzik 	fis[17] = 0;
367c6fd2807SJeff Garzik 	fis[18] = 0;
368c6fd2807SJeff Garzik 	fis[19] = 0;
369c6fd2807SJeff Garzik }
370c6fd2807SJeff Garzik 
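/*
 * Usage sketch for ata_tf_to_fis() (illustrative only; example_build_h2d_fis()
 * is a hypothetical helper, not part of libata):
 */
#if 0
static void example_build_h2d_fis(struct ata_queued_cmd *qc, u8 *fis)
{
	/* Build a Register - Host to Device FIS (at least 20 bytes at @fis)
	 * for the command in @qc, addressed to the device's PMP port and
	 * flagged as a command FIS (bit 7 of byte 1 set).
	 */
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
}
#endif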
371c6fd2807SJeff Garzik /**
372c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
373c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
374c6fd2807SJeff Garzik  *	@tf: Taskfile to output
375c6fd2807SJeff Garzik  *
376c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
377c6fd2807SJeff Garzik  *
378c6fd2807SJeff Garzik  *	LOCKING:
379c6fd2807SJeff Garzik  *	Inherited from caller.
380c6fd2807SJeff Garzik  */
381c6fd2807SJeff Garzik 
382c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
383c6fd2807SJeff Garzik {
384c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
385c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
386c6fd2807SJeff Garzik 
387c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
388c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
389c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
390c6fd2807SJeff Garzik 	tf->device	= fis[7];
391c6fd2807SJeff Garzik 
392c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
393c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
394c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
395c6fd2807SJeff Garzik 
396c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
397c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
398c6fd2807SJeff Garzik }
399c6fd2807SJeff Garzik 
400c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
401c6fd2807SJeff Garzik 	/* pio multi */
402c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
403c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
404c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
405c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
406c6fd2807SJeff Garzik 	0,
407c6fd2807SJeff Garzik 	0,
408c6fd2807SJeff Garzik 	0,
409c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
410c6fd2807SJeff Garzik 	/* pio */
411c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
412c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
413c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
414c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
415c6fd2807SJeff Garzik 	0,
416c6fd2807SJeff Garzik 	0,
417c6fd2807SJeff Garzik 	0,
418c6fd2807SJeff Garzik 	0,
419c6fd2807SJeff Garzik 	/* dma */
420c6fd2807SJeff Garzik 	ATA_CMD_READ,
421c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
422c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
423c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
424c6fd2807SJeff Garzik 	0,
425c6fd2807SJeff Garzik 	0,
426c6fd2807SJeff Garzik 	0,
427c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
428c6fd2807SJeff Garzik };
429c6fd2807SJeff Garzik 
430c6fd2807SJeff Garzik /**
431c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
432bd056d7eSTejun Heo  *	@tf: command to examine and configure
433bd056d7eSTejun Heo  *	@dev: device tf belongs to
434c6fd2807SJeff Garzik  *
435c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
436c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
437c6fd2807SJeff Garzik  *
438c6fd2807SJeff Garzik  *	LOCKING:
439c6fd2807SJeff Garzik  *	caller.
440c6fd2807SJeff Garzik  */
441bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
442c6fd2807SJeff Garzik {
443c6fd2807SJeff Garzik 	u8 cmd;
444c6fd2807SJeff Garzik 
445c6fd2807SJeff Garzik 	int index, fua, lba48, write;
446c6fd2807SJeff Garzik 
447c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
448c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
449c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
450c6fd2807SJeff Garzik 
451c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
452c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
453c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
4549af5c9c9STejun Heo 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
455c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
456c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
457c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
458c6fd2807SJeff Garzik 	} else {
459c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
460c6fd2807SJeff Garzik 		index = 16;
461c6fd2807SJeff Garzik 	}
462c6fd2807SJeff Garzik 
463c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
464c6fd2807SJeff Garzik 	if (cmd) {
465c6fd2807SJeff Garzik 		tf->command = cmd;
466c6fd2807SJeff Garzik 		return 0;
467c6fd2807SJeff Garzik 	}
468c6fd2807SJeff Garzik 	return -1;
469c6fd2807SJeff Garzik }
470c6fd2807SJeff Garzik 
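/*
 * Worked example: for a DMA-capable device issuing an LBA48 FUA write,
 * ata_rwcmd_protocol() computes index 16 (dma) + 4 (fua) + 2 (lba48) +
 * 1 (write) = 23, which selects ATA_CMD_WRITE_FUA_EXT from ata_rw_cmds[].
 */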
471c6fd2807SJeff Garzik /**
47235b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
47335b649feSTejun Heo  *	@tf: ATA taskfile of interest
47435b649feSTejun Heo  *	@dev: ATA device @tf belongs to
47535b649feSTejun Heo  *
47635b649feSTejun Heo  *	LOCKING:
47735b649feSTejun Heo  *	None.
47835b649feSTejun Heo  *
47935b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
48035b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
48135b649feSTejun Heo  *	flags select the address format to use.
48235b649feSTejun Heo  *
48335b649feSTejun Heo  *	RETURNS:
48435b649feSTejun Heo  *	Block address read from @tf.
48535b649feSTejun Heo  */
48635b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
48735b649feSTejun Heo {
48835b649feSTejun Heo 	u64 block = 0;
48935b649feSTejun Heo 
49035b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
49135b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
49235b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
49335b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
49435b649feSTejun Heo 			block |= tf->hob_lbal << 24;
49535b649feSTejun Heo 		} else
49635b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
49735b649feSTejun Heo 
49835b649feSTejun Heo 		block |= tf->lbah << 16;
49935b649feSTejun Heo 		block |= tf->lbam << 8;
50035b649feSTejun Heo 		block |= tf->lbal;
50135b649feSTejun Heo 	} else {
50235b649feSTejun Heo 		u32 cyl, head, sect;
50335b649feSTejun Heo 
50435b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
50535b649feSTejun Heo 		head = tf->device & 0xf;
50635b649feSTejun Heo 		sect = tf->lbal;
50735b649feSTejun Heo 
50835b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
50935b649feSTejun Heo 	}
51035b649feSTejun Heo 
51135b649feSTejun Heo 	return block;
51235b649feSTejun Heo }
51335b649feSTejun Heo 
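/*
 * Worked example: for an LBA28 taskfile with (device & 0xf) == 0x01,
 * lbah == 0x02, lbam == 0x03 and lbal == 0x04, ata_tf_read_block()
 * returns block address 0x01020304.
 */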
51435b649feSTejun Heo /**
515bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
516bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
517bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
518bd056d7eSTejun Heo  *	@block: Block address
519bd056d7eSTejun Heo  *	@n_block: Number of blocks
520bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
521bd056d7eSTejun Heo  *	@tag: tag
522bd056d7eSTejun Heo  *
523bd056d7eSTejun Heo  *	LOCKING:
524bd056d7eSTejun Heo  *	None.
525bd056d7eSTejun Heo  *
526bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
527bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
528bd056d7eSTejun Heo  *
529bd056d7eSTejun Heo  *	RETURNS:
530bd056d7eSTejun Heo  *
531bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
532bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
533bd056d7eSTejun Heo  */
534bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
535bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
536bd056d7eSTejun Heo 		    unsigned int tag)
537bd056d7eSTejun Heo {
538bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
539bd056d7eSTejun Heo 	tf->flags |= tf_flags;
540bd056d7eSTejun Heo 
5416d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
542bd056d7eSTejun Heo 		/* yay, NCQ */
543bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
544bd056d7eSTejun Heo 			return -ERANGE;
545bd056d7eSTejun Heo 
546bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
547bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
548bd056d7eSTejun Heo 
549bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
550bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
551bd056d7eSTejun Heo 		else
552bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
553bd056d7eSTejun Heo 
554bd056d7eSTejun Heo 		tf->nsect = tag << 3;
555bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
556bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
557bd056d7eSTejun Heo 
558bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
559bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
560bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
561bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
562bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
563bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
564bd056d7eSTejun Heo 
565bd056d7eSTejun Heo 		tf->device = 1 << 6;
566bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
567bd056d7eSTejun Heo 			tf->device |= 1 << 7;
568bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
569bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
570bd056d7eSTejun Heo 
571bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
572bd056d7eSTejun Heo 			/* use LBA28 */
573bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
574bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
575bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
576bd056d7eSTejun Heo 				return -ERANGE;
577bd056d7eSTejun Heo 
578bd056d7eSTejun Heo 			/* use LBA48 */
579bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
580bd056d7eSTejun Heo 
581bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
582bd056d7eSTejun Heo 
583bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
584bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
585bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
586bd056d7eSTejun Heo 		} else
587bd056d7eSTejun Heo 			/* request too large even for LBA48 */
588bd056d7eSTejun Heo 			return -ERANGE;
589bd056d7eSTejun Heo 
590bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
591bd056d7eSTejun Heo 			return -EINVAL;
592bd056d7eSTejun Heo 
593bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
594bd056d7eSTejun Heo 
595bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
596bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
597bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
598bd056d7eSTejun Heo 
599bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
600bd056d7eSTejun Heo 	} else {
601bd056d7eSTejun Heo 		/* CHS */
602bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
603bd056d7eSTejun Heo 
604bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
605bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
606bd056d7eSTejun Heo 			return -ERANGE;
607bd056d7eSTejun Heo 
608bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
609bd056d7eSTejun Heo 			return -EINVAL;
610bd056d7eSTejun Heo 
611bd056d7eSTejun Heo 		/* Convert LBA to CHS */
612bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
613bd056d7eSTejun Heo 		cyl   = track / dev->heads;
614bd056d7eSTejun Heo 		head  = track % dev->heads;
615bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
616bd056d7eSTejun Heo 
617bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
618bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
619bd056d7eSTejun Heo 
620bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
621bd056d7eSTejun Heo 		   Cylinder: 0-65535
622bd056d7eSTejun Heo 		   Head: 0-15
623bd056d7eSTejun Heo 		   Sector: 1-255 */
624bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
625bd056d7eSTejun Heo 			return -ERANGE;
626bd056d7eSTejun Heo 
627bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
628bd056d7eSTejun Heo 		tf->lbal = sect;
629bd056d7eSTejun Heo 		tf->lbam = cyl;
630bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
631bd056d7eSTejun Heo 		tf->device |= head;
632bd056d7eSTejun Heo 	}
633bd056d7eSTejun Heo 
634bd056d7eSTejun Heo 	return 0;
635bd056d7eSTejun Heo }
636bd056d7eSTejun Heo 
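/*
 * Usage sketch for ata_build_rw_tf() (illustrative only; the block address,
 * count and helper name are arbitrary, and error handling is left to the
 * caller):
 */
#if 0
static int example_build_read(struct ata_device *dev, struct ata_taskfile *tf)
{
	/* Read 8 sectors starting at block 4096; no FUA, not a write.
	 * Passing ATA_TAG_INTERNAL avoids the NCQ path.
	 */
	memset(tf, 0, sizeof(*tf));
	return ata_build_rw_tf(tf, dev, 4096, 8, 0, ATA_TAG_INTERNAL);
}
#endif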
637bd056d7eSTejun Heo /**
638c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
639c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
640c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
641c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
642c6fd2807SJeff Garzik  *
643c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
644c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
645c6fd2807SJeff Garzik  *
646c6fd2807SJeff Garzik  *	LOCKING:
647c6fd2807SJeff Garzik  *	None.
648c6fd2807SJeff Garzik  *
649c6fd2807SJeff Garzik  *	RETURNS:
650c6fd2807SJeff Garzik  *	Packed xfer_mask.
651c6fd2807SJeff Garzik  */
6527dc951aeSTejun Heo unsigned long ata_pack_xfermask(unsigned long pio_mask,
6537dc951aeSTejun Heo 				unsigned long mwdma_mask,
6547dc951aeSTejun Heo 				unsigned long udma_mask)
655c6fd2807SJeff Garzik {
656c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
657c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
658c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
659c6fd2807SJeff Garzik }
660c6fd2807SJeff Garzik 
661c6fd2807SJeff Garzik /**
662c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
663c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
664c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
665c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
666c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
667c6fd2807SJeff Garzik  *
668c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
669c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
670c6fd2807SJeff Garzik  */
6717dc951aeSTejun Heo void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
6727dc951aeSTejun Heo 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
673c6fd2807SJeff Garzik {
674c6fd2807SJeff Garzik 	if (pio_mask)
675c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
676c6fd2807SJeff Garzik 	if (mwdma_mask)
677c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
678c6fd2807SJeff Garzik 	if (udma_mask)
679c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
680c6fd2807SJeff Garzik }
681c6fd2807SJeff Garzik 
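/*
 * Usage sketch: pack the per-type masks into a single xfer_mask and split
 * it back out again (the mask values below are arbitrary placeholders):
 */
#if 0
static void example_xfermask_roundtrip(void)
{
	unsigned long xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}
#endif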
682c6fd2807SJeff Garzik static const struct ata_xfer_ent {
683c6fd2807SJeff Garzik 	int shift, bits;
684c6fd2807SJeff Garzik 	u8 base;
685c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
68670cd071eSTejun Heo 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
68770cd071eSTejun Heo 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
68870cd071eSTejun Heo 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
689c6fd2807SJeff Garzik 	{ -1, },
690c6fd2807SJeff Garzik };
691c6fd2807SJeff Garzik 
692c6fd2807SJeff Garzik /**
693c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
694c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
695c6fd2807SJeff Garzik  *
696c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
697c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
698c6fd2807SJeff Garzik  *
699c6fd2807SJeff Garzik  *	LOCKING:
700c6fd2807SJeff Garzik  *	None.
701c6fd2807SJeff Garzik  *
702c6fd2807SJeff Garzik  *	RETURNS:
70370cd071eSTejun Heo  *	Matching XFER_* value, 0xff if no match found.
704c6fd2807SJeff Garzik  */
7057dc951aeSTejun Heo u8 ata_xfer_mask2mode(unsigned long xfer_mask)
706c6fd2807SJeff Garzik {
707c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
708c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
709c6fd2807SJeff Garzik 
710c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
711c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
712c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
71370cd071eSTejun Heo 	return 0xff;
714c6fd2807SJeff Garzik }
715c6fd2807SJeff Garzik 
716c6fd2807SJeff Garzik /**
717c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
718c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
719c6fd2807SJeff Garzik  *
720c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
721c6fd2807SJeff Garzik  *
722c6fd2807SJeff Garzik  *	LOCKING:
723c6fd2807SJeff Garzik  *	None.
724c6fd2807SJeff Garzik  *
725c6fd2807SJeff Garzik  *	RETURNS:
726c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
727c6fd2807SJeff Garzik  */
7287dc951aeSTejun Heo unsigned long ata_xfer_mode2mask(u8 xfer_mode)
729c6fd2807SJeff Garzik {
730c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
731c6fd2807SJeff Garzik 
732c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
733c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
73470cd071eSTejun Heo 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
73570cd071eSTejun Heo 				& ~((1 << ent->shift) - 1);
736c6fd2807SJeff Garzik 	return 0;
737c6fd2807SJeff Garzik }
738c6fd2807SJeff Garzik 
739c6fd2807SJeff Garzik /**
740c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
741c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
742c6fd2807SJeff Garzik  *
743c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
744c6fd2807SJeff Garzik  *
745c6fd2807SJeff Garzik  *	LOCKING:
746c6fd2807SJeff Garzik  *	None.
747c6fd2807SJeff Garzik  *
748c6fd2807SJeff Garzik  *	RETURNS:
749c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
750c6fd2807SJeff Garzik  */
7517dc951aeSTejun Heo int ata_xfer_mode2shift(unsigned long xfer_mode)
752c6fd2807SJeff Garzik {
753c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
754c6fd2807SJeff Garzik 
755c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
756c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
757c6fd2807SJeff Garzik 			return ent->shift;
758c6fd2807SJeff Garzik 	return -1;
759c6fd2807SJeff Garzik }
760c6fd2807SJeff Garzik 
761c6fd2807SJeff Garzik /**
762c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
763c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
764c6fd2807SJeff Garzik  *
765c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
766c6fd2807SJeff Garzik  *	(highest bit in @modemask).
767c6fd2807SJeff Garzik  *
768c6fd2807SJeff Garzik  *	LOCKING:
769c6fd2807SJeff Garzik  *	None.
770c6fd2807SJeff Garzik  *
771c6fd2807SJeff Garzik  *	RETURNS:
772c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
773c6fd2807SJeff Garzik  *	@mode_mask, or the constant C string "<n/a>".
774c6fd2807SJeff Garzik  */
7757dc951aeSTejun Heo const char *ata_mode_string(unsigned long xfer_mask)
776c6fd2807SJeff Garzik {
777c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
778c6fd2807SJeff Garzik 		"PIO0",
779c6fd2807SJeff Garzik 		"PIO1",
780c6fd2807SJeff Garzik 		"PIO2",
781c6fd2807SJeff Garzik 		"PIO3",
782c6fd2807SJeff Garzik 		"PIO4",
783b352e57dSAlan Cox 		"PIO5",
784b352e57dSAlan Cox 		"PIO6",
785c6fd2807SJeff Garzik 		"MWDMA0",
786c6fd2807SJeff Garzik 		"MWDMA1",
787c6fd2807SJeff Garzik 		"MWDMA2",
788b352e57dSAlan Cox 		"MWDMA3",
789b352e57dSAlan Cox 		"MWDMA4",
790c6fd2807SJeff Garzik 		"UDMA/16",
791c6fd2807SJeff Garzik 		"UDMA/25",
792c6fd2807SJeff Garzik 		"UDMA/33",
793c6fd2807SJeff Garzik 		"UDMA/44",
794c6fd2807SJeff Garzik 		"UDMA/66",
795c6fd2807SJeff Garzik 		"UDMA/100",
796c6fd2807SJeff Garzik 		"UDMA/133",
797c6fd2807SJeff Garzik 		"UDMA7",
798c6fd2807SJeff Garzik 	};
799c6fd2807SJeff Garzik 	int highbit;
800c6fd2807SJeff Garzik 
801c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
802c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
803c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
804c6fd2807SJeff Garzik 	return "<n/a>";
805c6fd2807SJeff Garzik }
806c6fd2807SJeff Garzik 
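/*
 * Usage sketch: report the fastest transfer mode a device advertises
 * (illustrative only; example_print_fastest() is not part of libata):
 */
#if 0
static void example_print_fastest(struct ata_device *dev)
{
	unsigned long mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
					       dev->udma_mask);

	printk(KERN_DEBUG "fastest supported mode: %s\n",
	       ata_mode_string(mask));
}
#endif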
807c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
808c6fd2807SJeff Garzik {
809c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
810c6fd2807SJeff Garzik 		"1.5 Gbps",
811c6fd2807SJeff Garzik 		"3.0 Gbps",
812c6fd2807SJeff Garzik 	};
813c6fd2807SJeff Garzik 
814c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
815c6fd2807SJeff Garzik 		return "<unknown>";
816c6fd2807SJeff Garzik 	return spd_str[spd - 1];
817c6fd2807SJeff Garzik }
818c6fd2807SJeff Garzik 
819c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
820c6fd2807SJeff Garzik {
82109d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
8229af5c9c9STejun Heo 		if (ata_msg_drv(dev->link->ap))
823c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
824562f0c2dSTejun Heo 		ata_acpi_on_disable(dev);
8254ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
8264ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
827c6fd2807SJeff Garzik 		dev->class++;
828c6fd2807SJeff Garzik 	}
829c6fd2807SJeff Garzik }
830c6fd2807SJeff Garzik 
831ca77329fSKristen Carlson Accardi static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
832ca77329fSKristen Carlson Accardi {
833ca77329fSKristen Carlson Accardi 	struct ata_link *link = dev->link;
834ca77329fSKristen Carlson Accardi 	struct ata_port *ap = link->ap;
835ca77329fSKristen Carlson Accardi 	u32 scontrol;
836ca77329fSKristen Carlson Accardi 	unsigned int err_mask;
837ca77329fSKristen Carlson Accardi 	int rc;
838ca77329fSKristen Carlson Accardi 
839ca77329fSKristen Carlson Accardi 	/*
840ca77329fSKristen Carlson Accardi 	 * disallow DIPM for drivers which haven't set
841ca77329fSKristen Carlson Accardi 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
842ca77329fSKristen Carlson Accardi 	 * phy ready will be set in the interrupt status on
843ca77329fSKristen Carlson Accardi 	 * state changes, which will cause some drivers to
844ca77329fSKristen Carlson Accardi 	 * think there are errors - additionally drivers will
845ca77329fSKristen Carlson Accardi 	 * need to disable hot plug.
846ca77329fSKristen Carlson Accardi 	 */
847ca77329fSKristen Carlson Accardi 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
848ca77329fSKristen Carlson Accardi 		ap->pm_policy = NOT_AVAILABLE;
849ca77329fSKristen Carlson Accardi 		return -EINVAL;
850ca77329fSKristen Carlson Accardi 	}
851ca77329fSKristen Carlson Accardi 
852ca77329fSKristen Carlson Accardi 	/*
853ca77329fSKristen Carlson Accardi 	 * For DIPM, we will only enable it for the
854ca77329fSKristen Carlson Accardi 	 * min_power setting.
855ca77329fSKristen Carlson Accardi 	 *
856ca77329fSKristen Carlson Accardi 	 * Why?  Because disks are too stupid to know that,
857ca77329fSKristen Carlson Accardi 	 * if the host rejects a request to go to SLUMBER,
858ca77329fSKristen Carlson Accardi 	 * they should retry at PARTIAL; instead they
859ca77329fSKristen Carlson Accardi 	 * simply give up.  So, for medium_power to
860ca77329fSKristen Carlson Accardi 	 * work at all, we need to only allow HIPM.
861ca77329fSKristen Carlson Accardi 	 */
862ca77329fSKristen Carlson Accardi 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
863ca77329fSKristen Carlson Accardi 	if (rc)
864ca77329fSKristen Carlson Accardi 		return rc;
865ca77329fSKristen Carlson Accardi 
866ca77329fSKristen Carlson Accardi 	switch (policy) {
867ca77329fSKristen Carlson Accardi 	case MIN_POWER:
868ca77329fSKristen Carlson Accardi 		/* no restrictions on IPM transitions */
869ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x3 << 8);
870ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
871ca77329fSKristen Carlson Accardi 		if (rc)
872ca77329fSKristen Carlson Accardi 			return rc;
873ca77329fSKristen Carlson Accardi 
874ca77329fSKristen Carlson Accardi 		/* enable DIPM */
875ca77329fSKristen Carlson Accardi 		if (dev->flags & ATA_DFLAG_DIPM)
876ca77329fSKristen Carlson Accardi 			err_mask = ata_dev_set_feature(dev,
877ca77329fSKristen Carlson Accardi 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
878ca77329fSKristen Carlson Accardi 		break;
879ca77329fSKristen Carlson Accardi 	case MEDIUM_POWER:
880ca77329fSKristen Carlson Accardi 		/* allow IPM to PARTIAL */
881ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x1 << 8);
882ca77329fSKristen Carlson Accardi 		scontrol |= (0x2 << 8);
883ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
884ca77329fSKristen Carlson Accardi 		if (rc)
885ca77329fSKristen Carlson Accardi 			return rc;
886ca77329fSKristen Carlson Accardi 
887f5456b63SKristen Carlson Accardi 		/*
888f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since IPM flags
889f5456b63SKristen Carlson Accardi 		 * disallow transitions to SLUMBER, which effectively
890f5456b63SKristen Carlson Accardi 		 * disables DIPM if the device does not support PARTIAL
891f5456b63SKristen Carlson Accardi 		 */
892ca77329fSKristen Carlson Accardi 		break;
893ca77329fSKristen Carlson Accardi 	case NOT_AVAILABLE:
894ca77329fSKristen Carlson Accardi 	case MAX_PERFORMANCE:
895ca77329fSKristen Carlson Accardi 		/* disable all IPM transitions */
896ca77329fSKristen Carlson Accardi 		scontrol |= (0x3 << 8);
897ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
898ca77329fSKristen Carlson Accardi 		if (rc)
899ca77329fSKristen Carlson Accardi 			return rc;
900ca77329fSKristen Carlson Accardi 
901f5456b63SKristen Carlson Accardi 		/*
902f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since IPM flags
903f5456b63SKristen Carlson Accardi 		 * disallow all transitions, which effectively
904f5456b63SKristen Carlson Accardi 		 * disables DIPM anyway.
905f5456b63SKristen Carlson Accardi 		 */
906ca77329fSKristen Carlson Accardi 		break;
907ca77329fSKristen Carlson Accardi 	}
908ca77329fSKristen Carlson Accardi 
909ca77329fSKristen Carlson Accardi 	/* FIXME: handle SET FEATURES failure */
910ca77329fSKristen Carlson Accardi 	(void) err_mask;
911ca77329fSKristen Carlson Accardi 
912ca77329fSKristen Carlson Accardi 	return 0;
913ca77329fSKristen Carlson Accardi }
914ca77329fSKristen Carlson Accardi 
915ca77329fSKristen Carlson Accardi /**
916ca77329fSKristen Carlson Accardi  *	ata_dev_enable_pm - enable SATA interface power management
91748166fd9SStephen Hemminger  *	@dev:  device to enable power management
91848166fd9SStephen Hemminger  *	@policy: the link power management policy
919ca77329fSKristen Carlson Accardi  *
920ca77329fSKristen Carlson Accardi  *	Enable SATA Interface power management.  This will enable
921ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) for min_power
922ca77329fSKristen Carlson Accardi  * 	policy, and then call driver specific callbacks for
923ca77329fSKristen Carlson Accardi  * 	policy, and then call driver-specific callbacks for
924ca77329fSKristen Carlson Accardi  *	enabling Host Initiated Power Management (HIPM).
925ca77329fSKristen Carlson Accardi  *	Locking: Caller.
926ca77329fSKristen Carlson Accardi  *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
927ca77329fSKristen Carlson Accardi  */
928ca77329fSKristen Carlson Accardi void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
929ca77329fSKristen Carlson Accardi {
930ca77329fSKristen Carlson Accardi 	int rc = 0;
931ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
932ca77329fSKristen Carlson Accardi 
933ca77329fSKristen Carlson Accardi 	/* set HIPM first, then DIPM */
934ca77329fSKristen Carlson Accardi 	if (ap->ops->enable_pm)
935ca77329fSKristen Carlson Accardi 		rc = ap->ops->enable_pm(ap, policy);
936ca77329fSKristen Carlson Accardi 	if (rc)
937ca77329fSKristen Carlson Accardi 		goto enable_pm_out;
938ca77329fSKristen Carlson Accardi 	rc = ata_dev_set_dipm(dev, policy);
939ca77329fSKristen Carlson Accardi 
940ca77329fSKristen Carlson Accardi enable_pm_out:
941ca77329fSKristen Carlson Accardi 	if (rc)
942ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
943ca77329fSKristen Carlson Accardi 	else
944ca77329fSKristen Carlson Accardi 		ap->pm_policy = policy;
945ca77329fSKristen Carlson Accardi 	return /* rc */;	/* hopefully we can use 'rc' eventually */
946ca77329fSKristen Carlson Accardi }
947ca77329fSKristen Carlson Accardi 
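/*
 * Usage sketch (illustrative only): request the most aggressive link power
 * policy for a device; on failure ata_dev_enable_pm() itself falls back to
 * MAX_PERFORMANCE for the port.
 */
#if 0
static void example_request_min_power(struct ata_device *dev)
{
	ata_dev_enable_pm(dev, MIN_POWER);
}
#endif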
9481992a5edSStephen Rothwell #ifdef CONFIG_PM
949ca77329fSKristen Carlson Accardi /**
950ca77329fSKristen Carlson Accardi  *	ata_dev_disable_pm - disable SATA interface power management
95148166fd9SStephen Hemminger  *	@dev: device to disable power management
952ca77329fSKristen Carlson Accardi  *
953ca77329fSKristen Carlson Accardi  *	Disable SATA Interface power management.  This will disable
954ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) without changing
955ca77329fSKristen Carlson Accardi  * 	policy,  call driver specific callbacks for disabling Host
956ca77329fSKristen Carlson Accardi  * 	policy, and call driver-specific callbacks for disabling Host
957ca77329fSKristen Carlson Accardi  * 	Initiated Power Management (HIPM).
958ca77329fSKristen Carlson Accardi  *	Locking: Caller.
959ca77329fSKristen Carlson Accardi  *	Returns: void
960ca77329fSKristen Carlson Accardi  */
961ca77329fSKristen Carlson Accardi static void ata_dev_disable_pm(struct ata_device *dev)
962ca77329fSKristen Carlson Accardi {
963ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
964ca77329fSKristen Carlson Accardi 
965ca77329fSKristen Carlson Accardi 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
966ca77329fSKristen Carlson Accardi 	if (ap->ops->disable_pm)
967ca77329fSKristen Carlson Accardi 		ap->ops->disable_pm(ap);
968ca77329fSKristen Carlson Accardi }
9691992a5edSStephen Rothwell #endif	/* CONFIG_PM */
970ca77329fSKristen Carlson Accardi 
971ca77329fSKristen Carlson Accardi void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
972ca77329fSKristen Carlson Accardi {
973ca77329fSKristen Carlson Accardi 	ap->pm_policy = policy;
974ca77329fSKristen Carlson Accardi 	ap->link.eh_info.action |= ATA_EHI_LPM;
975ca77329fSKristen Carlson Accardi 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
976ca77329fSKristen Carlson Accardi 	ata_port_schedule_eh(ap);
977ca77329fSKristen Carlson Accardi }
978ca77329fSKristen Carlson Accardi 
9791992a5edSStephen Rothwell #ifdef CONFIG_PM
980ca77329fSKristen Carlson Accardi static void ata_lpm_enable(struct ata_host *host)
981ca77329fSKristen Carlson Accardi {
982ca77329fSKristen Carlson Accardi 	struct ata_link *link;
983ca77329fSKristen Carlson Accardi 	struct ata_port *ap;
984ca77329fSKristen Carlson Accardi 	struct ata_device *dev;
985ca77329fSKristen Carlson Accardi 	int i;
986ca77329fSKristen Carlson Accardi 
987ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
988ca77329fSKristen Carlson Accardi 		ap = host->ports[i];
989ca77329fSKristen Carlson Accardi 		ata_port_for_each_link(link, ap) {
990ca77329fSKristen Carlson Accardi 			ata_link_for_each_dev(dev, link)
991ca77329fSKristen Carlson Accardi 				ata_dev_disable_pm(dev);
992ca77329fSKristen Carlson Accardi 		}
993ca77329fSKristen Carlson Accardi 	}
994ca77329fSKristen Carlson Accardi }
995ca77329fSKristen Carlson Accardi 
996ca77329fSKristen Carlson Accardi static void ata_lpm_disable(struct ata_host *host)
997ca77329fSKristen Carlson Accardi {
998ca77329fSKristen Carlson Accardi 	int i;
999ca77329fSKristen Carlson Accardi 
1000ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
1001ca77329fSKristen Carlson Accardi 		struct ata_port *ap = host->ports[i];
1002ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
1003ca77329fSKristen Carlson Accardi 	}
1004ca77329fSKristen Carlson Accardi }
10051992a5edSStephen Rothwell #endif	/* CONFIG_PM */
1006ca77329fSKristen Carlson Accardi 
1007ca77329fSKristen Carlson Accardi 
1008c6fd2807SJeff Garzik /**
1009c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
1010c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
1011c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
1012c6fd2807SJeff Garzik  *
10130d5ff566STejun Heo  *	This technique was originally described in
10140d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
10150d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
10160d5ff566STejun Heo  *
10170d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
10180d5ff566STejun Heo  *	and if a device is present, it will respond by
10190d5ff566STejun Heo  *	correctly storing and echoing back the
10200d5ff566STejun Heo  *	ATA shadow register contents.
1021c6fd2807SJeff Garzik  *
1022c6fd2807SJeff Garzik  *	LOCKING:
1023c6fd2807SJeff Garzik  *	caller.
1024c6fd2807SJeff Garzik  */
1025c6fd2807SJeff Garzik 
10260d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1027c6fd2807SJeff Garzik {
10280d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
10290d5ff566STejun Heo 	u8 nsect, lbal;
10300d5ff566STejun Heo 
10310d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
10320d5ff566STejun Heo 
10330d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
10340d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
10350d5ff566STejun Heo 
10360d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
10370d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
10380d5ff566STejun Heo 
10390d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
10400d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
10410d5ff566STejun Heo 
10420d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
10430d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
10440d5ff566STejun Heo 
10450d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
10460d5ff566STejun Heo 		return 1;	/* we found a device */
10470d5ff566STejun Heo 
10480d5ff566STejun Heo 	return 0;		/* nothing found */
1049c6fd2807SJeff Garzik }
1050c6fd2807SJeff Garzik 
1051c6fd2807SJeff Garzik /**
1052c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
1053c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
1054c6fd2807SJeff Garzik  *
1055c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
1056c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
1057c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
1058c6fd2807SJeff Garzik  *
1059c6fd2807SJeff Garzik  *	LOCKING:
1060c6fd2807SJeff Garzik  *	None.
1061c6fd2807SJeff Garzik  *
1062c6fd2807SJeff Garzik  *	RETURNS:
1063633273a3STejun Heo  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1064633273a3STejun Heo  *	%ATA_DEV_UNKNOWN in the event of failure.
1065c6fd2807SJeff Garzik  */
1066c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1067c6fd2807SJeff Garzik {
1068c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
1069c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
1070c6fd2807SJeff Garzik 	 * So, we only check those.  It's sufficient for uniqueness.
1071633273a3STejun Heo 	 *
1072633273a3STejun Heo 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1073633273a3STejun Heo 	 * signatures for ATA and ATAPI devices attached to SerialATA,
1074633273a3STejun Heo 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
1075633273a3STejun Heo 	 * spec never mentioned using different signatures for
1076633273a3STejun Heo 	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port
1077633273a3STejun Heo 	 * Multiplier specification began to use 0x69/0x96 to identify
1078633273a3STejun Heo 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1079633273a3STejun Heo 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
1080633273a3STejun Heo 	 * 0x69/0x96 and described them as reserved for
1081633273a3STejun Heo 	 * SerialATA.
1082633273a3STejun Heo 	 *
1083633273a3STejun Heo 	 * We follow the current spec and consider that 0x69/0x96
1084633273a3STejun Heo 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1085c6fd2807SJeff Garzik 	 */
1086633273a3STejun Heo 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
1087c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
1088c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
1089c6fd2807SJeff Garzik 	}
1090c6fd2807SJeff Garzik 
1091633273a3STejun Heo 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1092c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
1093c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
1094c6fd2807SJeff Garzik 	}
1095c6fd2807SJeff Garzik 
1096633273a3STejun Heo 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1097633273a3STejun Heo 		DPRINTK("found PMP device by sig\n");
1098633273a3STejun Heo 		return ATA_DEV_PMP;
1099633273a3STejun Heo 	}
1100633273a3STejun Heo 
1101633273a3STejun Heo 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
11022dcb407eSJeff Garzik 		printk(KERN_INFO "ata: SEMB device ignored\n");
1103633273a3STejun Heo 		return ATA_DEV_SEMB_UNSUP; /* not yet */
1104633273a3STejun Heo 	}
1105633273a3STejun Heo 
1106c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
1107c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
1108c6fd2807SJeff Garzik }
1109c6fd2807SJeff Garzik 
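/*
 * Usage sketch: classify a device from a received Register - Device to Host
 * FIS (illustrative only; example_classify_d2h() is not part of libata):
 */
#if 0
static unsigned int example_classify_d2h(const u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_from_fis(fis, &tf);
	return ata_dev_classify(&tf);	/* ATA_DEV_ATA, ATA_DEV_ATAPI, ... */
}
#endif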
1110c6fd2807SJeff Garzik /**
1111c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
11123f19859eSTejun Heo  *	@dev: ATA device to classify (starting at zero)
11133f19859eSTejun Heo  *	@present: device seems present
1114c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
1115c6fd2807SJeff Garzik  *
1116c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1117c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
1118c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
1119c6fd2807SJeff Garzik  *	and diagnostics.
1120c6fd2807SJeff Garzik  *
1121c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
1122c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
1123c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
1124c6fd2807SJeff Garzik  *
1125c6fd2807SJeff Garzik  *	LOCKING:
1126c6fd2807SJeff Garzik  *	caller.
1127c6fd2807SJeff Garzik  *
1128c6fd2807SJeff Garzik  *	RETURNS:
1129c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1130c6fd2807SJeff Garzik  */
11313f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
11323f19859eSTejun Heo 				  u8 *r_err)
1133c6fd2807SJeff Garzik {
11343f19859eSTejun Heo 	struct ata_port *ap = dev->link->ap;
1135c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1136c6fd2807SJeff Garzik 	unsigned int class;
1137c6fd2807SJeff Garzik 	u8 err;
1138c6fd2807SJeff Garzik 
11393f19859eSTejun Heo 	ap->ops->dev_select(ap, dev->devno);
1140c6fd2807SJeff Garzik 
1141c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
1142c6fd2807SJeff Garzik 
1143c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
1144c6fd2807SJeff Garzik 	err = tf.feature;
1145c6fd2807SJeff Garzik 	if (r_err)
1146c6fd2807SJeff Garzik 		*r_err = err;
1147c6fd2807SJeff Garzik 
1148c5038fc0SAlan Cox 	/* see if device passed diags: continue and warn later */
1149c5038fc0SAlan Cox 	if (err == 0)
115093590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
11513f19859eSTejun Heo 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
115293590859SAlan Cox 	else if (err == 1)
1153c6fd2807SJeff Garzik 		/* do nothing */ ;
11543f19859eSTejun Heo 	else if ((dev->devno == 0) && (err == 0x81))
1155c6fd2807SJeff Garzik 		/* do nothing */ ;
1156c6fd2807SJeff Garzik 	else
1157c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
1158c6fd2807SJeff Garzik 
1159c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
1160c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
1161c6fd2807SJeff Garzik 
1162d7fbee05STejun Heo 	if (class == ATA_DEV_UNKNOWN) {
1163d7fbee05STejun Heo 		/* If the device failed diagnostic, it's likely to
1164d7fbee05STejun Heo 		 * have reported incorrect device signature too.
1165d7fbee05STejun Heo 		 * Assume ATA device if the device seems present but
1166d7fbee05STejun Heo 		 * device signature is invalid with diagnostic
1167d7fbee05STejun Heo 		 * failure.
1168d7fbee05STejun Heo 		 */
1169d7fbee05STejun Heo 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1170d7fbee05STejun Heo 			class = ATA_DEV_ATA;
1171d7fbee05STejun Heo 		else
1172d7fbee05STejun Heo 			class = ATA_DEV_NONE;
1173d7fbee05STejun Heo 	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
1174d7fbee05STejun Heo 		class = ATA_DEV_NONE;
1175d7fbee05STejun Heo 
1176c6fd2807SJeff Garzik 	return class;
1177c6fd2807SJeff Garzik }
1178c6fd2807SJeff Garzik 
1179c6fd2807SJeff Garzik /**
1180c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
1181c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
1182c6fd2807SJeff Garzik  *	@s: string into which data is output
1183c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
1184c6fd2807SJeff Garzik  *	@len: length of string to return; must be an even number.
1185c6fd2807SJeff Garzik  *
1186c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
1187c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
1188c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
1189c6fd2807SJeff Garzik  *
1190c6fd2807SJeff Garzik  *	LOCKING:
1191c6fd2807SJeff Garzik  *	caller.
1192c6fd2807SJeff Garzik  */
1193c6fd2807SJeff Garzik 
1194c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
1195c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
1196c6fd2807SJeff Garzik {
1197c6fd2807SJeff Garzik 	unsigned int c;
1198c6fd2807SJeff Garzik 
1199c6fd2807SJeff Garzik 	while (len > 0) {
1200c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
1201c6fd2807SJeff Garzik 		*s = c;
1202c6fd2807SJeff Garzik 		s++;
1203c6fd2807SJeff Garzik 
1204c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
1205c6fd2807SJeff Garzik 		*s = c;
1206c6fd2807SJeff Garzik 		s++;
1207c6fd2807SJeff Garzik 
1208c6fd2807SJeff Garzik 		ofs++;
1209c6fd2807SJeff Garzik 		len -= 2;
1210c6fd2807SJeff Garzik 	}
1211c6fd2807SJeff Garzik }
1212c6fd2807SJeff Garzik 
1213c6fd2807SJeff Garzik /**
1214c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1215c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
1216c6fd2807SJeff Garzik  *	@s: string into which data is output
1217c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
1218c6fd2807SJeff Garzik  *	@len: length of string to return; must be an odd number.
1219c6fd2807SJeff Garzik  *
1220c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
1221c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
1222c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
1223c6fd2807SJeff Garzik  *
1224c6fd2807SJeff Garzik  *	LOCKING:
1225c6fd2807SJeff Garzik  *	caller.
1226c6fd2807SJeff Garzik  */
1227c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
1228c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
1229c6fd2807SJeff Garzik {
1230c6fd2807SJeff Garzik 	unsigned char *p;
1231c6fd2807SJeff Garzik 
1232c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
1233c6fd2807SJeff Garzik 
1234c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
1235c6fd2807SJeff Garzik 
1236c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
1237c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
1238c6fd2807SJeff Garzik 		p--;
1239c6fd2807SJeff Garzik 	*p = '\0';
1240c6fd2807SJeff Garzik }
1241c6fd2807SJeff Garzik 
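/*
 * Illustrative sketch (not part of the driver): extracting the model and
 * firmware revision strings from a byte-swapped IDENTIFY buffer with the
 * helper above.  The function name is hypothetical; the ATA_ID_* offsets
 * and lengths come from <linux/ata.h>.
 */
static void __maybe_unused example_dump_id_strings(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];
	unsigned char fwrev[ATA_ID_FW_REV_LEN + 1];

	/* the "+ 1" sizes above make @len odd, as ata_id_c_string() requires */
	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));

	printk(KERN_DEBUG "example: model '%s' fwrev '%s'\n", model, fwrev);
}
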
1242db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id)
1243db6f8759STejun Heo {
1244db6f8759STejun Heo 	if (ata_id_has_lba(id)) {
1245db6f8759STejun Heo 		if (ata_id_has_lba48(id))
1246db6f8759STejun Heo 			return ata_id_u64(id, 100);
1247db6f8759STejun Heo 		else
1248db6f8759STejun Heo 			return ata_id_u32(id, 60);
1249db6f8759STejun Heo 	} else {
1250db6f8759STejun Heo 		if (ata_id_current_chs_valid(id))
1251db6f8759STejun Heo 			return ata_id_u32(id, 57);
1252db6f8759STejun Heo 		else
1253db6f8759STejun Heo 			return id[1] * id[3] * id[6];
1254db6f8759STejun Heo 	}
1255db6f8759STejun Heo }
1256db6f8759STejun Heo 
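/* The two helpers below convert the LBA fields of a returned taskfile
 * into a sector count.  READ NATIVE MAX (EXT) reports the address of
 * the highest addressable sector, hence the trailing "+1" that turns an
 * address into a count; the (u64) casts keep the shifts from
 * sign-extending through the intermediate int values.
 */
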
12571e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
12581e999736SAlan Cox {
12591e999736SAlan Cox 	u64 sectors = 0;
12601e999736SAlan Cox 
12611e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
12621e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
12631e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
12641e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
12651e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
12661e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
12671e999736SAlan Cox 
12681e999736SAlan Cox 	return ++sectors;
12691e999736SAlan Cox }
12701e999736SAlan Cox 
12711e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
12721e999736SAlan Cox {
12731e999736SAlan Cox 	u64 sectors = 0;
12741e999736SAlan Cox 
12751e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
12761e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
12771e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
12781e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
12791e999736SAlan Cox 
12801e999736SAlan Cox 	return ++sectors;
12811e999736SAlan Cox }
12821e999736SAlan Cox 
12831e999736SAlan Cox /**
1284c728a914STejun Heo  *	ata_read_native_max_address - Read native max address
1285c728a914STejun Heo  *	@dev: target device
1286c728a914STejun Heo  *	@max_sectors: out parameter for the result native max address
12871e999736SAlan Cox  *
1288c728a914STejun Heo  *	Perform an LBA48 or LBA28 native size query upon the device in
1289c728a914STejun Heo  *	question.
1290c728a914STejun Heo  *
1291c728a914STejun Heo  *	RETURNS:
1292c728a914STejun Heo  *	0 on success, -EACCES if command is aborted by the drive.
1293c728a914STejun Heo  *	-EIO on other errors.
12941e999736SAlan Cox  */
1295c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
12961e999736SAlan Cox {
1297c728a914STejun Heo 	unsigned int err_mask;
12981e999736SAlan Cox 	struct ata_taskfile tf;
1299c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
13001e999736SAlan Cox 
13011e999736SAlan Cox 	ata_tf_init(dev, &tf);
13021e999736SAlan Cox 
1303c728a914STejun Heo 	/* always clear all address registers */
13041e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1305c728a914STejun Heo 
1306c728a914STejun Heo 	if (lba48) {
1307c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1308c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
1309c728a914STejun Heo 	} else
1310c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1311c728a914STejun Heo 
13121e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1313c728a914STejun Heo 	tf.device |= ATA_LBA;
13141e999736SAlan Cox 
13152b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1316c728a914STejun Heo 	if (err_mask) {
1317c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1318c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1319c728a914STejun Heo 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1320c728a914STejun Heo 			return -EACCES;
1321c728a914STejun Heo 		return -EIO;
1322c728a914STejun Heo 	}
1323c728a914STejun Heo 
1324c728a914STejun Heo 	if (lba48)
1325c728a914STejun Heo 		*max_sectors = ata_tf_to_lba48(&tf);
1326c728a914STejun Heo 	else
1327c728a914STejun Heo 		*max_sectors = ata_tf_to_lba(&tf);
132893328e11SAlan Cox 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
132993328e11SAlan Cox 		(*max_sectors)--;
13301e999736SAlan Cox 	return 0;
13311e999736SAlan Cox }
13321e999736SAlan Cox 
13331e999736SAlan Cox /**
1334c728a914STejun Heo  *	ata_set_max_sectors - Set max sectors
1335c728a914STejun Heo  *	@dev: target device
13366b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
13371e999736SAlan Cox  *
1338c728a914STejun Heo  *	Set max sectors of @dev to @new_sectors.
1339c728a914STejun Heo  *
1340c728a914STejun Heo  *	RETURNS:
1341c728a914STejun Heo  *	0 on success, -EACCES if command is aborted or denied (due to
1342c728a914STejun Heo  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1343c728a914STejun Heo  *	errors.
13441e999736SAlan Cox  */
134505027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
13461e999736SAlan Cox {
1347c728a914STejun Heo 	unsigned int err_mask;
13481e999736SAlan Cox 	struct ata_taskfile tf;
1349c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
13501e999736SAlan Cox 
13511e999736SAlan Cox 	new_sectors--;
13521e999736SAlan Cox 
13531e999736SAlan Cox 	ata_tf_init(dev, &tf);
13541e999736SAlan Cox 
1355c728a914STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
13561e999736SAlan Cox 
1357c728a914STejun Heo 	if (lba48) {
1358c728a914STejun Heo 		tf.command = ATA_CMD_SET_MAX_EXT;
1359c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
13601e999736SAlan Cox 
13611e999736SAlan Cox 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
13621e999736SAlan Cox 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
13631e999736SAlan Cox 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
13641e582ba4STejun Heo 	} else {
13651e999736SAlan Cox 		tf.command = ATA_CMD_SET_MAX;
1366c728a914STejun Heo 
13671e582ba4STejun Heo 		tf.device |= (new_sectors >> 24) & 0xf;
13681e582ba4STejun Heo 	}
13691e582ba4STejun Heo 
13701e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1371c728a914STejun Heo 	tf.device |= ATA_LBA;
13721e999736SAlan Cox 
13731e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
13741e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
13751e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
13761e999736SAlan Cox 
13772b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1378c728a914STejun Heo 	if (err_mask) {
1379c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1380c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1381c728a914STejun Heo 		if (err_mask == AC_ERR_DEV &&
1382c728a914STejun Heo 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1383c728a914STejun Heo 			return -EACCES;
1384c728a914STejun Heo 		return -EIO;
1385c728a914STejun Heo 	}
1386c728a914STejun Heo 
13871e999736SAlan Cox 	return 0;
13881e999736SAlan Cox }
13891e999736SAlan Cox 
13901e999736SAlan Cox /**
13911e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
13921e999736SAlan Cox  *	@dev: Device to resize
13931e999736SAlan Cox  *
13941e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
13951e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
13961e999736SAlan Cox  *	the drive has the HPA feature set enabled.
139705027adcSTejun Heo  *
139805027adcSTejun Heo  *	RETURNS:
139905027adcSTejun Heo  *	0 on success, -errno on failure.
14001e999736SAlan Cox  */
140105027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev)
14021e999736SAlan Cox {
140305027adcSTejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
140405027adcSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
140505027adcSTejun Heo 	u64 sectors = ata_id_n_sectors(dev->id);
140605027adcSTejun Heo 	u64 native_sectors;
1407c728a914STejun Heo 	int rc;
14081e999736SAlan Cox 
140905027adcSTejun Heo 	/* do we need to do it? */
141005027adcSTejun Heo 	if (dev->class != ATA_DEV_ATA ||
141105027adcSTejun Heo 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
141205027adcSTejun Heo 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1413c728a914STejun Heo 		return 0;
14141e999736SAlan Cox 
141505027adcSTejun Heo 	/* read native max address */
141605027adcSTejun Heo 	rc = ata_read_native_max_address(dev, &native_sectors);
141705027adcSTejun Heo 	if (rc) {
141805027adcSTejun Heo 		/* If HPA isn't going to be unlocked, skip HPA
141905027adcSTejun Heo 		 * resizing from the next try.
142005027adcSTejun Heo 		 */
142105027adcSTejun Heo 		if (!ata_ignore_hpa) {
142205027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
142305027adcSTejun Heo 				       "broken, will skip HPA handling\n");
142405027adcSTejun Heo 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
142505027adcSTejun Heo 
142605027adcSTejun Heo 			/* we can continue if device aborted the command */
142705027adcSTejun Heo 			if (rc == -EACCES)
142805027adcSTejun Heo 				rc = 0;
142905027adcSTejun Heo 		}
143005027adcSTejun Heo 
143105027adcSTejun Heo 		return rc;
143205027adcSTejun Heo 	}
143305027adcSTejun Heo 
143405027adcSTejun Heo 	/* nothing to do? */
143505027adcSTejun Heo 	if (native_sectors <= sectors || !ata_ignore_hpa) {
143605027adcSTejun Heo 		if (!print_info || native_sectors == sectors)
143705027adcSTejun Heo 			return 0;
143805027adcSTejun Heo 
143905027adcSTejun Heo 		if (native_sectors > sectors)
14401e999736SAlan Cox 			ata_dev_printk(dev, KERN_INFO,
144105027adcSTejun Heo 				"HPA detected: current %llu, native %llu\n",
144205027adcSTejun Heo 				(unsigned long long)sectors,
144305027adcSTejun Heo 				(unsigned long long)native_sectors);
144405027adcSTejun Heo 		else if (native_sectors < sectors)
144505027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING,
144605027adcSTejun Heo 				"native sectors (%llu) is smaller than "
144705027adcSTejun Heo 				"sectors (%llu)\n",
144805027adcSTejun Heo 				(unsigned long long)native_sectors,
144905027adcSTejun Heo 				(unsigned long long)sectors);
145005027adcSTejun Heo 		return 0;
14511e999736SAlan Cox 	}
145237301a55STejun Heo 
145305027adcSTejun Heo 	/* let's unlock HPA */
145405027adcSTejun Heo 	rc = ata_set_max_sectors(dev, native_sectors);
145505027adcSTejun Heo 	if (rc == -EACCES) {
145605027adcSTejun Heo 		/* if device aborted the command, skip HPA resizing */
145705027adcSTejun Heo 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
145805027adcSTejun Heo 			       "(%llu -> %llu), skipping HPA handling\n",
145905027adcSTejun Heo 			       (unsigned long long)sectors,
146005027adcSTejun Heo 			       (unsigned long long)native_sectors);
146105027adcSTejun Heo 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
146205027adcSTejun Heo 		return 0;
146305027adcSTejun Heo 	} else if (rc)
146405027adcSTejun Heo 		return rc;
146505027adcSTejun Heo 
146605027adcSTejun Heo 	/* re-read IDENTIFY data */
146705027adcSTejun Heo 	rc = ata_dev_reread_id(dev, 0);
146805027adcSTejun Heo 	if (rc) {
146905027adcSTejun Heo 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
147005027adcSTejun Heo 			       "data after HPA resizing\n");
147105027adcSTejun Heo 		return rc;
147205027adcSTejun Heo 	}
147305027adcSTejun Heo 
147405027adcSTejun Heo 	if (print_info) {
147505027adcSTejun Heo 		u64 new_sectors = ata_id_n_sectors(dev->id);
147605027adcSTejun Heo 		ata_dev_printk(dev, KERN_INFO,
147705027adcSTejun Heo 			"HPA unlocked: %llu -> %llu, native %llu\n",
147805027adcSTejun Heo 			(unsigned long long)sectors,
147905027adcSTejun Heo 			(unsigned long long)new_sectors,
148005027adcSTejun Heo 			(unsigned long long)native_sectors);
148105027adcSTejun Heo 	}
148205027adcSTejun Heo 
148305027adcSTejun Heo 	return 0;
14841e999736SAlan Cox }
14851e999736SAlan Cox 
1486c6fd2807SJeff Garzik /**
1487c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1488c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1489c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1490c6fd2807SJeff Garzik  *
1491c6fd2807SJeff Garzik  *	This function performs no operation; it is a no-op placeholder.
1492c6fd2807SJeff Garzik  *
1493c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1494c6fd2807SJeff Garzik  *
1495c6fd2807SJeff Garzik  *	LOCKING:
1496c6fd2807SJeff Garzik  *	caller.
1497c6fd2807SJeff Garzik  */
1498c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1499c6fd2807SJeff Garzik {
1500c6fd2807SJeff Garzik }
1501c6fd2807SJeff Garzik 
1502c6fd2807SJeff Garzik 
1503c6fd2807SJeff Garzik /**
1504c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1505c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1506c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1507c6fd2807SJeff Garzik  *
1508c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1509c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1510c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1511c6fd2807SJeff Garzik  *
1512c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1513c6fd2807SJeff Garzik  *
1514c6fd2807SJeff Garzik  *	LOCKING:
1515c6fd2807SJeff Garzik  *	caller.
1516c6fd2807SJeff Garzik  */
1517c6fd2807SJeff Garzik 
1518c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1519c6fd2807SJeff Garzik {
1520c6fd2807SJeff Garzik 	u8 tmp;
1521c6fd2807SJeff Garzik 
1522c6fd2807SJeff Garzik 	if (device == 0)
1523c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1524c6fd2807SJeff Garzik 	else
1525c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1526c6fd2807SJeff Garzik 
15270d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1528c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1529c6fd2807SJeff Garzik }
1530c6fd2807SJeff Garzik 
1531c6fd2807SJeff Garzik /**
1532c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1533c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1534c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1535c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1536c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1537c6fd2807SJeff Garzik  *
1538c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1539c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1540c6fd2807SJeff Garzik  *	ATA channel.
1541c6fd2807SJeff Garzik  *
1542c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1543c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1544c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1545c6fd2807SJeff Garzik  *
1546c6fd2807SJeff Garzik  *	LOCKING:
1547c6fd2807SJeff Garzik  *	caller.
1548c6fd2807SJeff Garzik  */
1549c6fd2807SJeff Garzik 
1550c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1551c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1552c6fd2807SJeff Garzik {
1553c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
155444877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
155544877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1556c6fd2807SJeff Garzik 
1557c6fd2807SJeff Garzik 	if (wait)
1558c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1559c6fd2807SJeff Garzik 
1560c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1561c6fd2807SJeff Garzik 
1562c6fd2807SJeff Garzik 	if (wait) {
15639af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1564c6fd2807SJeff Garzik 			msleep(150);
1565c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1566c6fd2807SJeff Garzik 	}
1567c6fd2807SJeff Garzik }
1568c6fd2807SJeff Garzik 
1569c6fd2807SJeff Garzik /**
1570c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1571c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1572c6fd2807SJeff Garzik  *
1573c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1574c6fd2807SJeff Garzik  *	page.
1575c6fd2807SJeff Garzik  *
1576c6fd2807SJeff Garzik  *	LOCKING:
1577c6fd2807SJeff Garzik  *	caller.
1578c6fd2807SJeff Garzik  */
1579c6fd2807SJeff Garzik 
1580c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1581c6fd2807SJeff Garzik {
1582c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1583c6fd2807SJeff Garzik 		"53==0x%04x  "
1584c6fd2807SJeff Garzik 		"63==0x%04x  "
1585c6fd2807SJeff Garzik 		"64==0x%04x  "
1586c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1587c6fd2807SJeff Garzik 		id[49],
1588c6fd2807SJeff Garzik 		id[53],
1589c6fd2807SJeff Garzik 		id[63],
1590c6fd2807SJeff Garzik 		id[64],
1591c6fd2807SJeff Garzik 		id[75]);
1592c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1593c6fd2807SJeff Garzik 		"81==0x%04x  "
1594c6fd2807SJeff Garzik 		"82==0x%04x  "
1595c6fd2807SJeff Garzik 		"83==0x%04x  "
1596c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1597c6fd2807SJeff Garzik 		id[80],
1598c6fd2807SJeff Garzik 		id[81],
1599c6fd2807SJeff Garzik 		id[82],
1600c6fd2807SJeff Garzik 		id[83],
1601c6fd2807SJeff Garzik 		id[84]);
1602c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1603c6fd2807SJeff Garzik 		"93==0x%04x\n",
1604c6fd2807SJeff Garzik 		id[88],
1605c6fd2807SJeff Garzik 		id[93]);
1606c6fd2807SJeff Garzik }
1607c6fd2807SJeff Garzik 
1608c6fd2807SJeff Garzik /**
1609c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1610c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1611c6fd2807SJeff Garzik  *
1612c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1613c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1614c6fd2807SJeff Garzik  *
1615c6fd2807SJeff Garzik  *	FIXME: pre-IDE drive timing (do we care?).
1616c6fd2807SJeff Garzik  *
1617c6fd2807SJeff Garzik  *	LOCKING:
1618c6fd2807SJeff Garzik  *	None.
1619c6fd2807SJeff Garzik  *
1620c6fd2807SJeff Garzik  *	RETURNS:
1621c6fd2807SJeff Garzik  *	Computed xfermask
1622c6fd2807SJeff Garzik  */
16237dc951aeSTejun Heo unsigned long ata_id_xfermask(const u16 *id)
1624c6fd2807SJeff Garzik {
16257dc951aeSTejun Heo 	unsigned long pio_mask, mwdma_mask, udma_mask;
1626c6fd2807SJeff Garzik 
1627c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1628c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1629c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1630c6fd2807SJeff Garzik 		pio_mask <<= 3;
1631c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1632c6fd2807SJeff Garzik 	} else {
1633c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1634c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1635c6fd2807SJeff Garzik 		 * a mask.
1636c6fd2807SJeff Garzik 		 */
16377a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
163846767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
163946767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
164046767aebSAlan Cox 		else
164146767aebSAlan Cox 			pio_mask = 1;
1642c6fd2807SJeff Garzik 
1643c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1644c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1645c6fd2807SJeff Garzik 		 * process.  However, it's the speeds, not the modes, that
1646c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1647c6fd2807SJeff Garzik 		 * will get this right anyway
1648c6fd2807SJeff Garzik 		 */
1649c6fd2807SJeff Garzik 	}
1650c6fd2807SJeff Garzik 
1651c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1652c6fd2807SJeff Garzik 
1653b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1654b352e57dSAlan Cox 		/*
1655b352e57dSAlan Cox 		 *	Process compact flash extended modes
1656b352e57dSAlan Cox 		 */
1657b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1658b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1659b352e57dSAlan Cox 
1660b352e57dSAlan Cox 		if (pio)
1661b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1662b352e57dSAlan Cox 		if (pio > 1)
1663b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1664b352e57dSAlan Cox 		if (dma)
1665b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1666b352e57dSAlan Cox 		if (dma > 1)
1667b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1668b352e57dSAlan Cox 	}
1669b352e57dSAlan Cox 
1670c6fd2807SJeff Garzik 	udma_mask = 0;
1671c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1672c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1673c6fd2807SJeff Garzik 
1674c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1675c6fd2807SJeff Garzik }
1676c6fd2807SJeff Garzik 
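/*
 * Illustrative sketch (not part of the driver): a caller that only wants
 * to know whether the IDENTIFY data advertises any UDMA mode can test the
 * packed mask directly.  The function name is hypothetical; ATA_SHIFT_UDMA
 * comes from <linux/ata.h>.
 */
static int __maybe_unused example_id_has_udma(const u16 *id)
{
	unsigned long xfer_mask = ata_id_xfermask(id);

	/* UDMA occupies the top of the packed mask, starting at ATA_SHIFT_UDMA */
	return (xfer_mask >> ATA_SHIFT_UDMA) != 0;
}
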
1677c6fd2807SJeff Garzik /**
1678442eacc3SJeff Garzik  *	ata_pio_queue_task - Queue port_task
1679c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1680c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
168165f27f38SDavid Howells  *	@data: data for @fn to use
1682c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1683c6fd2807SJeff Garzik  *
1684c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1685c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1686c6fd2807SJeff Garzik  *	user's (low-level driver's) responsibility to make sure that only
1687c6fd2807SJeff Garzik  *	one task is active at any given time.
1688c6fd2807SJeff Garzik  *
1689c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1690442eacc3SJeff Garzik  *	port_task and EH, so callers of ata_pio_queue_task() need not
1691c6fd2807SJeff Garzik  *	worry about EH synchronization.
1692c6fd2807SJeff Garzik  *
1693c6fd2807SJeff Garzik  *	LOCKING:
1694c6fd2807SJeff Garzik  *	Inherited from caller.
1695c6fd2807SJeff Garzik  */
1696442eacc3SJeff Garzik static void ata_pio_queue_task(struct ata_port *ap, void *data,
1697c6fd2807SJeff Garzik 			       unsigned long delay)
1698c6fd2807SJeff Garzik {
169965f27f38SDavid Howells 	ap->port_task_data = data;
1700c6fd2807SJeff Garzik 
170145a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
170245a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1703c6fd2807SJeff Garzik }
1704c6fd2807SJeff Garzik 
1705c6fd2807SJeff Garzik /**
1706c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1707c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1708c6fd2807SJeff Garzik  *
1709c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1710c6fd2807SJeff Garzik  *	be running or scheduled.
1711c6fd2807SJeff Garzik  *
1712c6fd2807SJeff Garzik  *	LOCKING:
1713c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1714c6fd2807SJeff Garzik  */
1715c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1716c6fd2807SJeff Garzik {
1717c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1718c6fd2807SJeff Garzik 
171945a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1720c6fd2807SJeff Garzik 
1721c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1722c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1723c6fd2807SJeff Garzik }
1724c6fd2807SJeff Garzik 
17257102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1726c6fd2807SJeff Garzik {
1727c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1728c6fd2807SJeff Garzik 
1729c6fd2807SJeff Garzik 	complete(waiting);
1730c6fd2807SJeff Garzik }
1731c6fd2807SJeff Garzik 
1732c6fd2807SJeff Garzik /**
17332432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1734c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1735c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1736c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1737c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
17385c1ad8b3SRandy Dunlap  *	@sgl: sg list for the data buffer of the command
17392432697bSTejun Heo  *	@n_elem: Number of sg entries
17402b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
1741c6fd2807SJeff Garzik  *
1742c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1743c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1744c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1745c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1746c6fd2807SJeff Garzik  *	clean up after timeout.
1747c6fd2807SJeff Garzik  *
1748c6fd2807SJeff Garzik  *	LOCKING:
1749c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1750c6fd2807SJeff Garzik  *
1751c6fd2807SJeff Garzik  *	RETURNS:
1752c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1753c6fd2807SJeff Garzik  */
17542432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1755c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
175687260216SJens Axboe 			      int dma_dir, struct scatterlist *sgl,
17572b789108STejun Heo 			      unsigned int n_elem, unsigned long timeout)
1758c6fd2807SJeff Garzik {
17599af5c9c9STejun Heo 	struct ata_link *link = dev->link;
17609af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1761c6fd2807SJeff Garzik 	u8 command = tf->command;
1762c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1763c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1764c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1765da917d69STejun Heo 	int preempted_nr_active_links;
1766c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1767c6fd2807SJeff Garzik 	unsigned long flags;
1768c6fd2807SJeff Garzik 	unsigned int err_mask;
1769c6fd2807SJeff Garzik 	int rc;
1770c6fd2807SJeff Garzik 
1771c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1772c6fd2807SJeff Garzik 
1773c6fd2807SJeff Garzik 	/* no internal command while frozen */
1774c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1775c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1776c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1777c6fd2807SJeff Garzik 	}
1778c6fd2807SJeff Garzik 
1779c6fd2807SJeff Garzik 	/* initialize internal qc */
1780c6fd2807SJeff Garzik 
1781c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1782c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1783c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1784c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1785c6fd2807SJeff Garzik 	 */
1786c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1787c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1788c6fd2807SJeff Garzik 	else
1789c6fd2807SJeff Garzik 		tag = 0;
1790c6fd2807SJeff Garzik 
1791c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1792c6fd2807SJeff Garzik 		BUG();
1793c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1794c6fd2807SJeff Garzik 
1795c6fd2807SJeff Garzik 	qc->tag = tag;
1796c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1797c6fd2807SJeff Garzik 	qc->ap = ap;
1798c6fd2807SJeff Garzik 	qc->dev = dev;
1799c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1800c6fd2807SJeff Garzik 
18019af5c9c9STejun Heo 	preempted_tag = link->active_tag;
18029af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1803c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1804da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
18059af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
18069af5c9c9STejun Heo 	link->sactive = 0;
1807c6fd2807SJeff Garzik 	ap->qc_active = 0;
1808da917d69STejun Heo 	ap->nr_active_links = 0;
1809c6fd2807SJeff Garzik 
1810c6fd2807SJeff Garzik 	/* prepare & issue qc */
1811c6fd2807SJeff Garzik 	qc->tf = *tf;
1812c6fd2807SJeff Garzik 	if (cdb)
1813c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1814c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1815c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1816c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
18172432697bSTejun Heo 		unsigned int i, buflen = 0;
181887260216SJens Axboe 		struct scatterlist *sg;
18192432697bSTejun Heo 
182087260216SJens Axboe 		for_each_sg(sgl, sg, n_elem, i)
182187260216SJens Axboe 			buflen += sg->length;
18222432697bSTejun Heo 
182387260216SJens Axboe 		ata_sg_init(qc, sgl, n_elem);
182449c80429SBrian King 		qc->nbytes = buflen;
1825c6fd2807SJeff Garzik 	}
1826c6fd2807SJeff Garzik 
1827c6fd2807SJeff Garzik 	qc->private_data = &wait;
1828c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1829c6fd2807SJeff Garzik 
1830c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1831c6fd2807SJeff Garzik 
1832c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1833c6fd2807SJeff Garzik 
18342b789108STejun Heo 	if (!timeout)
18352b789108STejun Heo 		timeout = ata_probe_timeout * 1000 / HZ;
18362b789108STejun Heo 
18372b789108STejun Heo 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1838c6fd2807SJeff Garzik 
1839c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1840c6fd2807SJeff Garzik 
1841c6fd2807SJeff Garzik 	if (!rc) {
1842c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1843c6fd2807SJeff Garzik 
1844c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1845c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1846c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1847c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1848c6fd2807SJeff Garzik 		 */
1849c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1850c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1851c6fd2807SJeff Garzik 
1852c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1853c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1854c6fd2807SJeff Garzik 			else
1855c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1856c6fd2807SJeff Garzik 
1857c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1858c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1859c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1860c6fd2807SJeff Garzik 		}
1861c6fd2807SJeff Garzik 
1862c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1863c6fd2807SJeff Garzik 	}
1864c6fd2807SJeff Garzik 
1865c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1866c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1867c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1868c6fd2807SJeff Garzik 
1869a51d644aSTejun Heo 	/* perform minimal error analysis */
1870a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1871a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1872a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1873a51d644aSTejun Heo 
1874a51d644aSTejun Heo 		if (!qc->err_mask)
1875c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1876a51d644aSTejun Heo 
1877a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1878a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1879c6fd2807SJeff Garzik 	}
1880c6fd2807SJeff Garzik 
1881c6fd2807SJeff Garzik 	/* finish up */
1882c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1883c6fd2807SJeff Garzik 
1884c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1885c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1886c6fd2807SJeff Garzik 
1887c6fd2807SJeff Garzik 	ata_qc_free(qc);
18889af5c9c9STejun Heo 	link->active_tag = preempted_tag;
18899af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1890c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1891da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1892c6fd2807SJeff Garzik 
1893c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1894c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1895c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1896c6fd2807SJeff Garzik 	 * port.
1897c6fd2807SJeff Garzik 	 *
1898c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1899c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1900c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1901c6fd2807SJeff Garzik 	 *
1902c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1903c6fd2807SJeff Garzik 	 */
1904c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1905c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1906c6fd2807SJeff Garzik 		ata_port_probe(ap);
1907c6fd2807SJeff Garzik 	}
1908c6fd2807SJeff Garzik 
1909c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1910c6fd2807SJeff Garzik 
1911c6fd2807SJeff Garzik 	return err_mask;
1912c6fd2807SJeff Garzik }
1913c6fd2807SJeff Garzik 
1914c6fd2807SJeff Garzik /**
191533480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
19162432697bSTejun Heo  *	@dev: Device to which the command is sent
19172432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
19182432697bSTejun Heo  *	@cdb: CDB for packet command
19192432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
19202432697bSTejun Heo  *	@buf: Data buffer of the command
19212432697bSTejun Heo  *	@buflen: Length of data buffer
19222b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
19232432697bSTejun Heo  *
19242432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
19252432697bSTejun Heo  *	buffer instead of sg list.
19262432697bSTejun Heo  *
19272432697bSTejun Heo  *	LOCKING:
19282432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
19292432697bSTejun Heo  *
19302432697bSTejun Heo  *	RETURNS:
19312432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
19322432697bSTejun Heo  */
19332432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
19342432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
19352b789108STejun Heo 			   int dma_dir, void *buf, unsigned int buflen,
19362b789108STejun Heo 			   unsigned long timeout)
19372432697bSTejun Heo {
193833480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
193933480a0eSTejun Heo 	unsigned int n_elem = 0;
19402432697bSTejun Heo 
194133480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
194233480a0eSTejun Heo 		WARN_ON(!buf);
19432432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
194433480a0eSTejun Heo 		psg = &sg;
194533480a0eSTejun Heo 		n_elem++;
194633480a0eSTejun Heo 	}
19472432697bSTejun Heo 
19482b789108STejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
19492b789108STejun Heo 				    timeout);
19502432697bSTejun Heo }
19512432697bSTejun Heo 
19522432697bSTejun Heo /**
1953c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1954c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1955c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1956c6fd2807SJeff Garzik  *
1957c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1958c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1959c6fd2807SJeff Garzik  *
1960c6fd2807SJeff Garzik  *	LOCKING:
1961c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1962c6fd2807SJeff Garzik  *
1963c6fd2807SJeff Garzik  *	RETURNS:
1964c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1965c6fd2807SJeff Garzik  */
1966c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1967c6fd2807SJeff Garzik {
1968c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1969c6fd2807SJeff Garzik 
1970c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1971c6fd2807SJeff Garzik 
1972c6fd2807SJeff Garzik 	tf.command = cmd;
1973c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1974c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1975c6fd2807SJeff Garzik 
19762b789108STejun Heo 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1977c6fd2807SJeff Garzik }
1978c6fd2807SJeff Garzik 
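/*
 * Illustrative sketch (not part of the driver): using the simple-command
 * helper above to flush a drive's write cache, picking the 48-bit opcode
 * when the device advertises it.  The function name is hypothetical; the
 * ATA_CMD_FLUSH* opcodes and ata_id_has_flush_ext() come from
 * <linux/ata.h>.
 */
static unsigned int __maybe_unused example_flush_cache(struct ata_device *dev)
{
	u8 cmd;

	if (ata_id_has_flush_ext(dev->id))
		cmd = ATA_CMD_FLUSH_EXT;	/* 48-bit FLUSH CACHE EXT */
	else
		cmd = ATA_CMD_FLUSH;		/* 28-bit FLUSH CACHE */

	return ata_do_simple_cmd(dev, cmd);
}
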
1979c6fd2807SJeff Garzik /**
1980c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1981c6fd2807SJeff Garzik  *	@adev: ATA device
1982c6fd2807SJeff Garzik  *
1983c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1984c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1985c6fd2807SJeff Garzik  */
1986c6fd2807SJeff Garzik 
1987c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1988c6fd2807SJeff Garzik {
1989432729f0SAlan Cox 	/* Controller doesn't support IORDY.  Probably a pointless check
1990432729f0SAlan Cox 	   as the caller should know this */
19919af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1992c6fd2807SJeff Garzik 		return 0;
1993432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1994432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1995c6fd2807SJeff Garzik 		return 1;
1996432729f0SAlan Cox 	/* We turn it on when possible */
1997432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1998432729f0SAlan Cox 		return 1;
1999432729f0SAlan Cox 	return 0;
2000432729f0SAlan Cox }
2001c6fd2807SJeff Garzik 
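/*
 * Illustrative sketch (hypothetical LLD code, not part of libata): a
 * controller's set_piomode hook would typically consult
 * ata_pio_need_iordy() while computing its timing value.  The function
 * name and the register bit below are made up for the example.
 */
static u8 __maybe_unused example_pio_timing(const struct ata_device *adev)
{
	u8 timing = 0;			/* hypothetical timing register value */

	if (ata_pio_need_iordy(adev))
		timing |= 0x80;		/* hypothetical IORDY-enable bit */

	return timing;
}
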
2002432729f0SAlan Cox /**
2003432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
2004432729f0SAlan Cox  *	@adev: ATA device
2005432729f0SAlan Cox  *
2006432729f0SAlan Cox  *	Compute the highest mode possible if we are not using iordy. Return
2007432729f0SAlan Cox  *	-1 if no iordy mode is available.
2008432729f0SAlan Cox  */
2009432729f0SAlan Cox 
2010432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2011432729f0SAlan Cox {
2012c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
2013c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
2014432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
2015c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
2016c6fd2807SJeff Garzik 		if (pio) {
2017c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
2018c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
2019432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
2020432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
2021c6fd2807SJeff Garzik 		}
2022c6fd2807SJeff Garzik 	}
2023432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
2024c6fd2807SJeff Garzik }
2025c6fd2807SJeff Garzik 
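/* Worked example for the helper above: a drive reporting 383 ns in
 * ATA_ID_EIDE_PIO (its fastest cycle without IORDY) is slower than the
 * 240 ns PIO2 cycle, so only PIO0-1 stay usable (3 << ATA_SHIFT_PIO); a
 * drive reporting 240 ns or less keeps PIO0-2 (7 << ATA_SHIFT_PIO).
 */
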
2026c6fd2807SJeff Garzik /**
2027c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
2028c6fd2807SJeff Garzik  *	@dev: target device
2029c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
2030bff04647STejun Heo  *	@flags: ATA_READID_* flags
2031c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
2032c6fd2807SJeff Garzik  *
2033c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
2034c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2035c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
2036c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
2037c6fd2807SJeff Garzik  *
203850a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
203950a99018SAlan Cox  *	now we abort if we hit that case.
204050a99018SAlan Cox  *
2041c6fd2807SJeff Garzik  *	LOCKING:
2042c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
2043c6fd2807SJeff Garzik  *
2044c6fd2807SJeff Garzik  *	RETURNS:
2045c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
2046c6fd2807SJeff Garzik  */
2047c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2048bff04647STejun Heo 		    unsigned int flags, u16 *id)
2049c6fd2807SJeff Garzik {
20509af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
2051c6fd2807SJeff Garzik 	unsigned int class = *p_class;
2052c6fd2807SJeff Garzik 	struct ata_taskfile tf;
2053c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
2054c6fd2807SJeff Garzik 	const char *reason;
205554936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
2056c6fd2807SJeff Garzik 	int rc;
2057c6fd2807SJeff Garzik 
2058c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
205944877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2060c6fd2807SJeff Garzik 
2061c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
2062c6fd2807SJeff Garzik  retry:
2063c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
2064c6fd2807SJeff Garzik 
2065c6fd2807SJeff Garzik 	switch (class) {
2066c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
2067c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
2068c6fd2807SJeff Garzik 		break;
2069c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
2070c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
2071c6fd2807SJeff Garzik 		break;
2072c6fd2807SJeff Garzik 	default:
2073c6fd2807SJeff Garzik 		rc = -ENODEV;
2074c6fd2807SJeff Garzik 		reason = "unsupported class";
2075c6fd2807SJeff Garzik 		goto err_out;
2076c6fd2807SJeff Garzik 	}
2077c6fd2807SJeff Garzik 
2078c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
207981afe893STejun Heo 
208081afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
208181afe893STejun Heo 	 * sure those are properly initialized.
208281afe893STejun Heo 	 */
208381afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
208481afe893STejun Heo 
208581afe893STejun Heo 	/* Device presence detection is unreliable on some
208681afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
208781afe893STejun Heo 	 */
208881afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
2089c6fd2807SJeff Garzik 
2090c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
20912b789108STejun Heo 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2092c6fd2807SJeff Garzik 	if (err_mask) {
2093800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
209455a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
209544877b4eSTejun Heo 				ap->print_id, dev->devno);
209655a8e2c8STejun Heo 			return -ENOENT;
209755a8e2c8STejun Heo 		}
209855a8e2c8STejun Heo 
209954936f8bSTejun Heo 		/* Device or controller might have reported the wrong
210054936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
210154936f8bSTejun Heo 		 * the current one is aborted by the device.
210254936f8bSTejun Heo 		 */
210354936f8bSTejun Heo 		if (may_fallback &&
210454936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
210554936f8bSTejun Heo 			may_fallback = 0;
210654936f8bSTejun Heo 
210754936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
210854936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
210954936f8bSTejun Heo 			else
211054936f8bSTejun Heo 				class = ATA_DEV_ATA;
211154936f8bSTejun Heo 			goto retry;
211254936f8bSTejun Heo 		}
211354936f8bSTejun Heo 
2114c6fd2807SJeff Garzik 		rc = -EIO;
2115c6fd2807SJeff Garzik 		reason = "I/O error";
2116c6fd2807SJeff Garzik 		goto err_out;
2117c6fd2807SJeff Garzik 	}
2118c6fd2807SJeff Garzik 
211954936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
212054936f8bSTejun Heo 	 * successfully at least once.
212154936f8bSTejun Heo 	 */
212254936f8bSTejun Heo 	may_fallback = 0;
212354936f8bSTejun Heo 
2124c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
2125c6fd2807SJeff Garzik 
2126c6fd2807SJeff Garzik 	/* sanity check */
2127c6fd2807SJeff Garzik 	rc = -EINVAL;
21286070068bSAlan Cox 	reason = "device reports invalid type";
21294a3381feSJeff Garzik 
21304a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
21314a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
21324a3381feSJeff Garzik 			goto err_out;
21334a3381feSJeff Garzik 	} else {
21344a3381feSJeff Garzik 		if (ata_id_is_ata(id))
2135c6fd2807SJeff Garzik 			goto err_out;
2136c6fd2807SJeff Garzik 	}
2137c6fd2807SJeff Garzik 
2138169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2139169439c2SMark Lord 		tried_spinup = 1;
2140169439c2SMark Lord 		/*
2141169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
2142169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
2143169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
2144169439c2SMark Lord 		 */
2145218f3d30SJeff Garzik 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2146fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
2147169439c2SMark Lord 			rc = -EIO;
2148169439c2SMark Lord 			reason = "SPINUP failed";
2149169439c2SMark Lord 			goto err_out;
2150169439c2SMark Lord 		}
2151169439c2SMark Lord 		/*
2152169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
2153169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
2154169439c2SMark Lord 		 */
2155169439c2SMark Lord 		if (id[2] == 0x37c8)
2156169439c2SMark Lord 			goto retry;
2157169439c2SMark Lord 	}
2158169439c2SMark Lord 
2159bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2160c6fd2807SJeff Garzik 		/*
2161c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
2162c6fd2807SJeff Garzik 		 * SRST RESET
216350a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
216450a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2165c6fd2807SJeff Garzik 		 * anything else..
2166c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
216750a99018SAlan Cox 		 *
216850a99018SAlan Cox 		 * Note that ATA4 says LBA is mandatory, so the second check
216950a99018SAlan Cox 		 * should never trigger.
2170c6fd2807SJeff Garzik 		 */
2171c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2172c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2173c6fd2807SJeff Garzik 			if (err_mask) {
2174c6fd2807SJeff Garzik 				rc = -EIO;
2175c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
2176c6fd2807SJeff Garzik 				goto err_out;
2177c6fd2807SJeff Garzik 			}
2178c6fd2807SJeff Garzik 
2179c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
2180c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
2181c6fd2807SJeff Garzik 			 */
2182bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
2183c6fd2807SJeff Garzik 			goto retry;
2184c6fd2807SJeff Garzik 		}
2185c6fd2807SJeff Garzik 	}
2186c6fd2807SJeff Garzik 
2187c6fd2807SJeff Garzik 	*p_class = class;
2188c6fd2807SJeff Garzik 
2189c6fd2807SJeff Garzik 	return 0;
2190c6fd2807SJeff Garzik 
2191c6fd2807SJeff Garzik  err_out:
2192c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
2193c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2194c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2195c6fd2807SJeff Garzik 	return rc;
2196c6fd2807SJeff Garzik }
2197c6fd2807SJeff Garzik 
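/* Detect a bridged device: the port speaks SATA but the attached
 * device's IDENTIFY data does not claim native SATA, i.e. a PATA drive
 * behind a SATA-PATA bridge which may need bridge limits applied.
 */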
2198c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
2199c6fd2807SJeff Garzik {
22009af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
22019af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2202c6fd2807SJeff Garzik }
2203c6fd2807SJeff Garzik 
2204c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
2205c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
2206c6fd2807SJeff Garzik {
22079af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
2208c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2209c6fd2807SJeff Garzik 
2210c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
2211c6fd2807SJeff Garzik 		desc[0] = '\0';
2212c6fd2807SJeff Garzik 		return;
2213c6fd2807SJeff Garzik 	}
221475683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
22156919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
22166919a0a6SAlan Cox 		return;
22176919a0a6SAlan Cox 	}
2218c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
2219cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2220c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
2221c6fd2807SJeff Garzik 	}
2222c6fd2807SJeff Garzik 
2223c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
2224c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2225c6fd2807SJeff Garzik 	else
2226c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2227c6fd2807SJeff Garzik }
2228c6fd2807SJeff Garzik 
2229c6fd2807SJeff Garzik /**
2230c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2231c6fd2807SJeff Garzik  *	@dev: Target device to configure
2232c6fd2807SJeff Garzik  *
2233c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
2234c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
2235c6fd2807SJeff Garzik  *
2236c6fd2807SJeff Garzik  *	LOCKING:
2237c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
2238c6fd2807SJeff Garzik  *
2239c6fd2807SJeff Garzik  *	RETURNS:
2240c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
2241c6fd2807SJeff Garzik  */
2242efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
2243c6fd2807SJeff Garzik {
22449af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
22459af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
22466746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2247c6fd2807SJeff Garzik 	const u16 *id = dev->id;
22487dc951aeSTejun Heo 	unsigned long xfer_mask;
2249b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
22503f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
22513f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
2252c6fd2807SJeff Garzik 	int rc;
2253c6fd2807SJeff Garzik 
2254c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
225544877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
225644877b4eSTejun Heo 			       __FUNCTION__);
2257c6fd2807SJeff Garzik 		return 0;
2258c6fd2807SJeff Garzik 	}
2259c6fd2807SJeff Garzik 
2260c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
226144877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2262c6fd2807SJeff Garzik 
226375683fe7STejun Heo 	/* set horkage */
226475683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
226533267325STejun Heo 	ata_force_horkage(dev);
226675683fe7STejun Heo 
22676746544cSTejun Heo 	/* let ACPI work its magic */
22686746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
22696746544cSTejun Heo 	if (rc)
22706746544cSTejun Heo 		return rc;
227108573a86SKristen Carlson Accardi 
227205027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
227305027adcSTejun Heo 	rc = ata_hpa_resize(dev);
227405027adcSTejun Heo 	if (rc)
227505027adcSTejun Heo 		return rc;
227605027adcSTejun Heo 
2277c6fd2807SJeff Garzik 	/* print device capabilities */
2278c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2279c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2280c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2281c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2282c6fd2807SJeff Garzik 			       __FUNCTION__,
2283c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
2284c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
2285c6fd2807SJeff Garzik 
2286c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
2287c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2288c6fd2807SJeff Garzik 	dev->max_sectors = 0;
2289c6fd2807SJeff Garzik 	dev->cdb_len = 0;
2290c6fd2807SJeff Garzik 	dev->n_sectors = 0;
2291c6fd2807SJeff Garzik 	dev->cylinders = 0;
2292c6fd2807SJeff Garzik 	dev->heads = 0;
2293c6fd2807SJeff Garzik 	dev->sectors = 0;
2294c6fd2807SJeff Garzik 
2295c6fd2807SJeff Garzik 	/*
2296c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
2297c6fd2807SJeff Garzik 	 */
2298c6fd2807SJeff Garzik 
2299c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
2300c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
2301c6fd2807SJeff Garzik 
2302c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2303c6fd2807SJeff Garzik 		ata_dump_id(id);
2304c6fd2807SJeff Garzik 
2305ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2306ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2307ef143d57SAlbert Lee 			sizeof(fwrevbuf));
2308ef143d57SAlbert Lee 
2309ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2310ef143d57SAlbert Lee 			sizeof(modelbuf));
2311ef143d57SAlbert Lee 
2312c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
2313c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
2314b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
2315b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
231644877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
231744877b4eSTejun Heo 					       "supports DRM functions and may "
231844877b4eSTejun Heo 					       "not be fully accessible.\n");
2319b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
2320ae8d4ee7SAlan Cox 		} else {
2321b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2322ae8d4ee7SAlan Cox 			/* Warn the user if the device has TPM extensions */
2323ae8d4ee7SAlan Cox 			if (ata_id_has_tpm(id))
2324ae8d4ee7SAlan Cox 				ata_dev_printk(dev, KERN_WARNING,
2325ae8d4ee7SAlan Cox 					       "supports DRM functions and may "
2326ae8d4ee7SAlan Cox 					       "not be fully accessible.\n");
2327ae8d4ee7SAlan Cox 		}
2328b352e57dSAlan Cox 
2329c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
2330c6fd2807SJeff Garzik 
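		/* IDENTIFY word 59: bit 8 set means the low byte holds the
		 * current multiple sector count setting.
		 */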
23313f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
23323f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
23333f64f565SEric D. Mudama 
2334c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
2335c6fd2807SJeff Garzik 			const char *lba_desc;
2336c6fd2807SJeff Garzik 			char ncq_desc[20];
2337c6fd2807SJeff Garzik 
2338c6fd2807SJeff Garzik 			lba_desc = "LBA";
2339c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
2340c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
2341c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
2342c6fd2807SJeff Garzik 				lba_desc = "LBA48";
23436fc49adbSTejun Heo 
23446fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
23456fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
23466fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2347c6fd2807SJeff Garzik 			}
2348c6fd2807SJeff Garzik 
2349c6fd2807SJeff Garzik 			/* config NCQ */
2350c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2351c6fd2807SJeff Garzik 
2352c6fd2807SJeff Garzik 			/* print device info to dmesg */
23533f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
23543f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
23553f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
23563f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
23573f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
23583f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
23593f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
2360c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
23613f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
23623f64f565SEric D. Mudama 			}
2363c6fd2807SJeff Garzik 		} else {
2364c6fd2807SJeff Garzik 			/* CHS */
2365c6fd2807SJeff Garzik 
2366c6fd2807SJeff Garzik 			/* Default translation */
2367c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
2368c6fd2807SJeff Garzik 			dev->heads	= id[3];
2369c6fd2807SJeff Garzik 			dev->sectors	= id[6];
2370c6fd2807SJeff Garzik 
2371c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
2372c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
2373c6fd2807SJeff Garzik 				dev->cylinders = id[54];
2374c6fd2807SJeff Garzik 				dev->heads     = id[55];
2375c6fd2807SJeff Garzik 				dev->sectors   = id[56];
2376c6fd2807SJeff Garzik 			}
2377c6fd2807SJeff Garzik 
2378c6fd2807SJeff Garzik 			/* print device info to dmesg */
23793f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2380c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
23813f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
23823f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
23833f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
23843f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
23853f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
23863f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
23873f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
23883f64f565SEric D. Mudama 					dev->heads, dev->sectors);
23893f64f565SEric D. Mudama 			}
2390c6fd2807SJeff Garzik 		}
2391c6fd2807SJeff Garzik 
2392c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2393c6fd2807SJeff Garzik 	}
2394c6fd2807SJeff Garzik 
2395c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2396c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2397854c73a2STejun Heo 		const char *cdb_intr_string = "";
2398854c73a2STejun Heo 		const char *atapi_an_string = "";
239991163006STejun Heo 		const char *dma_dir_string = "";
24007d77b247STejun Heo 		u32 sntf;
2401c6fd2807SJeff Garzik 
2402c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2403c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2404c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2405c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2406c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2407c6fd2807SJeff Garzik 			rc = -EINVAL;
2408c6fd2807SJeff Garzik 			goto err_out_nosup;
2409c6fd2807SJeff Garzik 		}
2410c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2411c6fd2807SJeff Garzik 
24127d77b247STejun Heo 		/* Enable ATAPI AN if both the host and the device
24137d77b247STejun Heo 		 * support it.  If PMP is attached, SNTF is required
24147d77b247STejun Heo 		 * to enable ATAPI AN to distinguish between PHY status
24157d77b247STejun Heo 		 * changed notifications and ATAPI ANs.
24169f45cbd3SKristen Carlson Accardi 		 */
24177d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
24187d77b247STejun Heo 		    (!ap->nr_pmp_links ||
24197d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2420854c73a2STejun Heo 			unsigned int err_mask;
2421854c73a2STejun Heo 
24229f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2423218f3d30SJeff Garzik 			err_mask = ata_dev_set_feature(dev,
2424218f3d30SJeff Garzik 					SETFEATURES_SATA_ENABLE, SATA_AN);
2425854c73a2STejun Heo 			if (err_mask)
24269f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2427854c73a2STejun Heo 					"failed to enable ATAPI AN "
2428854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2429854c73a2STejun Heo 			else {
24309f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2431854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2432854c73a2STejun Heo 			}
24339f45cbd3SKristen Carlson Accardi 		}
24349f45cbd3SKristen Carlson Accardi 
2435c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2436c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2437c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2438c6fd2807SJeff Garzik 		}
2439c6fd2807SJeff Garzik 
244091163006STejun Heo 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
244191163006STejun Heo 			dev->flags |= ATA_DFLAG_DMADIR;
244291163006STejun Heo 			dma_dir_string = ", DMADIR";
244391163006STejun Heo 		}
244491163006STejun Heo 
2445c6fd2807SJeff Garzik 		/* print device info to dmesg */
2446c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2447ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
244891163006STejun Heo 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2449ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2450c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
245191163006STejun Heo 				       cdb_intr_string, atapi_an_string,
245291163006STejun Heo 				       dma_dir_string);
2453c6fd2807SJeff Garzik 	}
2454c6fd2807SJeff Garzik 
2455914ed354STejun Heo 	/* determine max_sectors */
2456914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2457914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2458914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2459914ed354STejun Heo 
2460ca77329fSKristen Carlson Accardi 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2461ca77329fSKristen Carlson Accardi 		if (ata_id_has_hipm(dev->id))
2462ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_HIPM;
2463ca77329fSKristen Carlson Accardi 		if (ata_id_has_dipm(dev->id))
2464ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_DIPM;
2465ca77329fSKristen Carlson Accardi 	}
2466ca77329fSKristen Carlson Accardi 
2467c5038fc0SAlan Cox 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2468c5038fc0SAlan Cox 	   200 sectors */
2469c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2470c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2471c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2472c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2473c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2474c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2475c6fd2807SJeff Garzik 	}
2476c6fd2807SJeff Garzik 
2477f8d8e579STony Battersby 	if ((dev->class == ATA_DEV_ATAPI) &&
2478f442cd86SAlbert Lee 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2479f8d8e579STony Battersby 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2480f442cd86SAlbert Lee 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2481f442cd86SAlbert Lee 	}
2482f8d8e579STony Battersby 
248375683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
248403ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
248503ec52deSTejun Heo 					 dev->max_sectors);
248618d6e9d5SAlbert Lee 
2487ca77329fSKristen Carlson Accardi 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2488ca77329fSKristen Carlson Accardi 		dev->horkage |= ATA_HORKAGE_IPM;
2489ca77329fSKristen Carlson Accardi 
2490ca77329fSKristen Carlson Accardi 		/* reset link pm_policy for this port to no pm */
2491ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
2492ca77329fSKristen Carlson Accardi 	}
2493ca77329fSKristen Carlson Accardi 
2494c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2495cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2496c6fd2807SJeff Garzik 
2497c5038fc0SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2498c5038fc0SAlan Cox 		/* Let the user know. We don't want to disallow opens for
2499c5038fc0SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
2500c5038fc0SAlan Cox 		   idiot. Do this after the dev_config call as some controllers
2501c5038fc0SAlan Cox 		   with buggy firmware may want to avoid reporting false device
2502c5038fc0SAlan Cox 		   bugs */
2503c5038fc0SAlan Cox 
2504c5038fc0SAlan Cox 		if (print_info) {
2505c5038fc0SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
2506c5038fc0SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
2507c5038fc0SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
2508c5038fc0SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
2509c5038fc0SAlan Cox 		}
2510c5038fc0SAlan Cox 	}
2511c5038fc0SAlan Cox 
2512c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2513c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2514c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2515c6fd2807SJeff Garzik 	return 0;
2516c6fd2807SJeff Garzik 
2517c6fd2807SJeff Garzik err_out_nosup:
2518c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2519c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2520c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2521c6fd2807SJeff Garzik 	return rc;
2522c6fd2807SJeff Garzik }
2523c6fd2807SJeff Garzik 
2524c6fd2807SJeff Garzik /**
25252e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2526be0d18dfSAlan Cox  *	@ap: port
2527be0d18dfSAlan Cox  *
25282e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2529be0d18dfSAlan Cox  *	detection.
2530be0d18dfSAlan Cox  */
2531be0d18dfSAlan Cox 
2532be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2533be0d18dfSAlan Cox {
2534be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2535be0d18dfSAlan Cox }
2536be0d18dfSAlan Cox 
2537be0d18dfSAlan Cox /**
25382e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2539be0d18dfSAlan Cox  *	@ap: port
2540be0d18dfSAlan Cox  *
25412e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2542be0d18dfSAlan Cox  *	detection.
2543be0d18dfSAlan Cox  */
2544be0d18dfSAlan Cox 
2545be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2546be0d18dfSAlan Cox {
2547be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2548be0d18dfSAlan Cox }
2549be0d18dfSAlan Cox 
2550be0d18dfSAlan Cox /**
2551be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2552be0d18dfSAlan Cox  *	@ap: port
2553be0d18dfSAlan Cox  *
2554be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2555be0d18dfSAlan Cox  */
2556be0d18dfSAlan Cox 
2557be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2558be0d18dfSAlan Cox {
2559be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2560be0d18dfSAlan Cox }
2561be0d18dfSAlan Cox 
2562be0d18dfSAlan Cox /**
2563c88f90c3STejun Heo  *	ata_cable_ignore	-	return ignored PATA cable.
2564c88f90c3STejun Heo  *	@ap: port
2565c88f90c3STejun Heo  *
2566c88f90c3STejun Heo  *	Helper method for drivers which don't use cable type to limit
2567c88f90c3STejun Heo  *	transfer mode.
2568c88f90c3STejun Heo  */
2569c88f90c3STejun Heo int ata_cable_ignore(struct ata_port *ap)
2570c88f90c3STejun Heo {
2571c88f90c3STejun Heo 	return ATA_CBL_PATA_IGN;
2572c88f90c3STejun Heo }
2573c88f90c3STejun Heo 
2574c88f90c3STejun Heo /**
2575be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2576be0d18dfSAlan Cox  *	@ap: port
2577be0d18dfSAlan Cox  *
2578be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2579be0d18dfSAlan Cox  */
2580be0d18dfSAlan Cox 
2581be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2582be0d18dfSAlan Cox {
2583be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2584be0d18dfSAlan Cox }
2585be0d18dfSAlan Cox 
2586be0d18dfSAlan Cox /**
2587c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2588c6fd2807SJeff Garzik  *	@ap: Bus to probe
2589c6fd2807SJeff Garzik  *
2590c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2591c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2592c6fd2807SJeff Garzik  *	the bus.
2593c6fd2807SJeff Garzik  *
2594c6fd2807SJeff Garzik  *	LOCKING:
2595c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2596c6fd2807SJeff Garzik  *
2597c6fd2807SJeff Garzik  *	RETURNS:
2598c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2599c6fd2807SJeff Garzik  */
2600c6fd2807SJeff Garzik 
2601c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2602c6fd2807SJeff Garzik {
2603c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2604c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2605f58229f8STejun Heo 	int rc;
2606c6fd2807SJeff Garzik 	struct ata_device *dev;
2607c6fd2807SJeff Garzik 
2608c6fd2807SJeff Garzik 	ata_port_probe(ap);
2609c6fd2807SJeff Garzik 
2610f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2611f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2612c6fd2807SJeff Garzik 
2613c6fd2807SJeff Garzik  retry:
2614cdeab114STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2615cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2616cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2617cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2618cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2619cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2620cdeab114STejun Heo 		 * bus as we may be talking too fast.
2621cdeab114STejun Heo 		 */
2622cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2623cdeab114STejun Heo 
2624cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2625cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2626cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2627cdeab114STejun Heo 		 * configuring devices.
2628cdeab114STejun Heo 		 */
2629cdeab114STejun Heo 		if (ap->ops->set_piomode)
2630cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2631cdeab114STejun Heo 	}
2632cdeab114STejun Heo 
2633c6fd2807SJeff Garzik 	/* reset and determine device classes */
2634c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2635c6fd2807SJeff Garzik 
2636f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2637c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2638c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2639c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2640c6fd2807SJeff Garzik 		else
2641c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2642c6fd2807SJeff Garzik 
2643c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2644c6fd2807SJeff Garzik 	}
2645c6fd2807SJeff Garzik 
2646c6fd2807SJeff Garzik 	ata_port_probe(ap);
2647c6fd2807SJeff Garzik 
2648f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2649f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2650f31f0cc2SJeff Garzik 	   the slave device */
2651f31f0cc2SJeff Garzik 
2652f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2653f58229f8STejun Heo 		if (tries[dev->devno])
2654f58229f8STejun Heo 			dev->class = classes[dev->devno];
2655c6fd2807SJeff Garzik 
2656c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2657c6fd2807SJeff Garzik 			continue;
2658c6fd2807SJeff Garzik 
2659bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2660bff04647STejun Heo 				     dev->id);
2661c6fd2807SJeff Garzik 		if (rc)
2662c6fd2807SJeff Garzik 			goto fail;
2663f31f0cc2SJeff Garzik 	}
2664f31f0cc2SJeff Garzik 
2665be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2666be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2667be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2668be0d18dfSAlan Cox 
2669614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2670614fe29bSAlan Cox 	   reported cable types and sensed types */
2671614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2672614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2673614fe29bSAlan Cox 			continue;
2674614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2675614fe29bSAlan Cox 		   end of the link the bridge is on, which is a problem */
2676614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2677614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2678614fe29bSAlan Cox 	}
2679614fe29bSAlan Cox 
2680f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2681f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2682f31f0cc2SJeff Garzik 
2683f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2684f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2685f31f0cc2SJeff Garzik 			continue;
2686c6fd2807SJeff Garzik 
26879af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2688efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
26899af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2690c6fd2807SJeff Garzik 		if (rc)
2691c6fd2807SJeff Garzik 			goto fail;
2692c6fd2807SJeff Garzik 	}
2693c6fd2807SJeff Garzik 
2694c6fd2807SJeff Garzik 	/* configure transfer mode */
26950260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
26964ae72a1eSTejun Heo 	if (rc)
2697c6fd2807SJeff Garzik 		goto fail;
2698c6fd2807SJeff Garzik 
2699f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2700f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2701c6fd2807SJeff Garzik 			return 0;
2702c6fd2807SJeff Garzik 
2703c6fd2807SJeff Garzik 	/* no device present, disable port */
2704c6fd2807SJeff Garzik 	ata_port_disable(ap);
2705c6fd2807SJeff Garzik 	return -ENODEV;
2706c6fd2807SJeff Garzik 
2707c6fd2807SJeff Garzik  fail:
27084ae72a1eSTejun Heo 	tries[dev->devno]--;
27094ae72a1eSTejun Heo 
2710c6fd2807SJeff Garzik 	switch (rc) {
2711c6fd2807SJeff Garzik 	case -EINVAL:
27124ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2713c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2714c6fd2807SJeff Garzik 		break;
27154ae72a1eSTejun Heo 
27164ae72a1eSTejun Heo 	case -ENODEV:
27174ae72a1eSTejun Heo 		/* give it just one more chance */
27184ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
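		/* fall through */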
2719c6fd2807SJeff Garzik 	case -EIO:
27204ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
27214ae72a1eSTejun Heo 			/* This is the last chance, better to slow
27224ae72a1eSTejun Heo 			 * down than lose it.
27234ae72a1eSTejun Heo 			 */
2724936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
27254ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
27264ae72a1eSTejun Heo 		}
2727c6fd2807SJeff Garzik 	}
2728c6fd2807SJeff Garzik 
27294ae72a1eSTejun Heo 	if (!tries[dev->devno])
2730c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2731c6fd2807SJeff Garzik 
2732c6fd2807SJeff Garzik 	goto retry;
2733c6fd2807SJeff Garzik }
2734c6fd2807SJeff Garzik 
2735c6fd2807SJeff Garzik /**
2736c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2737c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2738c6fd2807SJeff Garzik  *
2739c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2740c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2741c6fd2807SJeff Garzik  *
2742cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2743c6fd2807SJeff Garzik  *	serialization.
2744c6fd2807SJeff Garzik  */
2745c6fd2807SJeff Garzik 
2746c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2747c6fd2807SJeff Garzik {
2748c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2749c6fd2807SJeff Garzik }
2750c6fd2807SJeff Garzik 
2751c6fd2807SJeff Garzik /**
2752c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2753936fd732STejun Heo  *	@link: SATA link to printk link status about
2754c6fd2807SJeff Garzik  *
2755c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2756c6fd2807SJeff Garzik  *
2757c6fd2807SJeff Garzik  *	LOCKING:
2758c6fd2807SJeff Garzik  *	None.
2759c6fd2807SJeff Garzik  */
2760936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2761c6fd2807SJeff Garzik {
2762c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2763c6fd2807SJeff Garzik 
2764936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2765c6fd2807SJeff Garzik 		return;
2766936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2767c6fd2807SJeff Garzik 
2768936fd732STejun Heo 	if (ata_link_online(link)) {
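		/* SStatus bits 7:4 (SPD) hold the negotiated link speed */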
2769c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2770936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2771c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2772c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2773c6fd2807SJeff Garzik 	} else {
2774936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2775c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2776c6fd2807SJeff Garzik 				sstatus, scontrol);
2777c6fd2807SJeff Garzik 	}
2778c6fd2807SJeff Garzik }
2779c6fd2807SJeff Garzik 
2780c6fd2807SJeff Garzik /**
2781c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2782c6fd2807SJeff Garzik  *	@adev: device
2783c6fd2807SJeff Garzik  *
2784c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or NULL if none
2785c6fd2807SJeff Garzik  *	is present.
2786c6fd2807SJeff Garzik  */
2787c6fd2807SJeff Garzik 
2788c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2789c6fd2807SJeff Garzik {
27909af5c9c9STejun Heo 	struct ata_link *link = adev->link;
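	/* devno is 0 or 1, so "1 - devno" picks the other device on the cable */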
27919af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2792c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2793c6fd2807SJeff Garzik 		return NULL;
2794c6fd2807SJeff Garzik 	return pair;
2795c6fd2807SJeff Garzik }
2796c6fd2807SJeff Garzik 
2797c6fd2807SJeff Garzik /**
2798c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2799c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2800c6fd2807SJeff Garzik  *
2801c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2802c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2803c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2804c6fd2807SJeff Garzik  *	on this port.
2805c6fd2807SJeff Garzik  *
2806cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2807c6fd2807SJeff Garzik  *	serialization.
2808c6fd2807SJeff Garzik  */
2809c6fd2807SJeff Garzik 
2810c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2811c6fd2807SJeff Garzik {
28129af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
28139af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2814c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2815c6fd2807SJeff Garzik }
2816c6fd2807SJeff Garzik 
2817c6fd2807SJeff Garzik /**
2818c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2819936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2820c6fd2807SJeff Garzik  *
2821936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2822c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2823c6fd2807SJeff Garzik  *	using sata_set_spd().
2824c6fd2807SJeff Garzik  *
2825c6fd2807SJeff Garzik  *	LOCKING:
2826c6fd2807SJeff Garzik  *	Inherited from caller.
2827c6fd2807SJeff Garzik  *
2828c6fd2807SJeff Garzik  *	RETURNS:
2829c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2830c6fd2807SJeff Garzik  */
2831936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2832c6fd2807SJeff Garzik {
2833c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2834c6fd2807SJeff Garzik 	int rc, highbit;
2835c6fd2807SJeff Garzik 
2836936fd732STejun Heo 	if (!sata_scr_valid(link))
2837008a7896STejun Heo 		return -EOPNOTSUPP;
2838008a7896STejun Heo 
2839008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2840936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2841008a7896STejun Heo 	 */
2842936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2843008a7896STejun Heo 	if (rc == 0)
2844008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2845008a7896STejun Heo 	else
2846936fd732STejun Heo 		spd = link->sata_spd;
2847c6fd2807SJeff Garzik 
2848936fd732STejun Heo 	mask = link->sata_spd_limit;
2849c6fd2807SJeff Garzik 	if (mask <= 1)
2850c6fd2807SJeff Garzik 		return -EINVAL;
2851008a7896STejun Heo 
2852008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2853c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2854c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2855c6fd2807SJeff Garzik 
2856008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2857008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2858008a7896STejun Heo 	 */
2859008a7896STejun Heo 	if (spd > 1)
2860008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2861008a7896STejun Heo 	else
2862008a7896STejun Heo 		mask &= 1;
2863008a7896STejun Heo 
2864008a7896STejun Heo 	/* were we already at the bottom? */
2865c6fd2807SJeff Garzik 	if (!mask)
2866c6fd2807SJeff Garzik 		return -EINVAL;
2867c6fd2807SJeff Garzik 
2868936fd732STejun Heo 	link->sata_spd_limit = mask;
2869c6fd2807SJeff Garzik 
2870936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2871c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2872c6fd2807SJeff Garzik 
2873c6fd2807SJeff Garzik 	return 0;
2874c6fd2807SJeff Garzik }
2875c6fd2807SJeff Garzik 
2876936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2877c6fd2807SJeff Garzik {
28785270222fSTejun Heo 	struct ata_link *host_link = &link->ap->link;
28795270222fSTejun Heo 	u32 limit, target, spd;
2880c6fd2807SJeff Garzik 
28815270222fSTejun Heo 	limit = link->sata_spd_limit;
28825270222fSTejun Heo 
28835270222fSTejun Heo 	/* Don't configure downstream link faster than upstream link.
28845270222fSTejun Heo 	 * It doesn't speed up anything and some PMPs choke on such
28855270222fSTejun Heo 	 * configuration.
28865270222fSTejun Heo 	 */
28875270222fSTejun Heo 	if (!ata_is_host_link(link) && host_link->sata_spd)
28885270222fSTejun Heo 		limit &= (1 << host_link->sata_spd) - 1;
28895270222fSTejun Heo 
28905270222fSTejun Heo 	if (limit == UINT_MAX)
28915270222fSTejun Heo 		target = 0;
2892c6fd2807SJeff Garzik 	else
28935270222fSTejun Heo 		target = fls(limit);
2894c6fd2807SJeff Garzik 
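	/* the SPD limit lives in SControl bits 7:4 */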
2895c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
28965270222fSTejun Heo 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2897c6fd2807SJeff Garzik 
28985270222fSTejun Heo 	return spd != target;
2899c6fd2807SJeff Garzik }
2900c6fd2807SJeff Garzik 
2901c6fd2807SJeff Garzik /**
2902c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2903936fd732STejun Heo  *	@link: Link in question
2904c6fd2807SJeff Garzik  *
2905c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2906936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2907c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2908c6fd2807SJeff Garzik  *	configuration.
2909c6fd2807SJeff Garzik  *
2910c6fd2807SJeff Garzik  *	LOCKING:
2911c6fd2807SJeff Garzik  *	Inherited from caller.
2912c6fd2807SJeff Garzik  *
2913c6fd2807SJeff Garzik  *	RETURNS:
2914c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2915c6fd2807SJeff Garzik  */
2916936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2917c6fd2807SJeff Garzik {
2918c6fd2807SJeff Garzik 	u32 scontrol;
2919c6fd2807SJeff Garzik 
2920936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2921db64bcf3STejun Heo 		return 1;
2922c6fd2807SJeff Garzik 
2923936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2924c6fd2807SJeff Garzik }
2925c6fd2807SJeff Garzik 
2926c6fd2807SJeff Garzik /**
2927c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2928936fd732STejun Heo  *	@link: Link to set SATA spd for
2929c6fd2807SJeff Garzik  *
2930936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2931c6fd2807SJeff Garzik  *
2932c6fd2807SJeff Garzik  *	LOCKING:
2933c6fd2807SJeff Garzik  *	Inherited from caller.
2934c6fd2807SJeff Garzik  *
2935c6fd2807SJeff Garzik  *	RETURNS:
2936c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2937c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2938c6fd2807SJeff Garzik  */
2939936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2940c6fd2807SJeff Garzik {
2941c6fd2807SJeff Garzik 	u32 scontrol;
2942c6fd2807SJeff Garzik 	int rc;
2943c6fd2807SJeff Garzik 
2944936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2945c6fd2807SJeff Garzik 		return rc;
2946c6fd2807SJeff Garzik 
2947936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2948c6fd2807SJeff Garzik 		return 0;
2949c6fd2807SJeff Garzik 
2950936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2951c6fd2807SJeff Garzik 		return rc;
2952c6fd2807SJeff Garzik 
2953c6fd2807SJeff Garzik 	return 1;
2954c6fd2807SJeff Garzik }
2955c6fd2807SJeff Garzik 
2956c6fd2807SJeff Garzik /*
2957c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2958c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2959c6fd2807SJeff Garzik  */
2960c6fd2807SJeff Garzik /*
2961b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2962c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2963b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2964b352e57dSAlan Cox  *
2965b352e57dSAlan Cox  * For PIO 5/6 and MWDMA 3/4 see the CFA specification 3.0.
2966c6fd2807SJeff Garzik  */
2967c6fd2807SJeff Garzik 
2968c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
296970cd071eSTejun Heo /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
297070cd071eSTejun Heo 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
297170cd071eSTejun Heo 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
297270cd071eSTejun Heo 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
297370cd071eSTejun Heo 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
297470cd071eSTejun Heo 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
297570cd071eSTejun Heo 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
297670cd071eSTejun Heo 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2977c6fd2807SJeff Garzik 
297870cd071eSTejun Heo 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
297970cd071eSTejun Heo 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
298070cd071eSTejun Heo 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2981c6fd2807SJeff Garzik 
298270cd071eSTejun Heo 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
298370cd071eSTejun Heo 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
298470cd071eSTejun Heo 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2985b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
298670cd071eSTejun Heo 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2987c6fd2807SJeff Garzik 
2988c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
298970cd071eSTejun Heo 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
299070cd071eSTejun Heo 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
299170cd071eSTejun Heo 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
299270cd071eSTejun Heo 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
299370cd071eSTejun Heo 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
299470cd071eSTejun Heo 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
299570cd071eSTejun Heo 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2996c6fd2807SJeff Garzik 
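	/* table terminator - 0xFF is above any valid transfer mode */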
2997c6fd2807SJeff Garzik 	{ 0xFF }
2998c6fd2807SJeff Garzik };
2999c6fd2807SJeff Garzik 
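/*
 * ENOUGH() converts a nanosecond value to clock counts, rounding up;
 * EZ() does the same but leaves a zero (unspecified) value as zero.
 */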
3000c6fd2807SJeff Garzik #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
3001c6fd2807SJeff Garzik #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
3002c6fd2807SJeff Garzik 
3003c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3004c6fd2807SJeff Garzik {
3005c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
3006c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
3007c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
3008c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
3009c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
3010c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
3011c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
3012c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
3013c6fd2807SJeff Garzik }
3014c6fd2807SJeff Garzik 
3015c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3016c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
3017c6fd2807SJeff Garzik {
3018c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
3019c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
3020c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
3021c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
3022c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
3023c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3024c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
3025c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
3026c6fd2807SJeff Garzik }
3027c6fd2807SJeff Garzik 
30286357357cSTejun Heo const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3029c6fd2807SJeff Garzik {
303070cd071eSTejun Heo 	const struct ata_timing *t = ata_timing;
3031c6fd2807SJeff Garzik 
303270cd071eSTejun Heo 	while (xfer_mode > t->mode)
303370cd071eSTejun Heo 		t++;
303470cd071eSTejun Heo 
303570cd071eSTejun Heo 	if (xfer_mode == t->mode)
3036c6fd2807SJeff Garzik 		return t;
303770cd071eSTejun Heo 	return NULL;
3038c6fd2807SJeff Garzik }
3039c6fd2807SJeff Garzik 
3040c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3041c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
3042c6fd2807SJeff Garzik {
3043c6fd2807SJeff Garzik 	const struct ata_timing *s;
3044c6fd2807SJeff Garzik 	struct ata_timing p;
3045c6fd2807SJeff Garzik 
3046c6fd2807SJeff Garzik 	/*
3047c6fd2807SJeff Garzik 	 * Find the mode.
3048c6fd2807SJeff Garzik 	 */
3049c6fd2807SJeff Garzik 
3050c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
3051c6fd2807SJeff Garzik 		return -EINVAL;
3052c6fd2807SJeff Garzik 
3053c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
3054c6fd2807SJeff Garzik 
3055c6fd2807SJeff Garzik 	/*
3056c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
3057c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
3058c6fd2807SJeff Garzik 	 */
3059c6fd2807SJeff Garzik 
3060c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
3061c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
3062c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3063c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3064c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3065c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3066c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3067c6fd2807SJeff Garzik 		}
3068c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3069c6fd2807SJeff Garzik 	}
3070c6fd2807SJeff Garzik 
3071c6fd2807SJeff Garzik 	/*
3072c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
3073c6fd2807SJeff Garzik 	 */
3074c6fd2807SJeff Garzik 
3075c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
3076c6fd2807SJeff Garzik 
3077c6fd2807SJeff Garzik 	/*
3078c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3079c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
3080c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3081c6fd2807SJeff Garzik 	 */
3082c6fd2807SJeff Garzik 
3083fd3367afSAlan 	if (speed > XFER_PIO_6) {
3084c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3085c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3086c6fd2807SJeff Garzik 	}
3087c6fd2807SJeff Garzik 
3088c6fd2807SJeff Garzik 	/*
3089c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
3090c6fd2807SJeff Garzik 	 */
3091c6fd2807SJeff Garzik 
3092c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
3093c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3094c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
3095c6fd2807SJeff Garzik 	}
3096c6fd2807SJeff Garzik 
3097c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
3098c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
3099c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
3100c6fd2807SJeff Garzik 	}
31014f701d1eSAlan Cox 
31024f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
31034f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
31044f701d1eSAlan Cox 	   if so we must correct this */
31054f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
31064f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
3107c6fd2807SJeff Garzik 
3108c6fd2807SJeff Garzik 	return 0;
3109c6fd2807SJeff Garzik }
3110c6fd2807SJeff Garzik 
3111c6fd2807SJeff Garzik /**
3112a0f79b92STejun Heo  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3113a0f79b92STejun Heo  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3114a0f79b92STejun Heo  *	@cycle: cycle duration in ns
3115a0f79b92STejun Heo  *
3116a0f79b92STejun Heo  *	Return matching xfer mode for @cycle.  The returned mode is of
3117a0f79b92STejun Heo  *	the transfer type specified by @xfer_shift.  If @cycle is too
3118a0f79b92STejun Heo  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3119a0f79b92STejun Heo  *	than the fastest known mode, the fasted mode is returned.
3120a0f79b92STejun Heo  *	than the fastest known mode, the fastest mode is returned.
3121a0f79b92STejun Heo  *	LOCKING:
3122a0f79b92STejun Heo  *	None.
3123a0f79b92STejun Heo  *
3124a0f79b92STejun Heo  *	RETURNS:
3125a0f79b92STejun Heo  *	Matching xfer_mode, 0xff if no match found.
3126a0f79b92STejun Heo  */
3127a0f79b92STejun Heo u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3128a0f79b92STejun Heo {
3129a0f79b92STejun Heo 	u8 base_mode = 0xff, last_mode = 0xff;
3130a0f79b92STejun Heo 	const struct ata_xfer_ent *ent;
3131a0f79b92STejun Heo 	const struct ata_timing *t;
3132a0f79b92STejun Heo 
3133a0f79b92STejun Heo 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3134a0f79b92STejun Heo 		if (ent->shift == xfer_shift)
3135a0f79b92STejun Heo 			base_mode = ent->base;
3136a0f79b92STejun Heo 
3137a0f79b92STejun Heo 	for (t = ata_timing_find_mode(base_mode);
3138a0f79b92STejun Heo 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3139a0f79b92STejun Heo 		unsigned short this_cycle;
3140a0f79b92STejun Heo 
3141a0f79b92STejun Heo 		switch (xfer_shift) {
3142a0f79b92STejun Heo 		case ATA_SHIFT_PIO:
3143a0f79b92STejun Heo 		case ATA_SHIFT_MWDMA:
3144a0f79b92STejun Heo 			this_cycle = t->cycle;
3145a0f79b92STejun Heo 			break;
3146a0f79b92STejun Heo 		case ATA_SHIFT_UDMA:
3147a0f79b92STejun Heo 			this_cycle = t->udma;
3148a0f79b92STejun Heo 			break;
3149a0f79b92STejun Heo 		default:
3150a0f79b92STejun Heo 			return 0xff;
3151a0f79b92STejun Heo 		}
3152a0f79b92STejun Heo 
3153a0f79b92STejun Heo 		if (cycle > this_cycle)
3154a0f79b92STejun Heo 			break;
3155a0f79b92STejun Heo 
3156a0f79b92STejun Heo 		last_mode = t->mode;
3157a0f79b92STejun Heo 	}
3158a0f79b92STejun Heo 
3159a0f79b92STejun Heo 	return last_mode;
3160a0f79b92STejun Heo }
3161a0f79b92STejun Heo 
3162a0f79b92STejun Heo /**
3163c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3164c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
3165458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
3166c6fd2807SJeff Garzik  *
3167c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
3168c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
3169c6fd2807SJeff Garzik  *	will apply the limit.
3170c6fd2807SJeff Garzik  *
3171c6fd2807SJeff Garzik  *	LOCKING:
3172c6fd2807SJeff Garzik  *	Inherited from caller.
3173c6fd2807SJeff Garzik  *
3174c6fd2807SJeff Garzik  *	RETURNS:
3175c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
3176c6fd2807SJeff Garzik  */
3177458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3178c6fd2807SJeff Garzik {
3179458337dbSTejun Heo 	char buf[32];
31807dc951aeSTejun Heo 	unsigned long orig_mask, xfer_mask;
31817dc951aeSTejun Heo 	unsigned long pio_mask, mwdma_mask, udma_mask;
3182458337dbSTejun Heo 	int quiet, highbit;
3183c6fd2807SJeff Garzik 
3184458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
3185458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
3186458337dbSTejun Heo 
3187458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3188458337dbSTejun Heo 						  dev->mwdma_mask,
3189c6fd2807SJeff Garzik 						  dev->udma_mask);
3190458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3191c6fd2807SJeff Garzik 
3192458337dbSTejun Heo 	switch (sel) {
3193458337dbSTejun Heo 	case ATA_DNXFER_PIO:
3194458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
3195458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
3196458337dbSTejun Heo 		break;
3197458337dbSTejun Heo 
3198458337dbSTejun Heo 	case ATA_DNXFER_DMA:
3199458337dbSTejun Heo 		if (udma_mask) {
3200458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
3201458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
3202458337dbSTejun Heo 			if (!udma_mask)
3203458337dbSTejun Heo 				return -ENOENT;
3204458337dbSTejun Heo 		} else if (mwdma_mask) {
3205458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
3206458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
3207458337dbSTejun Heo 			if (!mwdma_mask)
3208458337dbSTejun Heo 				return -ENOENT;
3209458337dbSTejun Heo 		}
3210458337dbSTejun Heo 		break;
3211458337dbSTejun Heo 
3212458337dbSTejun Heo 	case ATA_DNXFER_40C:
3213458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
3214458337dbSTejun Heo 		break;
3215458337dbSTejun Heo 
3216458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
3217458337dbSTejun Heo 		pio_mask &= 1;
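		/* fall through */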
3218458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
3219458337dbSTejun Heo 		mwdma_mask = 0;
3220458337dbSTejun Heo 		udma_mask = 0;
3221458337dbSTejun Heo 		break;
3222458337dbSTejun Heo 
3223458337dbSTejun Heo 	default:
3224458337dbSTejun Heo 		BUG();
3225458337dbSTejun Heo 	}
3226458337dbSTejun Heo 
3227458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3228458337dbSTejun Heo 
3229458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3230458337dbSTejun Heo 		return -ENOENT;
3231458337dbSTejun Heo 
3232458337dbSTejun Heo 	if (!quiet) {
3233458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3234458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
3235458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
3236458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3237458337dbSTejun Heo 		else
3238458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
3239458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
3240458337dbSTejun Heo 
3241458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
3242458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
3243458337dbSTejun Heo 	}
3244c6fd2807SJeff Garzik 
3245c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3246c6fd2807SJeff Garzik 			    &dev->udma_mask);
3247c6fd2807SJeff Garzik 
3248c6fd2807SJeff Garzik 	return 0;
3249c6fd2807SJeff Garzik }
3250c6fd2807SJeff Garzik 
3251c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
3252c6fd2807SJeff Garzik {
32539af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
32544055dee7STejun Heo 	const char *dev_err_whine = "";
32554055dee7STejun Heo 	int ign_dev_err = 0;
3256c6fd2807SJeff Garzik 	unsigned int err_mask;
3257c6fd2807SJeff Garzik 	int rc;
3258c6fd2807SJeff Garzik 
3259c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
3260c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3261c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
3262c6fd2807SJeff Garzik 
3263c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
32642dcb407eSJeff Garzik 
32654055dee7STejun Heo 	if (err_mask & ~AC_ERR_DEV)
32664055dee7STejun Heo 		goto fail;
32672dcb407eSJeff Garzik 
32684055dee7STejun Heo 	/* revalidate */
3269baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3270422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3271baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3272c6fd2807SJeff Garzik 	if (rc)
3273c6fd2807SJeff Garzik 		return rc;
3274c6fd2807SJeff Garzik 
32754055dee7STejun Heo 	/* Old CFA may refuse this command, which is just fine */
32764055dee7STejun Heo 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
32774055dee7STejun Heo 		ign_dev_err = 1;
32784055dee7STejun Heo 
32794055dee7STejun Heo 	/* Some very old devices and some bad newer ones fail any kind of
32804055dee7STejun Heo 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
32814055dee7STejun Heo 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
32824055dee7STejun Heo 			dev->pio_mode <= XFER_PIO_2)
32834055dee7STejun Heo 		ign_dev_err = 1;
32844055dee7STejun Heo 
32854055dee7STejun Heo 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
32864055dee7STejun Heo 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
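	/* IDENTIFY word 63, bit 8: MWDMA0 is the currently selected mode */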
32874055dee7STejun Heo 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
32884055dee7STejun Heo 	    dev->dma_mode == XFER_MW_DMA_0 &&
32894055dee7STejun Heo 	    (dev->id[63] >> 8) & 1)
32904055dee7STejun Heo 		ign_dev_err = 1;
32914055dee7STejun Heo 
32924055dee7STejun Heo 	/* if the device is actually configured correctly, ignore dev err */
32934055dee7STejun Heo 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
32944055dee7STejun Heo 		ign_dev_err = 1;
32954055dee7STejun Heo 
32964055dee7STejun Heo 	if (err_mask & AC_ERR_DEV) {
32974055dee7STejun Heo 		if (!ign_dev_err)
32984055dee7STejun Heo 			goto fail;
32994055dee7STejun Heo 		else
33004055dee7STejun Heo 			dev_err_whine = " (device error ignored)";
33014055dee7STejun Heo 	}
33024055dee7STejun Heo 
3303c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3304c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
3305c6fd2807SJeff Garzik 
33064055dee7STejun Heo 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
33074055dee7STejun Heo 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
33084055dee7STejun Heo 		       dev_err_whine);
33094055dee7STejun Heo 
3310c6fd2807SJeff Garzik 	return 0;
33114055dee7STejun Heo 
33124055dee7STejun Heo  fail:
33134055dee7STejun Heo 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
33144055dee7STejun Heo 		       "(err_mask=0x%x)\n", err_mask);
33154055dee7STejun Heo 	return -EIO;
3316c6fd2807SJeff Garzik }
3317c6fd2807SJeff Garzik 
3318c6fd2807SJeff Garzik /**
331904351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
33200260731fSTejun Heo  *	@link: link on which timings will be programmed
33211967b7ffSJoe Perches  *	@r_failed_dev: out parameter for failed device
3322c6fd2807SJeff Garzik  *
332304351821SAlan  *	Standard implementation of the function used to tune and set
332404351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
332504351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
3326c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
3327c6fd2807SJeff Garzik  *
3328c6fd2807SJeff Garzik  *	LOCKING:
3329c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3330c6fd2807SJeff Garzik  *
3331c6fd2807SJeff Garzik  *	RETURNS:
3332c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3333c6fd2807SJeff Garzik  */
333404351821SAlan 
33350260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3336c6fd2807SJeff Garzik {
33370260731fSTejun Heo 	struct ata_port *ap = link->ap;
3338c6fd2807SJeff Garzik 	struct ata_device *dev;
3339f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
3340c6fd2807SJeff Garzik 
3341c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
3342f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
33437dc951aeSTejun Heo 		unsigned long pio_mask, dma_mask;
3344b3a70601SAlan Cox 		unsigned int mode_mask;
3345c6fd2807SJeff Garzik 
3346c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3347c6fd2807SJeff Garzik 			continue;
3348c6fd2807SJeff Garzik 
3349b3a70601SAlan Cox 		mode_mask = ATA_DMA_MASK_ATA;
3350b3a70601SAlan Cox 		if (dev->class == ATA_DEV_ATAPI)
3351b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_ATAPI;
3352b3a70601SAlan Cox 		else if (ata_id_is_cfa(dev->id))
3353b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_CFA;
3354b3a70601SAlan Cox 
3355c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
335633267325STejun Heo 		ata_force_xfermask(dev);
3357c6fd2807SJeff Garzik 
3358c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3359c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3360b3a70601SAlan Cox 
3361b3a70601SAlan Cox 		if (libata_dma_mask & mode_mask)
3362b3a70601SAlan Cox 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3363b3a70601SAlan Cox 		else
3364b3a70601SAlan Cox 			dma_mask = 0;
3365b3a70601SAlan Cox 
3366c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3367c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3368c6fd2807SJeff Garzik 
3369c6fd2807SJeff Garzik 		found = 1;
337070cd071eSTejun Heo 		if (dev->dma_mode != 0xff)
3371c6fd2807SJeff Garzik 			used_dma = 1;
3372c6fd2807SJeff Garzik 	}
3373c6fd2807SJeff Garzik 	if (!found)
3374c6fd2807SJeff Garzik 		goto out;
3375c6fd2807SJeff Garzik 
3376c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
3377f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3378c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3379c6fd2807SJeff Garzik 			continue;
3380c6fd2807SJeff Garzik 
338170cd071eSTejun Heo 		if (dev->pio_mode == 0xff) {
3382c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3383c6fd2807SJeff Garzik 			rc = -EINVAL;
3384c6fd2807SJeff Garzik 			goto out;
3385c6fd2807SJeff Garzik 		}
3386c6fd2807SJeff Garzik 
3387c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
3388c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
3389c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
3390c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
3391c6fd2807SJeff Garzik 	}
3392c6fd2807SJeff Garzik 
3393c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
3394f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
339570cd071eSTejun Heo 		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3396c6fd2807SJeff Garzik 			continue;
3397c6fd2807SJeff Garzik 
3398c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
3399c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3400c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
3401c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
3402c6fd2807SJeff Garzik 	}
3403c6fd2807SJeff Garzik 
3404c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
3405f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
340618d90debSAlan 		/* don't update suspended devices' xfer mode */
34079666f400STejun Heo 		if (!ata_dev_enabled(dev))
3408c6fd2807SJeff Garzik 			continue;
3409c6fd2807SJeff Garzik 
3410c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
3411c6fd2807SJeff Garzik 		if (rc)
3412c6fd2807SJeff Garzik 			goto out;
3413c6fd2807SJeff Garzik 	}
3414c6fd2807SJeff Garzik 
3415c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
3416c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
3417c6fd2807SJeff Garzik 	 */
3418cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3419032af1ceSAlan 		ap->host->simplex_claimed = ap;
3420c6fd2807SJeff Garzik 
3421c6fd2807SJeff Garzik  out:
3422c6fd2807SJeff Garzik 	if (rc)
3423c6fd2807SJeff Garzik 		*r_failed_dev = dev;
3424c6fd2807SJeff Garzik 	return rc;
3425c6fd2807SJeff Garzik }
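
/*
 * Note on the packed masks used above (illustrative values, not taken
 * from this file): ata_pack_xfermask() folds the per-type PIO/MWDMA/UDMA
 * masks into a single value and ata_xfer_mask2mode() then picks the
 * highest set mode, returning 0xff for an empty mask -- which is why the
 * steps above use 0xff to mean "no mode configured".  For example:
 *
 *	pio_mask = ata_pack_xfermask(0x1f, 0, 0);	   PIO0-4 only
 *	dev->pio_mode = ata_xfer_mask2mode(pio_mask);	   XFER_PIO_4
 *	dev->dma_mode = ata_xfer_mask2mode(0);		   0xff, no DMA
 */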
3426c6fd2807SJeff Garzik 
3427c6fd2807SJeff Garzik /**
3428c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
3429c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
3430c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
3431c6fd2807SJeff Garzik  *
3432c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
3433c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3434c6fd2807SJeff Garzik  *	other threads.
3435c6fd2807SJeff Garzik  *
3436c6fd2807SJeff Garzik  *	LOCKING:
3437cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3438c6fd2807SJeff Garzik  */
3439c6fd2807SJeff Garzik 
3440c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3441c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3442c6fd2807SJeff Garzik {
3443c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3444c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3445c6fd2807SJeff Garzik }
3446c6fd2807SJeff Garzik 
3447c6fd2807SJeff Garzik /**
3448c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3449c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3450c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3451c6fd2807SJeff Garzik  *	@tmout: overall timeout
3452c6fd2807SJeff Garzik  *
3453c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3454c6fd2807SJeff Garzik  *	or a timeout occurs.
3455c6fd2807SJeff Garzik  *
3456d1adc1bbSTejun Heo  *	LOCKING:
3457d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3458d1adc1bbSTejun Heo  *
3459d1adc1bbSTejun Heo  *	RETURNS:
3460d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3461c6fd2807SJeff Garzik  */
3462d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3463c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3464c6fd2807SJeff Garzik {
3465c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3466c6fd2807SJeff Garzik 	u8 status;
3467c6fd2807SJeff Garzik 
3468c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3469c6fd2807SJeff Garzik 	timer_start = jiffies;
3470c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3471d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3472d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3473c6fd2807SJeff Garzik 		msleep(50);
3474c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3475c6fd2807SJeff Garzik 	}
3476c6fd2807SJeff Garzik 
3477d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3478c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
347935aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
348035aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3481c6fd2807SJeff Garzik 
3482c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3483d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3484d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3485c6fd2807SJeff Garzik 		msleep(50);
3486c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3487c6fd2807SJeff Garzik 	}
3488c6fd2807SJeff Garzik 
3489d1adc1bbSTejun Heo 	if (status == 0xff)
3490d1adc1bbSTejun Heo 		return -ENODEV;
3491d1adc1bbSTejun Heo 
3492c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3493c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
349435aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
349535aa7a43SJeff Garzik 				tmout / HZ, status);
3496d1adc1bbSTejun Heo 		return -EBUSY;
3497c6fd2807SJeff Garzik 	}
3498c6fd2807SJeff Garzik 
3499c6fd2807SJeff Garzik 	return 0;
3500c6fd2807SJeff Garzik }
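
/*
 * Usage sketch for ata_busy_sleep() (illustrative; the ATA_TMOUT_BOOT*
 * constants are the generic boot timeouts from <linux/libata.h>, and this
 * particular pairing is only an example):
 *
 *	rc = ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *	if (rc)
 *		return rc;	(still BSY, or status reads 0xff)
 *
 * @tmout_pat only bounds the first, quiet polling phase; once it expires
 * the "slow to respond" warning is printed and polling continues until
 * @tmout.
 */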
3501c6fd2807SJeff Garzik 
3502d4b2bab4STejun Heo /**
350388ff6eafSTejun Heo  *	ata_wait_after_reset - wait before checking status after reset
350488ff6eafSTejun Heo  *	@ap: port containing status register to be polled
350588ff6eafSTejun Heo  *	@deadline: deadline jiffies for the operation
350688ff6eafSTejun Heo  *
350788ff6eafSTejun Heo  *	After reset, we need to pause a while before reading status.
350888ff6eafSTejun Heo  *	Also, certain combination of controller and device report 0xff
350988ff6eafSTejun Heo  *	Also, certain combinations of controller and device report 0xff
351088ff6eafSTejun Heo  *	for some duration (e.g. until the SATA PHY is up and running),
351188ff6eafSTejun Heo  *	which is interpreted as an empty port in the ATA world.  This
351288ff6eafSTejun Heo  *	status.
351388ff6eafSTejun Heo  *
351488ff6eafSTejun Heo  *	LOCKING:
351588ff6eafSTejun Heo  *	Kernel thread context (may sleep).
351688ff6eafSTejun Heo  */
351788ff6eafSTejun Heo void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
351888ff6eafSTejun Heo {
351988ff6eafSTejun Heo 	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
352088ff6eafSTejun Heo 
352188ff6eafSTejun Heo 	if (time_before(until, deadline))
352288ff6eafSTejun Heo 		deadline = until;
352388ff6eafSTejun Heo 
352488ff6eafSTejun Heo 	/* Spec mandates ">= 2ms" before checking status.  We wait
352588ff6eafSTejun Heo 	 * 150ms, because that was the magic delay used for ATAPI
352688ff6eafSTejun Heo 	 * devices in Hale Landis's ATADRVR, for the period of time
352788ff6eafSTejun Heo 	 * between when the ATA command register is written and when the
352888ff6eafSTejun Heo 	 * status is checked.  Because waiting for "a while" before
352988ff6eafSTejun Heo 	 * checking status is fine, post SRST, we perform this magic
353088ff6eafSTejun Heo 	 * delay here as well.
353188ff6eafSTejun Heo 	 *
353288ff6eafSTejun Heo 	 * Old drivers/ide uses the 2ms rule and then waits for ready.
353388ff6eafSTejun Heo 	 */
353488ff6eafSTejun Heo 	msleep(150);
353588ff6eafSTejun Heo 
353688ff6eafSTejun Heo 	/* Wait for 0xff to clear.  Some SATA devices take a long time
353788ff6eafSTejun Heo 	 * to clear 0xff after reset.  For example, HHD424020F7SV00
353888ff6eafSTejun Heo 	 * iVDR needs >= 800ms, while Quantum GoVault needs even more
353988ff6eafSTejun Heo 	 * than that.
35401974e201STejun Heo 	 *
35411974e201STejun Heo 	 * Note that some PATA controllers (pata_ali) explode if
35421974e201STejun Heo 	 * status register is read more than once when there's no
35431974e201STejun Heo 	 * device attached.
354488ff6eafSTejun Heo 	 */
35451974e201STejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
354688ff6eafSTejun Heo 		while (1) {
354788ff6eafSTejun Heo 			u8 status = ata_chk_status(ap);
354888ff6eafSTejun Heo 
354988ff6eafSTejun Heo 			if (status != 0xff || time_after(jiffies, deadline))
355088ff6eafSTejun Heo 				return;
355188ff6eafSTejun Heo 
355288ff6eafSTejun Heo 			msleep(50);
355388ff6eafSTejun Heo 		}
355488ff6eafSTejun Heo 	}
35551974e201STejun Heo }
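
/*
 * Typical pairing (illustrative sketch; @deadline is whatever the reset
 * path was handed, as in ata_bus_softreset() below):
 *
 *	ata_wait_after_reset(ap, deadline);
 *	rc = ata_wait_ready(ap, deadline);
 *
 * i.e. first ride out the post-reset settling/0xff window, then poll for
 * !BSY against the same deadline.
 */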
355688ff6eafSTejun Heo 
355788ff6eafSTejun Heo /**
3558d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3559d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3560d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3561d4b2bab4STejun Heo  *
3562d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3563d4b2bab4STejun Heo  *	occurs.
3564d4b2bab4STejun Heo  *
3565d4b2bab4STejun Heo  *	LOCKING:
3566d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3567d4b2bab4STejun Heo  *
3568d4b2bab4STejun Heo  *	RETURNS:
3569d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3570d4b2bab4STejun Heo  */
3571d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3572d4b2bab4STejun Heo {
3573d4b2bab4STejun Heo 	unsigned long start = jiffies;
3574d4b2bab4STejun Heo 	int warned = 0;
3575d4b2bab4STejun Heo 
3576d4b2bab4STejun Heo 	while (1) {
3577d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3578d4b2bab4STejun Heo 		unsigned long now = jiffies;
3579d4b2bab4STejun Heo 
3580d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3581d4b2bab4STejun Heo 			return 0;
3582936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3583d4b2bab4STejun Heo 			return -ENODEV;
3584d4b2bab4STejun Heo 		if (time_after(now, deadline))
3585d4b2bab4STejun Heo 			return -EBUSY;
3586d4b2bab4STejun Heo 
3587d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3588d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3589d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3590d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3591d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3592d4b2bab4STejun Heo 			warned = 1;
3593d4b2bab4STejun Heo 		}
3594d4b2bab4STejun Heo 
3595d4b2bab4STejun Heo 		msleep(50);
3596d4b2bab4STejun Heo 	}
3597d4b2bab4STejun Heo }
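
/*
 * Return-value sketch (mirrors how callers in this file use it):
 *
 *	rc = ata_wait_ready(ap, deadline);
 *	if (rc && rc != -ENODEV)
 *		return rc;
 *
 * -ENODEV means the link is offline and status reads 0xff (nothing
 * there), which most callers treat as a soft failure, while -EBUSY means
 * the deadline expired with BSY still set.
 */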
3598d4b2bab4STejun Heo 
3599d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3600d4b2bab4STejun Heo 			      unsigned long deadline)
3601c6fd2807SJeff Garzik {
3602c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3603c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3604c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
36059b89391cSTejun Heo 	int rc, ret = 0;
3606c6fd2807SJeff Garzik 
3607c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3608c6fd2807SJeff Garzik 	 * BSY bit to clear
3609c6fd2807SJeff Garzik 	 */
3610d4b2bab4STejun Heo 	if (dev0) {
3611d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
36129b89391cSTejun Heo 		if (rc) {
36139b89391cSTejun Heo 			if (rc != -ENODEV)
3614d4b2bab4STejun Heo 				return rc;
36159b89391cSTejun Heo 			ret = rc;
36169b89391cSTejun Heo 		}
3617d4b2bab4STejun Heo 	}
3618c6fd2807SJeff Garzik 
3619e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3620e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3621c6fd2807SJeff Garzik 	 */
3622e141d999STejun Heo 	if (dev1) {
3623e141d999STejun Heo 		int i;
3624c6fd2807SJeff Garzik 
3625c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3626e141d999STejun Heo 
3627e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3628e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3629e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3630e141d999STejun Heo 		 */
3631e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3632e141d999STejun Heo 			u8 nsect, lbal;
3633e141d999STejun Heo 
36340d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
36350d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3636c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3637c6fd2807SJeff Garzik 				break;
3638c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3639c6fd2807SJeff Garzik 		}
3640e141d999STejun Heo 
3641d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
36429b89391cSTejun Heo 		if (rc) {
36439b89391cSTejun Heo 			if (rc != -ENODEV)
3644d4b2bab4STejun Heo 				return rc;
36459b89391cSTejun Heo 			ret = rc;
36469b89391cSTejun Heo 		}
3647d4b2bab4STejun Heo 	}
3648c6fd2807SJeff Garzik 
3649c6fd2807SJeff Garzik 	/* is all this really necessary? */
3650c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3651c6fd2807SJeff Garzik 	if (dev1)
3652c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3653c6fd2807SJeff Garzik 	if (dev0)
3654c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3655d4b2bab4STejun Heo 
36569b89391cSTejun Heo 	return ret;
3657c6fd2807SJeff Garzik }
3658c6fd2807SJeff Garzik 
3659d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3660d4b2bab4STejun Heo 			     unsigned long deadline)
3661c6fd2807SJeff Garzik {
3662c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3663c6fd2807SJeff Garzik 
366444877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3665c6fd2807SJeff Garzik 
3666c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
36670d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3668c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
36690d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3670c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
36710d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3672c6fd2807SJeff Garzik 
367388ff6eafSTejun Heo 	/* wait a while before checking status */
367488ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
3675c6fd2807SJeff Garzik 
3676c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3677c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3678c6fd2807SJeff Garzik 	 * pulldown resistor.
3679c6fd2807SJeff Garzik 	 */
3680150981b0SAlan Cox 	if (ata_chk_status(ap) == 0xFF)
36819b89391cSTejun Heo 		return -ENODEV;
3682c6fd2807SJeff Garzik 
3683d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3684c6fd2807SJeff Garzik }
3685c6fd2807SJeff Garzik 
3686c6fd2807SJeff Garzik /**
3687c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3688c6fd2807SJeff Garzik  *	@ap: port to reset
3689c6fd2807SJeff Garzik  *
3690c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3691c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3692c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3693c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3694c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3695c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3696c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3697c6fd2807SJeff Garzik  *
3698c6fd2807SJeff Garzik  *	LOCKING:
3699c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3700cca3974eSJeff Garzik  *	Obtains host lock.
3701c6fd2807SJeff Garzik  *
3702c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3703c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3704c6fd2807SJeff Garzik  */
3705c6fd2807SJeff Garzik 
3706c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3707c6fd2807SJeff Garzik {
37089af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3709c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3710c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3711c6fd2807SJeff Garzik 	u8 err;
3712c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
37139b89391cSTejun Heo 	int rc;
3714c6fd2807SJeff Garzik 
371544877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3716c6fd2807SJeff Garzik 
3717c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3718c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3719c6fd2807SJeff Garzik 		dev0 = 1;
3720c6fd2807SJeff Garzik 	else {
3721c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3722c6fd2807SJeff Garzik 		if (slave_possible)
3723c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3724c6fd2807SJeff Garzik 	}
3725c6fd2807SJeff Garzik 
3726c6fd2807SJeff Garzik 	if (dev0)
3727c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3728c6fd2807SJeff Garzik 	if (dev1)
3729c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3730c6fd2807SJeff Garzik 
3731c6fd2807SJeff Garzik 	/* select device 0 again */
3732c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3733c6fd2807SJeff Garzik 
3734c6fd2807SJeff Garzik 	/* issue bus reset */
37359b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
37369b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
37379b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3738c6fd2807SJeff Garzik 			goto err_out;
37399b89391cSTejun Heo 	}
3740c6fd2807SJeff Garzik 
3741c6fd2807SJeff Garzik 	/*
3742c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3743c6fd2807SJeff Garzik 	 */
37443f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3745c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
37463f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3747c6fd2807SJeff Garzik 
3748c6fd2807SJeff Garzik 	/* is double-select really necessary? */
37499af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3750c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
37519af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3752c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3753c6fd2807SJeff Garzik 
3754c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
37559af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
37569af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3757c6fd2807SJeff Garzik 		goto err_out;
3758c6fd2807SJeff Garzik 
3759c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3760c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
37610d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3762c6fd2807SJeff Garzik 	}
3763c6fd2807SJeff Garzik 
3764c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3765c6fd2807SJeff Garzik 	return;
3766c6fd2807SJeff Garzik 
3767c6fd2807SJeff Garzik err_out:
3768c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3769ac8869d5SJeff Garzik 	ata_port_disable(ap);
3770c6fd2807SJeff Garzik 
3771c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3772c6fd2807SJeff Garzik }
3773c6fd2807SJeff Garzik 
3774c6fd2807SJeff Garzik /**
3775936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3776936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3777c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3778d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3779c6fd2807SJeff Garzik  *
3780936fd732STejun Heo  *	Make sure SStatus of @link reaches stable state, determined by
3781c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3782c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3783d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3784d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3785c6fd2807SJeff Garzik  *	until timeout then returns 0 if DET is stable at 1.
3786c6fd2807SJeff Garzik  *
3787d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3788d4b2bab4STejun Heo  *	two is used.
3789d4b2bab4STejun Heo  *
3790c6fd2807SJeff Garzik  *	LOCKING:
3791c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3792c6fd2807SJeff Garzik  *
3793c6fd2807SJeff Garzik  *	RETURNS:
3794c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3795c6fd2807SJeff Garzik  */
3796936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3797d4b2bab4STejun Heo 		       unsigned long deadline)
3798c6fd2807SJeff Garzik {
3799c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3800d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3801d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3802c6fd2807SJeff Garzik 	u32 last, cur;
3803c6fd2807SJeff Garzik 	int rc;
3804c6fd2807SJeff Garzik 
3805d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3806d4b2bab4STejun Heo 	if (time_before(t, deadline))
3807d4b2bab4STejun Heo 		deadline = t;
3808d4b2bab4STejun Heo 
3809936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3810c6fd2807SJeff Garzik 		return rc;
3811c6fd2807SJeff Garzik 	cur &= 0xf;
3812c6fd2807SJeff Garzik 
3813c6fd2807SJeff Garzik 	last = cur;
3814c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3815c6fd2807SJeff Garzik 
3816c6fd2807SJeff Garzik 	while (1) {
3817c6fd2807SJeff Garzik 		msleep(interval_msec);
3818936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3819c6fd2807SJeff Garzik 			return rc;
3820c6fd2807SJeff Garzik 		cur &= 0xf;
3821c6fd2807SJeff Garzik 
3822c6fd2807SJeff Garzik 		/* DET stable? */
3823c6fd2807SJeff Garzik 		if (cur == last) {
3824d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3825c6fd2807SJeff Garzik 				continue;
3826c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3827c6fd2807SJeff Garzik 				return 0;
3828c6fd2807SJeff Garzik 			continue;
3829c6fd2807SJeff Garzik 		}
3830c6fd2807SJeff Garzik 
3831c6fd2807SJeff Garzik 		/* unstable, start over */
3832c6fd2807SJeff Garzik 		last = cur;
3833c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3834c6fd2807SJeff Garzik 
3835f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3836f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3837f1545154STejun Heo 		 */
3838d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3839f1545154STejun Heo 			return -EPIPE;
3840c6fd2807SJeff Garzik 	}
3841c6fd2807SJeff Garzik }
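
/*
 * Parameter sketch (illustrative): @params is the usual libata debounce
 * triple.  Assuming the sata_deb_timing_normal[] table exported by this
 * library, a debounce bounded by an arbitrary 5 second budget would be:
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_normal,
 *				jiffies + 5 * HZ);
 *
 * Real callers pass down the EH deadline instead of a local budget.
 */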
3842c6fd2807SJeff Garzik 
3843c6fd2807SJeff Garzik /**
3844936fd732STejun Heo  *	sata_link_resume - resume SATA link
3845936fd732STejun Heo  *	@link: ATA link to resume SATA
3846c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3847d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3848c6fd2807SJeff Garzik  *
3849936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3850c6fd2807SJeff Garzik  *
3851c6fd2807SJeff Garzik  *	LOCKING:
3852c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3853c6fd2807SJeff Garzik  *
3854c6fd2807SJeff Garzik  *	RETURNS:
3855c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3856c6fd2807SJeff Garzik  */
3857936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3858d4b2bab4STejun Heo 		     unsigned long deadline)
3859c6fd2807SJeff Garzik {
3860c6fd2807SJeff Garzik 	u32 scontrol;
3861c6fd2807SJeff Garzik 	int rc;
3862c6fd2807SJeff Garzik 
3863936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3864c6fd2807SJeff Garzik 		return rc;
3865c6fd2807SJeff Garzik 
3866c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3867c6fd2807SJeff Garzik 
3868936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3869c6fd2807SJeff Garzik 		return rc;
3870c6fd2807SJeff Garzik 
3871c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3872c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3873c6fd2807SJeff Garzik 	 */
3874c6fd2807SJeff Garzik 	msleep(200);
3875c6fd2807SJeff Garzik 
3876936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3877c6fd2807SJeff Garzik }
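
/*
 * Decoding the SControl values used here and in sata_link_hardreset()
 * below: bits 3:0 are DET, bits 7:4 the SPD limit (preserved by the
 * "& 0x0f0"), and bits 11:8 IPM, so
 *
 *	0x300	DET = 0 (no action), IPM = 3 (disallow partial/slumber)
 *	0x301	DET = 1 (issue COMRESET)
 *	0x304	DET = 4 (take the phy offline)
 */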
3878c6fd2807SJeff Garzik 
3879c6fd2807SJeff Garzik /**
3880c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3881cc0680a5STejun Heo  *	@link: ATA link to be reset
3882d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3883c6fd2807SJeff Garzik  *
3884cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  Failure from
3885b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give up
3886b8cffc6aSTejun Heo  *	that port, so prereset should be best-effort.  It does its
3887b8cffc6aSTejun Heo  *	best to prepare for reset sequence but if things go wrong, it
3888b8cffc6aSTejun Heo  *	should just whine, not fail.
3889c6fd2807SJeff Garzik  *
3890c6fd2807SJeff Garzik  *	LOCKING:
3891c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3892c6fd2807SJeff Garzik  *
3893c6fd2807SJeff Garzik  *	RETURNS:
3894c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3895c6fd2807SJeff Garzik  */
3896cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3897c6fd2807SJeff Garzik {
3898cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3899936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3900c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3901c6fd2807SJeff Garzik 	int rc;
3902c6fd2807SJeff Garzik 
390331daabdaSTejun Heo 	/* handle link resume */
3904c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
39050c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3906c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3907c6fd2807SJeff Garzik 
3908633273a3STejun Heo 	/* Some PMPs don't work with only SRST, force hardreset if PMP
3909633273a3STejun Heo 	 * is supported.
3910633273a3STejun Heo 	 */
3911633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP)
3912633273a3STejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
3913633273a3STejun Heo 
3914c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3915c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3916c6fd2807SJeff Garzik 		return 0;
3917c6fd2807SJeff Garzik 
3918936fd732STejun Heo 	/* if SATA, resume link */
3919a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3920936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3921b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3922b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3923cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3924c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3925c6fd2807SJeff Garzik 	}
3926c6fd2807SJeff Garzik 
3927c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3928c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3929c6fd2807SJeff Garzik 	 */
39300c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3931b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
39326dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3933cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3934b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3935b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3936b8cffc6aSTejun Heo 		}
3937b8cffc6aSTejun Heo 	}
3938c6fd2807SJeff Garzik 
3939c6fd2807SJeff Garzik 	return 0;
3940c6fd2807SJeff Garzik }
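
/*
 * LLDDs typically wrap this rather than replace it.  A hypothetical
 * driver prereset (sketch only; "foo" is not a real driver):
 *
 *	static int foo_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		(driver-specific preparation, e.g. enabling the port)
 *		return ata_std_prereset(link, deadline);
 *	}
 */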
3941c6fd2807SJeff Garzik 
3942c6fd2807SJeff Garzik /**
3943c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3944cc0680a5STejun Heo  *	@link: ATA link to reset
3945c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3946d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3947c6fd2807SJeff Garzik  *
3948c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3949c6fd2807SJeff Garzik  *
3950c6fd2807SJeff Garzik  *	LOCKING:
3951c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3952c6fd2807SJeff Garzik  *
3953c6fd2807SJeff Garzik  *	RETURNS:
3954c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3955c6fd2807SJeff Garzik  */
3956cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3957d4b2bab4STejun Heo 		      unsigned long deadline)
3958c6fd2807SJeff Garzik {
3959cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3960c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3961d4b2bab4STejun Heo 	unsigned int devmask = 0;
3962d4b2bab4STejun Heo 	int rc;
3963c6fd2807SJeff Garzik 	u8 err;
3964c6fd2807SJeff Garzik 
3965c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3966c6fd2807SJeff Garzik 
3967936fd732STejun Heo 	if (ata_link_offline(link)) {
3968c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3969c6fd2807SJeff Garzik 		goto out;
3970c6fd2807SJeff Garzik 	}
3971c6fd2807SJeff Garzik 
3972c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3973c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3974c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3975c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3976c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3977c6fd2807SJeff Garzik 
3978c6fd2807SJeff Garzik 	/* select device 0 again */
3979c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3980c6fd2807SJeff Garzik 
3981c6fd2807SJeff Garzik 	/* issue bus reset */
3982c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3983d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
39849b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
3985936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3986cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3987d4b2bab4STejun Heo 		return rc;
3988c6fd2807SJeff Garzik 	}
3989c6fd2807SJeff Garzik 
3990c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
39913f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
39923f19859eSTejun Heo 					  devmask & (1 << 0), &err);
3993c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
39943f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
39953f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3996c6fd2807SJeff Garzik 
3997c6fd2807SJeff Garzik  out:
3998c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3999c6fd2807SJeff Garzik 	return 0;
4000c6fd2807SJeff Garzik }
4001c6fd2807SJeff Garzik 
4002c6fd2807SJeff Garzik /**
4003cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
4004cc0680a5STejun Heo  *	@link: link to reset
4005b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
4006d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
4007c6fd2807SJeff Garzik  *
4008cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
4009c6fd2807SJeff Garzik  *
4010c6fd2807SJeff Garzik  *	LOCKING:
4011c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4012c6fd2807SJeff Garzik  *
4013c6fd2807SJeff Garzik  *	RETURNS:
4014c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
4015c6fd2807SJeff Garzik  */
4016cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4017d4b2bab4STejun Heo 			unsigned long deadline)
4018c6fd2807SJeff Garzik {
4019c6fd2807SJeff Garzik 	u32 scontrol;
4020c6fd2807SJeff Garzik 	int rc;
4021c6fd2807SJeff Garzik 
4022c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
4023c6fd2807SJeff Garzik 
4024936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
4025c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
4026c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
4027c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
4028c6fd2807SJeff Garzik 		 * and Sil3124.
4029c6fd2807SJeff Garzik 		 */
4030936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4031b6103f6dSTejun Heo 			goto out;
4032c6fd2807SJeff Garzik 
4033cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
4034c6fd2807SJeff Garzik 
4035936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4036b6103f6dSTejun Heo 			goto out;
4037c6fd2807SJeff Garzik 
4038936fd732STejun Heo 		sata_set_spd(link);
4039c6fd2807SJeff Garzik 	}
4040c6fd2807SJeff Garzik 
4041c6fd2807SJeff Garzik 	/* issue phy wake/reset */
4042936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4043b6103f6dSTejun Heo 		goto out;
4044c6fd2807SJeff Garzik 
4045c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
4046c6fd2807SJeff Garzik 
4047936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4048b6103f6dSTejun Heo 		goto out;
4049c6fd2807SJeff Garzik 
4050c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4051c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
4052c6fd2807SJeff Garzik 	 */
4053c6fd2807SJeff Garzik 	msleep(1);
4054c6fd2807SJeff Garzik 
4055936fd732STejun Heo 	/* bring link back */
4056936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
4057b6103f6dSTejun Heo  out:
4058b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
4059b6103f6dSTejun Heo 	return rc;
4060b6103f6dSTejun Heo }
4061b6103f6dSTejun Heo 
4062b6103f6dSTejun Heo /**
4063b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
4064cc0680a5STejun Heo  *	@link: link to reset
4065b6103f6dSTejun Heo  *	@class: resulting class of attached device
4066d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
4067b6103f6dSTejun Heo  *
4068b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
4069b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
4070b6103f6dSTejun Heo  *
4071b6103f6dSTejun Heo  *	LOCKING:
4072b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
4073b6103f6dSTejun Heo  *
4074b6103f6dSTejun Heo  *	RETURNS:
4075b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
4076b6103f6dSTejun Heo  */
4077cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4078d4b2bab4STejun Heo 		       unsigned long deadline)
4079b6103f6dSTejun Heo {
4080cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
4081936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4082b6103f6dSTejun Heo 	int rc;
4083b6103f6dSTejun Heo 
4084b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
4085b6103f6dSTejun Heo 
4086b6103f6dSTejun Heo 	/* do hardreset */
4087cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
4088b6103f6dSTejun Heo 	if (rc) {
4089cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
4090b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
4091b6103f6dSTejun Heo 		return rc;
4092b6103f6dSTejun Heo 	}
4093c6fd2807SJeff Garzik 
4094c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
4095936fd732STejun Heo 	if (ata_link_offline(link)) {
4096c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
4097c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
4098c6fd2807SJeff Garzik 		return 0;
4099c6fd2807SJeff Garzik 	}
4100c6fd2807SJeff Garzik 
410188ff6eafSTejun Heo 	/* wait a while before checking status */
410288ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
410334fee227STejun Heo 
4104633273a3STejun Heo 	/* If PMP is supported, we have to do follow-up SRST.  Note
4105633273a3STejun Heo 	 * that some PMPs don't send D2H Reg FIS after hardreset at
4106633273a3STejun Heo 	 * all if the first port is empty.  Wait for it just for a
4107633273a3STejun Heo 	 * second and request follow-up SRST.
4108633273a3STejun Heo 	 */
4109633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP) {
4110633273a3STejun Heo 		ata_wait_ready(ap, jiffies + HZ);
4111633273a3STejun Heo 		return -EAGAIN;
4112633273a3STejun Heo 	}
4113633273a3STejun Heo 
4114d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
41159b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
41169b89391cSTejun Heo 	if (rc) {
4117cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
4118d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
4119d4b2bab4STejun Heo 		return rc;
4120c6fd2807SJeff Garzik 	}
4121c6fd2807SJeff Garzik 
4122c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
4123c6fd2807SJeff Garzik 
41243f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
4125c6fd2807SJeff Garzik 
4126c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
4127c6fd2807SJeff Garzik 	return 0;
4128c6fd2807SJeff Garzik }
4129c6fd2807SJeff Garzik 
4130c6fd2807SJeff Garzik /**
4131c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
4132cc0680a5STejun Heo  *	@link: the target ata_link
4133c6fd2807SJeff Garzik  *	@classes: classes of attached devices
4134c6fd2807SJeff Garzik  *
4135c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
4136c6fd2807SJeff Garzik  *	the device might have been reset more than once using
4137c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
4138c6fd2807SJeff Garzik  *
4139c6fd2807SJeff Garzik  *	LOCKING:
4140c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4141c6fd2807SJeff Garzik  */
4142cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4143c6fd2807SJeff Garzik {
4144cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
4145c6fd2807SJeff Garzik 	u32 serror;
4146c6fd2807SJeff Garzik 
4147c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
4148c6fd2807SJeff Garzik 
4149c6fd2807SJeff Garzik 	/* print link status */
4150936fd732STejun Heo 	sata_print_link_status(link);
4151c6fd2807SJeff Garzik 
4152c6fd2807SJeff Garzik 	/* clear SError */
4153936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
4154936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
4155f7fe7ad4STejun Heo 	link->eh_info.serror = 0;
4156c6fd2807SJeff Garzik 
4157c6fd2807SJeff Garzik 	/* is double-select really necessary? */
4158c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
4159c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
4160c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
4161c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
4162c6fd2807SJeff Garzik 
4163c6fd2807SJeff Garzik 	/* bail out if no device is present */
4164c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4165c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
4166c6fd2807SJeff Garzik 		return;
4167c6fd2807SJeff Garzik 	}
4168c6fd2807SJeff Garzik 
4169c6fd2807SJeff Garzik 	/* set up device control */
41700d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
41710d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
4172c6fd2807SJeff Garzik 
4173c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
4174c6fd2807SJeff Garzik }
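
/*
 * How the standard reset callbacks compose (illustrative sketch only; in
 * practice libata EH drives them through ap->ops, and the 40 second
 * budget below is an arbitrary example, not a value defined here):
 *
 *	unsigned int classes[ATA_MAX_DEVICES];
 *	unsigned long deadline = jiffies + 40 * HZ;
 *	int rc;
 *
 *	rc = ata_std_prereset(link, deadline);
 *	if (rc == 0)
 *		rc = ata_std_softreset(link, classes, deadline);
 *	if (rc == 0)
 *		ata_std_postreset(link, classes);
 */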
4175c6fd2807SJeff Garzik 
4176c6fd2807SJeff Garzik /**
4177c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
4178c6fd2807SJeff Garzik  *	@dev: device to compare against
4179c6fd2807SJeff Garzik  *	@new_class: class of the new device
4180c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
4181c6fd2807SJeff Garzik  *
4182c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
4183c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
4184c6fd2807SJeff Garzik  *	@new_id.
4185c6fd2807SJeff Garzik  *
4186c6fd2807SJeff Garzik  *	LOCKING:
4187c6fd2807SJeff Garzik  *	None.
4188c6fd2807SJeff Garzik  *
4189c6fd2807SJeff Garzik  *	RETURNS:
4190c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4191c6fd2807SJeff Garzik  */
4192c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4193c6fd2807SJeff Garzik 			       const u16 *new_id)
4194c6fd2807SJeff Garzik {
4195c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
4196a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4197a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4198c6fd2807SJeff Garzik 
4199c6fd2807SJeff Garzik 	if (dev->class != new_class) {
4200c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4201c6fd2807SJeff Garzik 			       dev->class, new_class);
4202c6fd2807SJeff Garzik 		return 0;
4203c6fd2807SJeff Garzik 	}
4204c6fd2807SJeff Garzik 
4205a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4206a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4207a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4208a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4209c6fd2807SJeff Garzik 
4210c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
4211c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4212c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
4213c6fd2807SJeff Garzik 		return 0;
4214c6fd2807SJeff Garzik 	}
4215c6fd2807SJeff Garzik 
4216c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
4217c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4218c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
4219c6fd2807SJeff Garzik 		return 0;
4220c6fd2807SJeff Garzik 	}
4221c6fd2807SJeff Garzik 
4222c6fd2807SJeff Garzik 	return 1;
4223c6fd2807SJeff Garzik }
4224c6fd2807SJeff Garzik 
4225c6fd2807SJeff Garzik /**
4226fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
42273fae450cSHenrik Kretzschmar  *	@dev: target ATA device
4228bff04647STejun Heo  *	@readid_flags: read ID flags
4229c6fd2807SJeff Garzik  *
4230c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
4231c6fd2807SJeff Garzik  *	the port.
4232c6fd2807SJeff Garzik  *
4233c6fd2807SJeff Garzik  *	LOCKING:
4234c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4235c6fd2807SJeff Garzik  *
4236c6fd2807SJeff Garzik  *	RETURNS:
4237c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
4238c6fd2807SJeff Garzik  */
4239fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4240c6fd2807SJeff Garzik {
4241c6fd2807SJeff Garzik 	unsigned int class = dev->class;
42429af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
4243c6fd2807SJeff Garzik 	int rc;
4244c6fd2807SJeff Garzik 
4245c6fd2807SJeff Garzik 	/* read ID data */
4246bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4247c6fd2807SJeff Garzik 	if (rc)
4248fe30911bSTejun Heo 		return rc;
4249c6fd2807SJeff Garzik 
4250c6fd2807SJeff Garzik 	/* is the device still there? */
4251fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
4252fe30911bSTejun Heo 		return -ENODEV;
4253c6fd2807SJeff Garzik 
4254c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4255fe30911bSTejun Heo 	return 0;
4256fe30911bSTejun Heo }
4257fe30911bSTejun Heo 
4258fe30911bSTejun Heo /**
4259fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
4260fe30911bSTejun Heo  *	@dev: device to revalidate
4261422c9daaSTejun Heo  *	@new_class: new class code
4262fe30911bSTejun Heo  *	@readid_flags: read ID flags
4263fe30911bSTejun Heo  *
4264fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4265fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
4266fe30911bSTejun Heo  *
4267fe30911bSTejun Heo  *	LOCKING:
4268fe30911bSTejun Heo  *	Kernel thread context (may sleep)
4269fe30911bSTejun Heo  *
4270fe30911bSTejun Heo  *	RETURNS:
4271fe30911bSTejun Heo  *	0 on success, negative errno otherwise
4272fe30911bSTejun Heo  */
4273422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4274422c9daaSTejun Heo 		       unsigned int readid_flags)
4275fe30911bSTejun Heo {
42766ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
4277fe30911bSTejun Heo 	int rc;
4278fe30911bSTejun Heo 
4279fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
4280fe30911bSTejun Heo 		return -ENODEV;
4281fe30911bSTejun Heo 
4282422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4283422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
4284422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4285422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4286422c9daaSTejun Heo 			       dev->class, new_class);
4287422c9daaSTejun Heo 		rc = -ENODEV;
4288422c9daaSTejun Heo 		goto fail;
4289422c9daaSTejun Heo 	}
4290422c9daaSTejun Heo 
4291fe30911bSTejun Heo 	/* re-read ID */
4292fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
4293fe30911bSTejun Heo 	if (rc)
4294fe30911bSTejun Heo 		goto fail;
4295c6fd2807SJeff Garzik 
4296c6fd2807SJeff Garzik 	/* configure device according to the new ID */
4297efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
42986ddcd3b0STejun Heo 	if (rc)
42996ddcd3b0STejun Heo 		goto fail;
43006ddcd3b0STejun Heo 
43016ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
4302b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4303b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
43046ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
43056ddcd3b0STejun Heo 			       "%llu != %llu\n",
43066ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
43076ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
43088270bec4STejun Heo 
43098270bec4STejun Heo 		/* restore original n_sectors */
43108270bec4STejun Heo 		dev->n_sectors = n_sectors;
43118270bec4STejun Heo 
43126ddcd3b0STejun Heo 		rc = -ENODEV;
43136ddcd3b0STejun Heo 		goto fail;
43146ddcd3b0STejun Heo 	}
43156ddcd3b0STejun Heo 
4316c6fd2807SJeff Garzik 	return 0;
4317c6fd2807SJeff Garzik 
4318c6fd2807SJeff Garzik  fail:
4319c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4320c6fd2807SJeff Garzik 	return rc;
4321c6fd2807SJeff Garzik }
4322c6fd2807SJeff Garzik 
43236919a0a6SAlan Cox struct ata_blacklist_entry {
43246919a0a6SAlan Cox 	const char *model_num;
43256919a0a6SAlan Cox 	const char *model_rev;
43266919a0a6SAlan Cox 	unsigned long horkage;
43276919a0a6SAlan Cox };
43286919a0a6SAlan Cox 
43296919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
43306919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
43316919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
43326919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
43336919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
43346919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
43356919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
43366919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
43376919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
43386919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
43396919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
43406919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
43416919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
43426919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
43436919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
43446919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
43456919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
43466919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
43476919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
43486919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
43496919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
43506919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
43516919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
43526919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
43536919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
43546919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
43556919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
43566919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
43576919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
43586919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
43596919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
436039f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
43613af9a77aSTejun Heo 	/* Odd clown on sil3726/4726 PMPs */
43623af9a77aSTejun Heo 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
43633af9a77aSTejun Heo 						ATA_HORKAGE_SKIP_PM },
43646919a0a6SAlan Cox 
436518d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
436640a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
436718d6e9d5SAlbert Lee 
43686919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
43696919a0a6SAlan Cox 
43706919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
43716919a0a6SAlan Cox 	/* NCQ is slow */
43726919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4373459ad688STejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
437409125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
437509125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
43767acfaf30SPaul Rolland 	/* NCQ is broken */
4377539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
43780e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4379da6f0ec2SPaolo Ornati 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4380e41bd3e8STejun Heo 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4381539cc7c7SJeff Garzik 
438236e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
438336e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
438436e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
438536e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
438636e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
43876919a0a6SAlan Cox 
438816c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
438916c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
439016c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
439116c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
439216c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
43936919a0a6SAlan Cox 
439493328e11SAlan Cox 	/* Devices which report 1 sector over size HPA */
439593328e11SAlan Cox 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
439693328e11SAlan Cox 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
4397b152fcd3SMikko Rapeli 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
439893328e11SAlan Cox 
43996bbfd53dSAlan Cox 	/* Devices which get the IVB wrong */
44006bbfd53dSAlan Cox 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
44016bbfd53dSAlan Cox 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4402e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4403e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4404e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
44056bbfd53dSAlan Cox 
44066919a0a6SAlan Cox 	/* End Marker */
44076919a0a6SAlan Cox 	{ }
4408c6fd2807SJeff Garzik };
4409c6fd2807SJeff Garzik 
4410741b7763SAdrian Bunk static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4411539cc7c7SJeff Garzik {
4412539cc7c7SJeff Garzik 	const char *p;
4413539cc7c7SJeff Garzik 	int len;
4414539cc7c7SJeff Garzik 
4415539cc7c7SJeff Garzik 	/*
4416539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
4417539cc7c7SJeff Garzik 	 */
4418539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
4419539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
4420539cc7c7SJeff Garzik 		len = p - patt;
4421317b50b8SAndrew Paprocki 	else {
4422539cc7c7SJeff Garzik 		len = strlen(name);
4423317b50b8SAndrew Paprocki 		if (!len) {
4424317b50b8SAndrew Paprocki 			if (!*patt)
4425317b50b8SAndrew Paprocki 				return 0;
4426317b50b8SAndrew Paprocki 			return -1;
4427317b50b8SAndrew Paprocki 		}
4428317b50b8SAndrew Paprocki 	}
4429539cc7c7SJeff Garzik 
4430539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
4431539cc7c7SJeff Garzik }
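
/*
 * Matching sketch: only a single trailing '*' is honoured; everything
 * else is a prefix compare whose length is taken from @name.  With the
 * blacklist below,
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0   match
 *	strn_pattern_cmp("Maxtor *", "ST380817AS", '*') != 0       no match
 *
 * so "Maxtor *" covers every Maxtor model string, while a wildcard-free
 * pattern must match the reported string over its whole length.
 */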
4432539cc7c7SJeff Garzik 
443375683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4434c6fd2807SJeff Garzik {
44358bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
44368bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
44376919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4438c6fd2807SJeff Garzik 
44398bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
44408bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4441c6fd2807SJeff Garzik 
44426919a0a6SAlan Cox 	while (ad->model_num) {
4443539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
44446919a0a6SAlan Cox 			if (ad->model_rev == NULL)
44456919a0a6SAlan Cox 				return ad->horkage;
4446539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
44476919a0a6SAlan Cox 				return ad->horkage;
4448c6fd2807SJeff Garzik 		}
44496919a0a6SAlan Cox 		ad++;
4450c6fd2807SJeff Garzik 	}
4451c6fd2807SJeff Garzik 	return 0;
4452c6fd2807SJeff Garzik }
4453c6fd2807SJeff Garzik 
44546919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
44556919a0a6SAlan Cox {
44566919a0a6SAlan Cox 	/* We don't support polling DMA.
44576919a0a6SAlan Cox 	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
44586919a0a6SAlan Cox 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
44596919a0a6SAlan Cox 	 */
44609af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
44616919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
44626919a0a6SAlan Cox 		return 1;
446375683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
44646919a0a6SAlan Cox }
44656919a0a6SAlan Cox 
4466c6fd2807SJeff Garzik /**
44676bbfd53dSAlan Cox  *	ata_is_40wire		-	check drive side detection
44686bbfd53dSAlan Cox  *	@dev: device
44696bbfd53dSAlan Cox  *
44706bbfd53dSAlan Cox  *	Perform drive side detection decoding, allowing for device vendors
44716bbfd53dSAlan Cox  *	who can't follow the documentation.
44726bbfd53dSAlan Cox  */
44736bbfd53dSAlan Cox 
44746bbfd53dSAlan Cox static int ata_is_40wire(struct ata_device *dev)
44756bbfd53dSAlan Cox {
44766bbfd53dSAlan Cox 	if (dev->horkage & ATA_HORKAGE_IVB)
44776bbfd53dSAlan Cox 		return ata_drive_40wire_relaxed(dev->id);
44786bbfd53dSAlan Cox 	return ata_drive_40wire(dev->id);
44796bbfd53dSAlan Cox }
44806bbfd53dSAlan Cox 
44816bbfd53dSAlan Cox /**
4482c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
4483c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
4484c6fd2807SJeff Garzik  *
4485c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
4486c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
4487c6fd2807SJeff Garzik  *	known limits including host controller limits, device
4488c6fd2807SJeff Garzik  *	blacklist, etc...
4489c6fd2807SJeff Garzik  *
4490c6fd2807SJeff Garzik  *	LOCKING:
4491c6fd2807SJeff Garzik  *	None.
4492c6fd2807SJeff Garzik  */
4493c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
4494c6fd2807SJeff Garzik {
44959af5c9c9STejun Heo 	struct ata_link *link = dev->link;
44969af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
4497cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
4498c6fd2807SJeff Garzik 	unsigned long xfer_mask;
4499c6fd2807SJeff Garzik 
4500c6fd2807SJeff Garzik 	/* controller modes available */
4501c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4502c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
4503c6fd2807SJeff Garzik 
45048343f889SRobert Hancock 	/* drive modes available */
4505c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4506c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
4507c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
4508c6fd2807SJeff Garzik 
4509b352e57dSAlan Cox 	/*
4510b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4511b352e57dSAlan Cox 	 *	cable
4512b352e57dSAlan Cox 	 */
4513b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
4514b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
4515b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4516b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
4517b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4518b352e57dSAlan Cox 	}
4519b352e57dSAlan Cox 
4520c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
4521c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4522c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4523c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4524c6fd2807SJeff Garzik 	}
4525c6fd2807SJeff Garzik 
452614d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
452714d66ab7SPetr Vandrovec 	    host->simplex_claimed && host->simplex_claimed != ap) {
4528c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4529c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4530c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4531c6fd2807SJeff Garzik 	}
4532c6fd2807SJeff Garzik 
4533e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4534e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4535e424675fSJeff Garzik 
4536c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4537a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4538c6fd2807SJeff Garzik 
45398343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
45408343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
45418343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
45428343f889SRobert Hancock 	 * solely limited by the cable.
45438343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
45448343f889SRobert Hancock 	 * drive side as well. Cases where a 40wire cable is known
45458343f889SRobert Hancock 	 * to be safe at 80wire speeds are not checked here.
45468343f889SRobert Hancock 	 */
45478343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
45488343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
45498343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
45506bbfd53dSAlan Cox 		    (ata_is_40wire(dev) &&
45518343f889SRobert Hancock 		    (ap->cbl == ATA_CBL_PATA_UNK ||
45528343f889SRobert Hancock 		     ap->cbl == ATA_CBL_PATA80))) {
45538343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
45548343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
45558343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
45568343f889SRobert Hancock 		}
45578343f889SRobert Hancock 
4558c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4559c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4560c6fd2807SJeff Garzik }
4561c6fd2807SJeff Garzik 
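/*
 * Rough worked example of the cable rule above (the bit layout is the
 * standard UDMA mask layout; values are illustrative): each UDMA mode
 * owns one bit of the UDMA part of xfer_mask, bit 0 = UDMA/16,
 * bit 1 = UDMA/25, bit 2 = UDMA/33, bit 3 = UDMA/44 and so on.  The
 * constant 0xF8 (1111 1000b) therefore selects UDMA/44 and everything
 * faster; if any of those bits survive the controller, drive and
 * blacklist masks but only a 40-wire cable can be assumed, they are
 * cleared and the device is limited to UDMA/33.
 */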
4562c6fd2807SJeff Garzik /**
4563c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4564c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4565c6fd2807SJeff Garzik  *
4566c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4567c6fd2807SJeff Garzik  *	on the port to which it is attached.
4568c6fd2807SJeff Garzik  *
4569c6fd2807SJeff Garzik  *	LOCKING:
4570c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4571c6fd2807SJeff Garzik  *
4572c6fd2807SJeff Garzik  *	RETURNS:
4573c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4574c6fd2807SJeff Garzik  */
4575c6fd2807SJeff Garzik 
4576c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4577c6fd2807SJeff Garzik {
4578c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4579c6fd2807SJeff Garzik 	unsigned int err_mask;
4580c6fd2807SJeff Garzik 
4581c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4582c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4583c6fd2807SJeff Garzik 
4584464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4585464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4586464cf177STejun Heo 	 */
4587c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4588c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4589c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4590464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4591c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4592b9f8ab2dSAlan Cox 	/* If we are using IORDY we must send the mode setting command */
4593b9f8ab2dSAlan Cox 	if (ata_pio_need_iordy(dev))
4594c6fd2807SJeff Garzik 		tf.nsect = dev->xfer_mode;
4595b9f8ab2dSAlan Cox 	/* If the device has IORDY and the controller does not - turn it off */
4596b9f8ab2dSAlan Cox  	else if (ata_id_has_iordy(dev->id))
4597b9f8ab2dSAlan Cox 		tf.nsect = 0x01;
4598b9f8ab2dSAlan Cox 	else /* In the ancient relic department - skip all of this */
4599b9f8ab2dSAlan Cox 		return 0;
4600c6fd2807SJeff Garzik 
46012b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4602c6fd2807SJeff Garzik 
4603c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4604c6fd2807SJeff Garzik 	return err_mask;
4605c6fd2807SJeff Garzik }
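/*
 * For reference, a sketch of the values that end up in tf.nsect above,
 * following the usual SET FEATURES - XFER MODE encoding (treat the
 * exact constants as illustrative): XFER_PIO_0..XFER_PIO_4 map to
 * 0x08..0x0C, XFER_MW_DMA_0..2 to 0x20..0x22 and XFER_UDMA_0..6 to
 * 0x40..0x46, while the 0x01 written in the no-IORDY branch selects
 * "PIO default mode, disable IORDY".
 */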
4606c6fd2807SJeff Garzik /**
4607218f3d30SJeff Garzik  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
46089f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
46099f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
4610218f3d30SJeff Garzik  *	@feature: The feature value to place in the sector count field
46119f45cbd3SKristen Carlson Accardi  *
46129f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4613218f3d30SJeff Garzik  *	on its port, with the sector count set to @feature.
46149f45cbd3SKristen Carlson Accardi  *
46159f45cbd3SKristen Carlson Accardi  *	LOCKING:
46169f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
46179f45cbd3SKristen Carlson Accardi  *
46189f45cbd3SKristen Carlson Accardi  *	RETURNS:
46199f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
46209f45cbd3SKristen Carlson Accardi  */
4621218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4622218f3d30SJeff Garzik 					u8 feature)
46239f45cbd3SKristen Carlson Accardi {
46249f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
46259f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
46269f45cbd3SKristen Carlson Accardi 
46279f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
46289f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
46299f45cbd3SKristen Carlson Accardi 
46309f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
46319f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
46329f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
46339f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
46349f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
4635218f3d30SJeff Garzik 	tf.nsect = feature;
46369f45cbd3SKristen Carlson Accardi 
46372b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
46389f45cbd3SKristen Carlson Accardi 
46399f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
46409f45cbd3SKristen Carlson Accardi 	return err_mask;
46419f45cbd3SKristen Carlson Accardi }
46429f45cbd3SKristen Carlson Accardi 
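/*
 * Minimal usage sketch, assuming the usual libata constants for SATA
 * feature control:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 *
 * would ask the device to enable SATA Asynchronous Notification; the
 * subcommand lands in the feature register and the feature being
 * toggled in the sector count register.
 */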
46439f45cbd3SKristen Carlson Accardi /**
4644c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4645c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4646c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4647c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4648c6fd2807SJeff Garzik  *
4649c6fd2807SJeff Garzik  *	LOCKING:
4650c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4651c6fd2807SJeff Garzik  *
4652c6fd2807SJeff Garzik  *	RETURNS:
4653c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4654c6fd2807SJeff Garzik  */
4655c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4656c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4657c6fd2807SJeff Garzik {
4658c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4659c6fd2807SJeff Garzik 	unsigned int err_mask;
4660c6fd2807SJeff Garzik 
4661c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4662c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4663c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4664c6fd2807SJeff Garzik 
4665c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4666c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
4667c6fd2807SJeff Garzik 
4668c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4669c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4670c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4671c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4672c6fd2807SJeff Garzik 	tf.nsect = sectors;
4673c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4674c6fd2807SJeff Garzik 
46752b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
467618b2466cSAlan Cox 	/* A clean abort indicates an original or just out of spec drive,
467718b2466cSAlan Cox 	   and we should continue, as we issue the setup based on the
467818b2466cSAlan Cox 	   drive's reported working geometry */
467918b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
468018b2466cSAlan Cox 		err_mask = 0;
4681c6fd2807SJeff Garzik 
4682c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4683c6fd2807SJeff Garzik 	return err_mask;
4684c6fd2807SJeff Garzik }
4685c6fd2807SJeff Garzik 
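/*
 * Minimal usage sketch (assuming the IDENTIFY retry path calls it this
 * way, with id[] holding the raw IDENTIFY data):
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);
 *
 * i.e. the default number of heads is taken from IDENTIFY word 3 and
 * the default sectors per track from word 6.
 */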
4686c6fd2807SJeff Garzik /**
4687c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4688c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4689c6fd2807SJeff Garzik  *
4690c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4691c6fd2807SJeff Garzik  *
4692c6fd2807SJeff Garzik  *	LOCKING:
4693cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4694c6fd2807SJeff Garzik  */
469570e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4696c6fd2807SJeff Garzik {
4697c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4698ff2aeb1eSTejun Heo 	struct scatterlist *sg = qc->sg;
4699c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4700c6fd2807SJeff Garzik 
4701c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4702c6fd2807SJeff Garzik 
4703dde20207SJames Bottomley 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4704c6fd2807SJeff Garzik 
4705dde20207SJames Bottomley 	if (qc->n_elem)
4706dde20207SJames Bottomley 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4707c6fd2807SJeff Garzik 
4708c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4709ff2aeb1eSTejun Heo 	qc->sg = NULL;
4710c6fd2807SJeff Garzik }
4711c6fd2807SJeff Garzik 
4712c6fd2807SJeff Garzik /**
4713c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4714c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4715c6fd2807SJeff Garzik  *
4716c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4717c6fd2807SJeff Garzik  *	associated with the current disk command.
4718c6fd2807SJeff Garzik  *
4719c6fd2807SJeff Garzik  *	LOCKING:
4720cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4721c6fd2807SJeff Garzik  *
4722c6fd2807SJeff Garzik  */
4723c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4724c6fd2807SJeff Garzik {
4725c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4726c6fd2807SJeff Garzik 	struct scatterlist *sg;
4727ff2aeb1eSTejun Heo 	unsigned int si, pi;
4728c6fd2807SJeff Garzik 
4729ff2aeb1eSTejun Heo 	pi = 0;
4730ff2aeb1eSTejun Heo 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
4731c6fd2807SJeff Garzik 		u32 addr, offset;
4732c6fd2807SJeff Garzik 		u32 sg_len, len;
4733c6fd2807SJeff Garzik 
4734c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4735c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4736c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4737c6fd2807SJeff Garzik 		 */
4738c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4739c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4740c6fd2807SJeff Garzik 
4741c6fd2807SJeff Garzik 		while (sg_len) {
4742c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4743c6fd2807SJeff Garzik 			len = sg_len;
4744c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4745c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4746c6fd2807SJeff Garzik 
4747ff2aeb1eSTejun Heo 			ap->prd[pi].addr = cpu_to_le32(addr);
4748ff2aeb1eSTejun Heo 			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4749ff2aeb1eSTejun Heo 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4750c6fd2807SJeff Garzik 
4751ff2aeb1eSTejun Heo 			pi++;
4752c6fd2807SJeff Garzik 			sg_len -= len;
4753c6fd2807SJeff Garzik 			addr += len;
4754c6fd2807SJeff Garzik 		}
4755c6fd2807SJeff Garzik 	}
4756c6fd2807SJeff Garzik 
4757ff2aeb1eSTejun Heo 	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4758c6fd2807SJeff Garzik }
4759b9a4197eSTejun Heo 
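/*
 * Worked example of the 64K boundary split above (numbers purely
 * illustrative): an S/G element with DMA address 0x1fff0 and length
 * 0x8000 crosses a 64K boundary, so it is emitted as two PRD entries,
 * (0x1fff0, 0x0010) and (0x20000, 0x7ff0); ATA_PRD_EOT is then or-ed
 * into the flags_len of the last entry written.
 */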
4760c6fd2807SJeff Garzik /**
4761d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4762d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4763d26fc955SAlan Cox  *
4764d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4765d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4766d26fc955SAlan Cox  *	so that we avoid writing any zero-length (i.e. 64K) records
4767d26fc955SAlan Cox  *	for controllers that don't follow the spec.
4768d26fc955SAlan Cox  *
4769d26fc955SAlan Cox  *	LOCKING:
4770d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4771d26fc955SAlan Cox  *
4772d26fc955SAlan Cox  */
4773d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4774d26fc955SAlan Cox {
4775d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4776d26fc955SAlan Cox 	struct scatterlist *sg;
4777ff2aeb1eSTejun Heo 	unsigned int si, pi;
4778d26fc955SAlan Cox 
4779ff2aeb1eSTejun Heo 	pi = 0;
4780ff2aeb1eSTejun Heo 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
4781d26fc955SAlan Cox 		u32 addr, offset;
4782d26fc955SAlan Cox 		u32 sg_len, len, blen;
4783d26fc955SAlan Cox 
4784d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4785d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4786d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4787d26fc955SAlan Cox 		 */
4788d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4789d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4790d26fc955SAlan Cox 
4791d26fc955SAlan Cox 		while (sg_len) {
4792d26fc955SAlan Cox 			offset = addr & 0xffff;
4793d26fc955SAlan Cox 			len = sg_len;
4794d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4795d26fc955SAlan Cox 				len = 0x10000 - offset;
4796d26fc955SAlan Cox 
4797d26fc955SAlan Cox 			blen = len & 0xffff;
4798ff2aeb1eSTejun Heo 			ap->prd[pi].addr = cpu_to_le32(addr);
4799d26fc955SAlan Cox 			if (blen == 0) {
4800d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4801d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4802ff2aeb1eSTejun Heo 				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
4803d26fc955SAlan Cox 				blen = 0x8000;
4804ff2aeb1eSTejun Heo 				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
4805d26fc955SAlan Cox 			}
4806ff2aeb1eSTejun Heo 			ap->prd[pi].flags_len = cpu_to_le32(blen);
4807ff2aeb1eSTejun Heo 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4808d26fc955SAlan Cox 
4809ff2aeb1eSTejun Heo 			pi++;
4810d26fc955SAlan Cox 			sg_len -= len;
4811d26fc955SAlan Cox 			addr += len;
4812d26fc955SAlan Cox 		}
4813d26fc955SAlan Cox 	}
4814d26fc955SAlan Cox 
4815ff2aeb1eSTejun Heo 	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4816d26fc955SAlan Cox }
4817d26fc955SAlan Cox 
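/*
 * Worked example of the quirk handling above (illustrative): a 64K
 * segment at DMA address 0x100000 would normally need a PRD length
 * field of 0x0000, the spec's encoding for 64K.  The dumb variant
 * instead emits two 32K entries, (0x100000, 0x8000) and
 * (0x108000, 0x8000), so chips such as the CS5530 that mishandle the
 * 0x0000 encoding never see it.
 */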
4818d26fc955SAlan Cox /**
4819c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4820c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4821c6fd2807SJeff Garzik  *
4822c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4823c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4824c6fd2807SJeff Garzik  *	supplied PACKET command.
4825c6fd2807SJeff Garzik  *
4826c6fd2807SJeff Garzik  *	LOCKING:
4827cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4828c6fd2807SJeff Garzik  *
4829c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4830c6fd2807SJeff Garzik  *               nonzero otherwise
4831c6fd2807SJeff Garzik  */
4832c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4833c6fd2807SJeff Garzik {
4834c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4835c6fd2807SJeff Garzik 
4836b9a4197eSTejun Heo 	/* Don't allow DMA if the length isn't a multiple of 16 bytes.
4837b9a4197eSTejun Heo 	 * Quite a few ATAPI devices choke on such DMA requests.
4838b9a4197eSTejun Heo 	 */
4839b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
48406f23a31dSAlbert Lee 		return 1;
48416f23a31dSAlbert Lee 
4842c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4843b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4844c6fd2807SJeff Garzik 
4845b9a4197eSTejun Heo 	return 0;
4846c6fd2807SJeff Garzik }
4847b9a4197eSTejun Heo 
4848c6fd2807SJeff Garzik /**
484931cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
485031cc23b3STejun Heo  *	@qc: ATA command in question
485131cc23b3STejun Heo  *
485231cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
485331cc23b3STejun Heo  *	not.  As the upper layer only knows the queue depth, we are
485431cc23b3STejun Heo  *	responsible for maintaining exclusion.  This function checks
485531cc23b3STejun Heo  *	whether a new command @qc can be issued.
485631cc23b3STejun Heo  *
485731cc23b3STejun Heo  *	LOCKING:
485831cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
485931cc23b3STejun Heo  *
486031cc23b3STejun Heo  *	RETURNS:
486131cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
486231cc23b3STejun Heo  */
486331cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
486431cc23b3STejun Heo {
486531cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
486631cc23b3STejun Heo 
486731cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
486831cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
486931cc23b3STejun Heo 			return 0;
487031cc23b3STejun Heo 	} else {
487131cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
487231cc23b3STejun Heo 			return 0;
487331cc23b3STejun Heo 	}
487431cc23b3STejun Heo 
487531cc23b3STejun Heo 	return ATA_DEFER_LINK;
487631cc23b3STejun Heo }
487731cc23b3STejun Heo 
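/*
 * Behaviour sketch: a new NCQ command may join other NCQ commands but
 * is deferred while a non-NCQ command owns active_tag; a new non-NCQ
 * command is deferred whenever anything at all, NCQ or not, is still
 * outstanding on the link.
 */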
487831cc23b3STejun Heo /**
4879c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4880c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4881c6fd2807SJeff Garzik  *
4882c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4883c6fd2807SJeff Garzik  *
4884c6fd2807SJeff Garzik  *	LOCKING:
4885cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4886c6fd2807SJeff Garzik  */
4887c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4888c6fd2807SJeff Garzik {
4889c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4890c6fd2807SJeff Garzik 		return;
4891c6fd2807SJeff Garzik 
4892c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4893c6fd2807SJeff Garzik }
4894c6fd2807SJeff Garzik 
4895d26fc955SAlan Cox /**
4896d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4897d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4898d26fc955SAlan Cox  *
4899d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4900d26fc955SAlan Cox  *
4901d26fc955SAlan Cox  *	LOCKING:
4902d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4903d26fc955SAlan Cox  */
4904d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4905d26fc955SAlan Cox {
4906d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4907d26fc955SAlan Cox 		return;
4908d26fc955SAlan Cox 
4909d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4910d26fc955SAlan Cox }
4911d26fc955SAlan Cox 
4912c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4913c6fd2807SJeff Garzik 
4914c6fd2807SJeff Garzik /**
4915c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4916c6fd2807SJeff Garzik  *	@qc: Command to be associated
4917c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4918c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4919c6fd2807SJeff Garzik  *
4920c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4921c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4922c6fd2807SJeff Garzik  *	elements.
4923c6fd2807SJeff Garzik  *
4924c6fd2807SJeff Garzik  *	LOCKING:
4925cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4926c6fd2807SJeff Garzik  */
4927c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4928c6fd2807SJeff Garzik 		 unsigned int n_elem)
4929c6fd2807SJeff Garzik {
4930ff2aeb1eSTejun Heo 	qc->sg = sg;
4931c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4932ff2aeb1eSTejun Heo 	qc->cursg = qc->sg;
4933ff2aeb1eSTejun Heo }
4934ff2aeb1eSTejun Heo 
4935c6fd2807SJeff Garzik /**
4936c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4937c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4938c6fd2807SJeff Garzik  *
4939c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4940c6fd2807SJeff Garzik  *
4941c6fd2807SJeff Garzik  *	LOCKING:
4942cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4943c6fd2807SJeff Garzik  *
4944c6fd2807SJeff Garzik  *	RETURNS:
4945c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4946c6fd2807SJeff Garzik  *
4947c6fd2807SJeff Garzik  */
4948c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4949c6fd2807SJeff Garzik {
4950c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4951dde20207SJames Bottomley 	unsigned int n_elem;
4952c6fd2807SJeff Garzik 
495344877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4954c6fd2807SJeff Garzik 
4955dde20207SJames Bottomley 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4956dde20207SJames Bottomley 	if (n_elem < 1)
4957c6fd2807SJeff Garzik 		return -1;
4958c6fd2807SJeff Garzik 
4959dde20207SJames Bottomley 	DPRINTK("%d sg elements mapped\n", n_elem);
4960dde20207SJames Bottomley 
4961dde20207SJames Bottomley 	qc->n_elem = n_elem;
4962f92a2636STejun Heo 	qc->flags |= ATA_QCFLAG_DMAMAP;
4963c6fd2807SJeff Garzik 
4964c6fd2807SJeff Garzik 	return 0;
4965c6fd2807SJeff Garzik }
4966c6fd2807SJeff Garzik 
4967c6fd2807SJeff Garzik /**
4968c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4969c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4970c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4971c6fd2807SJeff Garzik  *
4972c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4973c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4974c6fd2807SJeff Garzik  *	vice-versa.
4975c6fd2807SJeff Garzik  *
4976c6fd2807SJeff Garzik  *	LOCKING:
4977c6fd2807SJeff Garzik  *	Inherited from caller.
4978c6fd2807SJeff Garzik  */
4979c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4980c6fd2807SJeff Garzik {
4981c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4982c6fd2807SJeff Garzik 	unsigned int i;
4983c6fd2807SJeff Garzik 
4984c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4985c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4986c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4987c6fd2807SJeff Garzik }
4988c6fd2807SJeff Garzik 
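/*
 * Illustrative example (big-endian hosts only, a no-op elsewhere): an
 * IDENTIFY word whose little-endian in-memory bytes are 0xC0 0x3F is
 * read by a BE CPU as 0xC03F; le16_to_cpu() turns it back into the
 * intended 0x3FC0 so the rest of libata can treat the buffer as
 * native-endian 16-bit words.
 */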
4989c6fd2807SJeff Garzik /**
49900d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
499155dba312STejun Heo  *	@dev: device to target
4992c6fd2807SJeff Garzik  *	@buf: data buffer
4993c6fd2807SJeff Garzik  *	@buflen: buffer length
49940affa456SLinus Nilsson  *	@rw: read/write
4995c6fd2807SJeff Garzik  *
4996c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4997c6fd2807SJeff Garzik  *
4998c6fd2807SJeff Garzik  *	LOCKING:
4999c6fd2807SJeff Garzik  *	Inherited from caller.
500055dba312STejun Heo  *
500155dba312STejun Heo  *	RETURNS:
500255dba312STejun Heo  *	Bytes consumed.
5003c6fd2807SJeff Garzik  */
500455dba312STejun Heo unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
500555dba312STejun Heo 			   unsigned int buflen, int rw)
5006c6fd2807SJeff Garzik {
500755dba312STejun Heo 	struct ata_port *ap = dev->link->ap;
500855dba312STejun Heo 	void __iomem *data_addr = ap->ioaddr.data_addr;
5009c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
5010c6fd2807SJeff Garzik 
5011c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
501255dba312STejun Heo 	if (rw == READ)
501355dba312STejun Heo 		ioread16_rep(data_addr, buf, words);
5014c6fd2807SJeff Garzik 	else
501555dba312STejun Heo 		iowrite16_rep(data_addr, buf, words);
5016c6fd2807SJeff Garzik 
5017c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
5018c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
50194ca4e439SAl Viro 		__le16 align_buf[1] = { 0 };
5020c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
5021c6fd2807SJeff Garzik 
502255dba312STejun Heo 		if (rw == READ) {
502355dba312STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(data_addr));
5024c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
502555dba312STejun Heo 		} else {
502655dba312STejun Heo 			memcpy(align_buf, trailing_buf, 1);
502755dba312STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
5028c6fd2807SJeff Garzik 		}
502955dba312STejun Heo 		words++;
5030c6fd2807SJeff Garzik 	}
503155dba312STejun Heo 
503255dba312STejun Heo 	return words << 1;
5033c6fd2807SJeff Garzik }
5034c6fd2807SJeff Garzik 
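/*
 * Rough example of the trailing-byte handling above: a 7-byte write
 * goes out as three words via iowrite16_rep() plus one final
 * iowrite16() whose low byte is the 7th data byte and whose high byte
 * is zero padding, and the function reports 8 bytes consumed
 * (words << 1), one more than the caller asked for.
 */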
5035c6fd2807SJeff Garzik /**
50360d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
503755dba312STejun Heo  *	@dev: device to target
5038c6fd2807SJeff Garzik  *	@buf: data buffer
5039c6fd2807SJeff Garzik  *	@buflen: buffer length
50400affa456SLinus Nilsson  *	@rw: read/write
5041c6fd2807SJeff Garzik  *
5042c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
5043c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
5044c6fd2807SJeff Garzik  *
5045c6fd2807SJeff Garzik  *	LOCKING:
5046c6fd2807SJeff Garzik  *	Inherited from caller.
504755dba312STejun Heo  *
504855dba312STejun Heo  *	RETURNS:
504955dba312STejun Heo  *	Bytes consumed.
5050c6fd2807SJeff Garzik  */
505155dba312STejun Heo unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
505255dba312STejun Heo 				 unsigned int buflen, int rw)
5053c6fd2807SJeff Garzik {
5054c6fd2807SJeff Garzik 	unsigned long flags;
505555dba312STejun Heo 	unsigned int consumed;
505655dba312STejun Heo 
5057c6fd2807SJeff Garzik 	local_irq_save(flags);
505855dba312STejun Heo 	consumed = ata_data_xfer(dev, buf, buflen, rw);
5059c6fd2807SJeff Garzik 	local_irq_restore(flags);
506055dba312STejun Heo 
506155dba312STejun Heo 	return consumed;
5062c6fd2807SJeff Garzik }
5063c6fd2807SJeff Garzik 
5064c6fd2807SJeff Garzik 
5065c6fd2807SJeff Garzik /**
50665a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
5067c6fd2807SJeff Garzik  *	@qc: Command on going
5068c6fd2807SJeff Garzik  *
50695a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
5070c6fd2807SJeff Garzik  *
5071c6fd2807SJeff Garzik  *	LOCKING:
5072c6fd2807SJeff Garzik  *	Inherited from caller.
5073c6fd2807SJeff Garzik  */
5074c6fd2807SJeff Garzik 
5075c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
5076c6fd2807SJeff Garzik {
5077c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5078c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5079c6fd2807SJeff Garzik 	struct page *page;
5080c6fd2807SJeff Garzik 	unsigned int offset;
5081c6fd2807SJeff Garzik 	unsigned char *buf;
5082c6fd2807SJeff Garzik 
50835a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
5084c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5085c6fd2807SJeff Garzik 
508645711f1aSJens Axboe 	page = sg_page(qc->cursg);
508787260216SJens Axboe 	offset = qc->cursg->offset + qc->cursg_ofs;
5088c6fd2807SJeff Garzik 
5089c6fd2807SJeff Garzik 	/* get the current page and offset */
5090c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5091c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5092c6fd2807SJeff Garzik 
5093c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5094c6fd2807SJeff Garzik 
5095c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5096c6fd2807SJeff Garzik 		unsigned long flags;
5097c6fd2807SJeff Garzik 
5098c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
5099c6fd2807SJeff Garzik 		local_irq_save(flags);
5100c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5101c6fd2807SJeff Garzik 
5102c6fd2807SJeff Garzik 		/* do the actual data transfer */
51035a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5104c6fd2807SJeff Garzik 
5105c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5106c6fd2807SJeff Garzik 		local_irq_restore(flags);
5107c6fd2807SJeff Garzik 	} else {
5108c6fd2807SJeff Garzik 		buf = page_address(page);
51095a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5110c6fd2807SJeff Garzik 	}
5111c6fd2807SJeff Garzik 
51125a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
51135a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
5114c6fd2807SJeff Garzik 
511587260216SJens Axboe 	if (qc->cursg_ofs == qc->cursg->length) {
511687260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5117c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5118c6fd2807SJeff Garzik 	}
5119c6fd2807SJeff Garzik }
5120c6fd2807SJeff Garzik 
5121c6fd2807SJeff Garzik /**
51225a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
5123c6fd2807SJeff Garzik  *	@qc: Command on going
5124c6fd2807SJeff Garzik  *
51255a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
5126c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
5127c6fd2807SJeff Garzik  *
5128c6fd2807SJeff Garzik  *	LOCKING:
5129c6fd2807SJeff Garzik  *	Inherited from caller.
5130c6fd2807SJeff Garzik  */
5131c6fd2807SJeff Garzik 
5132c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
5133c6fd2807SJeff Garzik {
5134c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
5135c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
5136c6fd2807SJeff Garzik 		unsigned int nsect;
5137c6fd2807SJeff Garzik 
5138c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
5139c6fd2807SJeff Garzik 
51405a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5141726f0785STejun Heo 			    qc->dev->multi_count);
5142c6fd2807SJeff Garzik 		while (nsect--)
5143c6fd2807SJeff Garzik 			ata_pio_sector(qc);
5144c6fd2807SJeff Garzik 	} else
5145c6fd2807SJeff Garzik 		ata_pio_sector(qc);
51464cc980b3SAlbert Lee 
51474cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
5148c6fd2807SJeff Garzik }
5149c6fd2807SJeff Garzik 
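/*
 * Sketch of the multi-sector case: for a READ MULTIPLE command with
 * multi_count == 8, 24 KiB still to transfer and 512-byte sectors,
 * nsect = min(48, 8) = 8, so eight ata_pio_sector() calls service this
 * single DRQ block before the altstatus read flushes things.
 */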
5150c6fd2807SJeff Garzik /**
5151c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
5152c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
5153c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
5154c6fd2807SJeff Garzik  *
5155c6fd2807SJeff Garzik  *	When device has indicated its readiness to accept
5156c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
5157c6fd2807SJeff Garzik  *
5158c6fd2807SJeff Garzik  *	LOCKING:
5159c6fd2807SJeff Garzik  *	caller.
5160c6fd2807SJeff Garzik  */
5161c6fd2807SJeff Garzik 
5162c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5163c6fd2807SJeff Garzik {
5164c6fd2807SJeff Garzik 	/* send SCSI cdb */
5165c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
5166c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
5167c6fd2807SJeff Garzik 
5168c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5169c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
5170c6fd2807SJeff Garzik 
5171c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
51720dc36888STejun Heo 	case ATAPI_PROT_PIO:
5173c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
5174c6fd2807SJeff Garzik 		break;
51750dc36888STejun Heo 	case ATAPI_PROT_NODATA:
5176c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5177c6fd2807SJeff Garzik 		break;
51780dc36888STejun Heo 	case ATAPI_PROT_DMA:
5179c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5180c6fd2807SJeff Garzik 		/* initiate bmdma */
5181c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
5182c6fd2807SJeff Garzik 		break;
5183c6fd2807SJeff Garzik 	}
5184c6fd2807SJeff Garzik }
5185c6fd2807SJeff Garzik 
5186c6fd2807SJeff Garzik /**
5187c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
5188c6fd2807SJeff Garzik  *	@qc: Command on going
5189c6fd2807SJeff Garzik  *	@bytes: number of bytes
5190c6fd2807SJeff Garzik  *
5191c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5192c6fd2807SJeff Garzik  *
5193c6fd2807SJeff Garzik  *	LOCKING:
5194c6fd2807SJeff Garzik  *	Inherited from caller.
5195c6fd2807SJeff Garzik  *
5196c6fd2807SJeff Garzik  */
5197140b5e59STejun Heo static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5198c6fd2807SJeff Garzik {
519956c819dfSTejun Heo 	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
5200c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
520156c819dfSTejun Heo 	struct ata_device *dev = qc->dev;
520256c819dfSTejun Heo 	struct ata_eh_info *ehi = &dev->link->eh_info;
5203140b5e59STejun Heo 	struct scatterlist *sg;
5204c6fd2807SJeff Garzik 	struct page *page;
5205c6fd2807SJeff Garzik 	unsigned char *buf;
520656c819dfSTejun Heo 	unsigned int offset, count, consumed;
5207c6fd2807SJeff Garzik 
5208c6fd2807SJeff Garzik next_sg:
5209140b5e59STejun Heo 	sg = qc->cursg;
5210140b5e59STejun Heo 	if (unlikely(!sg)) {
5211fa2fc7f4SJames Bottomley 		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
5212140b5e59STejun Heo 				  "buf=%u cur=%u bytes=%u",
5213140b5e59STejun Heo 				  qc->nbytes, qc->curbytes, bytes);
5214140b5e59STejun Heo 		return -1;
5215c6fd2807SJeff Garzik 	}
5216c6fd2807SJeff Garzik 
521745711f1aSJens Axboe 	page = sg_page(sg);
5218c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
5219c6fd2807SJeff Garzik 
5220c6fd2807SJeff Garzik 	/* get the current page and offset */
5221c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5222c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5223c6fd2807SJeff Garzik 
5224c6fd2807SJeff Garzik 	/* don't overrun current sg */
5225c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
5226c6fd2807SJeff Garzik 
5227c6fd2807SJeff Garzik 	/* don't cross page boundaries */
5228c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
5229c6fd2807SJeff Garzik 
5230c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5231c6fd2807SJeff Garzik 
5232c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5233c6fd2807SJeff Garzik 		unsigned long flags;
5234c6fd2807SJeff Garzik 
5235c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
5236c6fd2807SJeff Garzik 		local_irq_save(flags);
5237c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5238c6fd2807SJeff Garzik 
5239c6fd2807SJeff Garzik 		/* do the actual data transfer */
524056c819dfSTejun Heo 		consumed = ap->ops->data_xfer(dev,  buf + offset, count, rw);
5241c6fd2807SJeff Garzik 
5242c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5243c6fd2807SJeff Garzik 		local_irq_restore(flags);
5244c6fd2807SJeff Garzik 	} else {
5245c6fd2807SJeff Garzik 		buf = page_address(page);
524656c819dfSTejun Heo 		consumed = ap->ops->data_xfer(dev,  buf + offset, count, rw);
5247c6fd2807SJeff Garzik 	}
5248c6fd2807SJeff Garzik 
524956c819dfSTejun Heo 	bytes -= min(bytes, consumed);
5250c6fd2807SJeff Garzik 	qc->curbytes += count;
5251c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
5252c6fd2807SJeff Garzik 
5253c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
525487260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5255c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5256c6fd2807SJeff Garzik 	}
5257c6fd2807SJeff Garzik 
525856c819dfSTejun Heo 	/* consumed can be larger than count only for the last transfer */
525956c819dfSTejun Heo 	WARN_ON(qc->cursg && count != consumed);
526056c819dfSTejun Heo 
5261c6fd2807SJeff Garzik 	if (bytes)
5262c6fd2807SJeff Garzik 		goto next_sg;
5263140b5e59STejun Heo 	return 0;
5264c6fd2807SJeff Garzik }
5265c6fd2807SJeff Garzik 
5266c6fd2807SJeff Garzik /**
5267c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
5268c6fd2807SJeff Garzik  *	@qc: Command on going
5269c6fd2807SJeff Garzik  *
5270c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5271c6fd2807SJeff Garzik  *
5272c6fd2807SJeff Garzik  *	LOCKING:
5273c6fd2807SJeff Garzik  *	Inherited from caller.
5274c6fd2807SJeff Garzik  */
5275c6fd2807SJeff Garzik 
5276c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5277c6fd2807SJeff Garzik {
5278c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5279c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
528056c819dfSTejun Heo 	struct ata_eh_info *ehi = &dev->link->eh_info;
5281c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
5282c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5283c6fd2807SJeff Garzik 
5284c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
5285c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
5286c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
5287c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
5288c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
5289c6fd2807SJeff Garzik 	 */
5290c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
5291c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
5292c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
5293c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
5294c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
5295c6fd2807SJeff Garzik 
5296c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
52970106372dSAlbert Lee 	if (unlikely(ireason & (1 << 0)))
529856c819dfSTejun Heo 		goto atapi_check;
5299c6fd2807SJeff Garzik 
5300c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
5301c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
53020106372dSAlbert Lee 	if (unlikely(do_write != i_write))
530356c819dfSTejun Heo 		goto atapi_check;
53040106372dSAlbert Lee 
53050106372dSAlbert Lee 	if (unlikely(!bytes))
530656c819dfSTejun Heo 		goto atapi_check;
5307c6fd2807SJeff Garzik 
530844877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5309c6fd2807SJeff Garzik 
531056c819dfSTejun Heo 	if (unlikely(__atapi_pio_bytes(qc, bytes)))
5311140b5e59STejun Heo 		goto err_out;
53124cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
5313c6fd2807SJeff Garzik 
5314c6fd2807SJeff Garzik 	return;
5315c6fd2807SJeff Garzik 
531656c819dfSTejun Heo  atapi_check:
531756c819dfSTejun Heo 	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
531856c819dfSTejun Heo 			  ireason, bytes);
5319c6fd2807SJeff Garzik  err_out:
5320c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
5321c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
5322c6fd2807SJeff Garzik }
5323c6fd2807SJeff Garzik 
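/*
 * For reference, per the usual ATAPI interrupt-reason layout (treat as
 * a sketch): bit 0 of ireason is CoD (1 = command/CDB, 0 = user data)
 * and bit 1 is IO (1 = transfer to host).  The checks above therefore
 * reject a DRQ phase that claims to be a CDB phase, a direction that
 * contradicts qc->tf, and a zero byte count read from the lbam/lbah
 * byte count registers.
 */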
5324c6fd2807SJeff Garzik /**
5325c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5326c6fd2807SJeff Garzik  *	@ap: the target ata_port
5327c6fd2807SJeff Garzik  *	@qc: qc on going
5328c6fd2807SJeff Garzik  *
5329c6fd2807SJeff Garzik  *	RETURNS:
5330c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
5331c6fd2807SJeff Garzik  */
5332c6fd2807SJeff Garzik 
5333c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5334c6fd2807SJeff Garzik {
5335c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
5336c6fd2807SJeff Garzik 		return 1;
5337c6fd2807SJeff Garzik 
5338c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
5339c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
5340c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
5341c6fd2807SJeff Garzik 		    return 1;
5342c6fd2807SJeff Garzik 
5343405e66b3STejun Heo 		if (ata_is_atapi(qc->tf.protocol) &&
5344c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5345c6fd2807SJeff Garzik 			return 1;
5346c6fd2807SJeff Garzik 	}
5347c6fd2807SJeff Garzik 
5348c6fd2807SJeff Garzik 	return 0;
5349c6fd2807SJeff Garzik }
5350c6fd2807SJeff Garzik 
5351c6fd2807SJeff Garzik /**
5352c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5353c6fd2807SJeff Garzik  *	@qc: Command to complete
5354c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5355c6fd2807SJeff Garzik  *
5356c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5357c6fd2807SJeff Garzik  *
5358c6fd2807SJeff Garzik  *	LOCKING:
5359cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5360c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5361c6fd2807SJeff Garzik  */
5362c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5363c6fd2807SJeff Garzik {
5364c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5365c6fd2807SJeff Garzik 	unsigned long flags;
5366c6fd2807SJeff Garzik 
5367c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5368c6fd2807SJeff Garzik 		if (in_wq) {
5369c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5370c6fd2807SJeff Garzik 
5371cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5372cca3974eSJeff Garzik 			 * released.
5373c6fd2807SJeff Garzik 			 */
5374c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5375c6fd2807SJeff Garzik 			if (qc) {
5376c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
537783625006SAkira Iguchi 					ap->ops->irq_on(ap);
5378c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5379c6fd2807SJeff Garzik 				} else
5380c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5381c6fd2807SJeff Garzik 			}
5382c6fd2807SJeff Garzik 
5383c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5384c6fd2807SJeff Garzik 		} else {
5385c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5386c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5387c6fd2807SJeff Garzik 			else
5388c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5389c6fd2807SJeff Garzik 		}
5390c6fd2807SJeff Garzik 	} else {
5391c6fd2807SJeff Garzik 		if (in_wq) {
5392c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
539383625006SAkira Iguchi 			ap->ops->irq_on(ap);
5394c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5395c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5396c6fd2807SJeff Garzik 		} else
5397c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5398c6fd2807SJeff Garzik 	}
5399c6fd2807SJeff Garzik }
5400c6fd2807SJeff Garzik 
5401c6fd2807SJeff Garzik /**
5402c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5403c6fd2807SJeff Garzik  *	@ap: the target ata_port
5404c6fd2807SJeff Garzik  *	@qc: qc on going
5405c6fd2807SJeff Garzik  *	@status: current device status
5406c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5407c6fd2807SJeff Garzik  *
5408c6fd2807SJeff Garzik  *	RETURNS:
5409c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5410c6fd2807SJeff Garzik  */
5411c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5412c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5413c6fd2807SJeff Garzik {
5414c6fd2807SJeff Garzik 	unsigned long flags = 0;
5415c6fd2807SJeff Garzik 	int poll_next;
5416c6fd2807SJeff Garzik 
5417c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5418c6fd2807SJeff Garzik 
5419c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5420c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5421c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5422c6fd2807SJeff Garzik 	 */
5423c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5424c6fd2807SJeff Garzik 
5425c6fd2807SJeff Garzik fsm_start:
5426c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
542744877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5428c6fd2807SJeff Garzik 
5429c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5430c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5431c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5432c6fd2807SJeff Garzik 
5433c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5434c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5435c6fd2807SJeff Garzik 		 * takes over after sending the data.
5436c6fd2807SJeff Garzik 		 */
5437c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5438c6fd2807SJeff Garzik 
5439c6fd2807SJeff Garzik 		/* check device status */
5440c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5441c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5442c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5443c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5444c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5445c6fd2807SJeff Garzik 			else
5446c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5447c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5448c6fd2807SJeff Garzik 
5449c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5450c6fd2807SJeff Garzik 			goto fsm_start;
5451c6fd2807SJeff Garzik 		}
5452c6fd2807SJeff Garzik 
5453c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5454c6fd2807SJeff Garzik 		 * when it finds something wrong.
5455c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5456c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5457c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5458c6fd2807SJeff Garzik 		 */
5459c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
54602d3b8eeaSAlbert Lee 			/* Some ATAPI tape drives forget to clear the ERR bit
54612d3b8eeaSAlbert Lee 			 * when doing the next command (mostly request sense).
54622d3b8eeaSAlbert Lee 			 * We ignore ERR here as a workaround and proceed with sending
54632d3b8eeaSAlbert Lee 			 * the CDB.
54642d3b8eeaSAlbert Lee 			 */
54652d3b8eeaSAlbert Lee 			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
54662d3b8eeaSAlbert Lee 				ata_port_printk(ap, KERN_WARNING,
54672d3b8eeaSAlbert Lee 						"DRQ=1 with device error, "
54682d3b8eeaSAlbert Lee 						"dev_stat 0x%X\n", status);
5469c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5470c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5471c6fd2807SJeff Garzik 				goto fsm_start;
5472c6fd2807SJeff Garzik 			}
54732d3b8eeaSAlbert Lee 		}
5474c6fd2807SJeff Garzik 
5475c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5476c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5477c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5478c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5479c6fd2807SJeff Garzik 		 */
5480c6fd2807SJeff Garzik 		if (in_wq)
5481c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5482c6fd2807SJeff Garzik 
5483c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5484c6fd2807SJeff Garzik 			/* PIO data out protocol.
5485c6fd2807SJeff Garzik 			 * send first data block.
5486c6fd2807SJeff Garzik 			 */
5487c6fd2807SJeff Garzik 
5488c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5489c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5490c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5491c6fd2807SJeff Garzik 			 */
5492c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5493c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5494c6fd2807SJeff Garzik 		} else
5495c6fd2807SJeff Garzik 			/* send CDB */
5496c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5497c6fd2807SJeff Garzik 
5498c6fd2807SJeff Garzik 		if (in_wq)
5499c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5500c6fd2807SJeff Garzik 
5501c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5502c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5503c6fd2807SJeff Garzik 		 */
5504c6fd2807SJeff Garzik 		break;
5505c6fd2807SJeff Garzik 
5506c6fd2807SJeff Garzik 	case HSM_ST:
5507c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
55080dc36888STejun Heo 		if (qc->tf.protocol == ATAPI_PROT_PIO) {
5509c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5510c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5511c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5512c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5513c6fd2807SJeff Garzik 				 */
5514c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5515c6fd2807SJeff Garzik 				goto fsm_start;
5516c6fd2807SJeff Garzik 			}
5517c6fd2807SJeff Garzik 
5518c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5519c6fd2807SJeff Garzik 			 * when it finds something wrong.
5520c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5521c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5522c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5523c6fd2807SJeff Garzik 			 */
5524c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
552544877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
552644877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
552744877b4eSTejun Heo 						status);
5528c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5529c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5530c6fd2807SJeff Garzik 				goto fsm_start;
5531c6fd2807SJeff Garzik 			}
5532c6fd2807SJeff Garzik 
5533c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5534c6fd2807SJeff Garzik 
5535c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5536c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5537c6fd2807SJeff Garzik 				goto fsm_start;
5538c6fd2807SJeff Garzik 
5539c6fd2807SJeff Garzik 		} else {
5540c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5541c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5542c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5543c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5544c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5545c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5546c6fd2807SJeff Garzik 				else
554755a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
554855a8e2c8STejun Heo 					 * Phantom devices also trigger this
554955a8e2c8STejun Heo 					 * condition, so mark the NODEV hint.
555055a8e2c8STejun Heo 					 */
555155a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
555255a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5553c6fd2807SJeff Garzik 
5554c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5555c6fd2807SJeff Garzik 				goto fsm_start;
5556c6fd2807SJeff Garzik 			}
5557c6fd2807SJeff Garzik 
5558c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5559c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
5560c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5561c6fd2807SJeff Garzik 			 * block of junk data before changing the
5562c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5563c6fd2807SJeff Garzik 			 *
5564c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5565c6fd2807SJeff Garzik 			 * sense since the data block has been
5566c6fd2807SJeff Garzik 			 * transferred to the device.
5567c6fd2807SJeff Garzik 			 */
5568c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5569c6fd2807SJeff Garzik 				/* data might be corrupted */
5570c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5571c6fd2807SJeff Garzik 
5572c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5573c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5574c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5575c6fd2807SJeff Garzik 				}
5576c6fd2807SJeff Garzik 
5577c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5578c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5579c6fd2807SJeff Garzik 
5580c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5581c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5582c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5583c6fd2807SJeff Garzik 				 */
5584c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5585c6fd2807SJeff Garzik 				goto fsm_start;
5586c6fd2807SJeff Garzik 			}
5587c6fd2807SJeff Garzik 
5588c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5589c6fd2807SJeff Garzik 
5590c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5591c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5592c6fd2807SJeff Garzik 				/* all data read */
5593c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5594c6fd2807SJeff Garzik 				goto fsm_start;
5595c6fd2807SJeff Garzik 			}
5596c6fd2807SJeff Garzik 		}
5597c6fd2807SJeff Garzik 
5598c6fd2807SJeff Garzik 		poll_next = 1;
5599c6fd2807SJeff Garzik 		break;
5600c6fd2807SJeff Garzik 
5601c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5602c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5603c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5604c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5605c6fd2807SJeff Garzik 			goto fsm_start;
5606c6fd2807SJeff Garzik 		}
5607c6fd2807SJeff Garzik 
5608c6fd2807SJeff Garzik 		/* no more data to transfer */
5609c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
561044877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5611c6fd2807SJeff Garzik 
5612c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5613c6fd2807SJeff Garzik 
5614c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5615c6fd2807SJeff Garzik 
5616c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5617c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5618c6fd2807SJeff Garzik 
5619c6fd2807SJeff Garzik 		poll_next = 0;
5620c6fd2807SJeff Garzik 		break;
5621c6fd2807SJeff Garzik 
5622c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5623c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5624c6fd2807SJeff Garzik 		 * know what's wrong and recover
5625c6fd2807SJeff Garzik 		 */
5626c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5627c6fd2807SJeff Garzik 
5628c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5629c6fd2807SJeff Garzik 
5630c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5631c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5632c6fd2807SJeff Garzik 
5633c6fd2807SJeff Garzik 		poll_next = 0;
5634c6fd2807SJeff Garzik 		break;
5635c6fd2807SJeff Garzik 	default:
5636c6fd2807SJeff Garzik 		poll_next = 0;
5637c6fd2807SJeff Garzik 		BUG();
5638c6fd2807SJeff Garzik 	}
5639c6fd2807SJeff Garzik 
5640c6fd2807SJeff Garzik 	return poll_next;
5641c6fd2807SJeff Garzik }
5642c6fd2807SJeff Garzik 
564365f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5644c6fd2807SJeff Garzik {
564565f27f38SDavid Howells 	struct ata_port *ap =
564665f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
564765f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5648c6fd2807SJeff Garzik 	u8 status;
5649c6fd2807SJeff Garzik 	int poll_next;
5650c6fd2807SJeff Garzik 
5651c6fd2807SJeff Garzik fsm_start:
5652c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5653c6fd2807SJeff Garzik 
5654c6fd2807SJeff Garzik 	/*
5655c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5656c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5657c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5658c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5659c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5660c6fd2807SJeff Garzik 	 */
5661c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5662c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5663c6fd2807SJeff Garzik 		msleep(2);
5664c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5665c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5666442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
5667c6fd2807SJeff Garzik 			return;
5668c6fd2807SJeff Garzik 		}
5669c6fd2807SJeff Garzik 	}
5670c6fd2807SJeff Garzik 
5671c6fd2807SJeff Garzik 	/* move the HSM */
5672c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5673c6fd2807SJeff Garzik 
5674c6fd2807SJeff Garzik 	/* another command or interrupt handler
5675c6fd2807SJeff Garzik 	 * may be running at this point.
5676c6fd2807SJeff Garzik 	 */
5677c6fd2807SJeff Garzik 	if (poll_next)
5678c6fd2807SJeff Garzik 		goto fsm_start;
5679c6fd2807SJeff Garzik }
5680c6fd2807SJeff Garzik 
5681c6fd2807SJeff Garzik /**
5682c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5683c6fd2807SJeff Garzik  *	@ap: Port from which an available command structure is requested
5685c6fd2807SJeff Garzik  *
5686c6fd2807SJeff Garzik  *	LOCKING:
5687c6fd2807SJeff Garzik  *	None.
5688c6fd2807SJeff Garzik  */
5689c6fd2807SJeff Garzik 
5690c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5691c6fd2807SJeff Garzik {
5692c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5693c6fd2807SJeff Garzik 	unsigned int i;
5694c6fd2807SJeff Garzik 
5695c6fd2807SJeff Garzik 	/* no command while frozen */
5696c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5697c6fd2807SJeff Garzik 		return NULL;
5698c6fd2807SJeff Garzik 
5699c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5700c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5701c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5702c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5703c6fd2807SJeff Garzik 			break;
5704c6fd2807SJeff Garzik 		}
5705c6fd2807SJeff Garzik 
5706c6fd2807SJeff Garzik 	if (qc)
5707c6fd2807SJeff Garzik 		qc->tag = i;
5708c6fd2807SJeff Garzik 
5709c6fd2807SJeff Garzik 	return qc;
5710c6fd2807SJeff Garzik }
5711c6fd2807SJeff Garzik 
5712c6fd2807SJeff Garzik /**
5713c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5714c6fd2807SJeff Garzik  *	@dev: Device from which we request an available command structure
5715c6fd2807SJeff Garzik  *
5716c6fd2807SJeff Garzik  *	LOCKING:
5717c6fd2807SJeff Garzik  *	None.
5718c6fd2807SJeff Garzik  */
5719c6fd2807SJeff Garzik 
5720c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5721c6fd2807SJeff Garzik {
57229af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5723c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5724c6fd2807SJeff Garzik 
5725c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5726c6fd2807SJeff Garzik 	if (qc) {
5727c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5728c6fd2807SJeff Garzik 		qc->ap = ap;
5729c6fd2807SJeff Garzik 		qc->dev = dev;
5730c6fd2807SJeff Garzik 
5731c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5732c6fd2807SJeff Garzik 	}
5733c6fd2807SJeff Garzik 
5734c6fd2807SJeff Garzik 	return qc;
5735c6fd2807SJeff Garzik }
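
/*
 * Editorial sketch (not part of the original source): one plausible walk
 * through the qc lifecycle implied by the helpers above -- allocate a
 * command with ata_qc_new_init(), fill in the taskfile (which
 * ata_qc_reinit() has already seeded), then hand it to ata_qc_issue()
 * while holding the host lock.  The names example_issue_nodata() and
 * example_qc_done(), and the choice of a simple non-data command, are
 * illustrative assumptions, not libata API.
 */
static void example_qc_done(struct ata_queued_cmd *qc)
{
	ata_qc_free(qc);			/* nothing to report in this sketch */
}

static unsigned int example_issue_nodata(struct ata_device *dev, u8 command)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	qc = ata_qc_new_init(dev);		/* NULL if frozen or no free tag */
	if (!qc) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_OTHER;
	}

	qc->tf.command = command;
	qc->tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->complete_fn = example_qc_done;	/* called from __ata_qc_complete() */

	ata_qc_issue(qc);			/* failures complete the qc with err_mask set */

	spin_unlock_irqrestore(ap->lock, flags);
	return 0;
}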
5736c6fd2807SJeff Garzik 
5737c6fd2807SJeff Garzik /**
5738c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5739c6fd2807SJeff Garzik  *	@qc: Command to complete
5740c6fd2807SJeff Garzik  *
5741c6fd2807SJeff Garzik  *	Designed to free unused ata_queued_cmd object
5742c6fd2807SJeff Garzik  *	in case something prevents using it.
5743c6fd2807SJeff Garzik  *
5744c6fd2807SJeff Garzik  *	LOCKING:
5745cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5746c6fd2807SJeff Garzik  */
5747c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5748c6fd2807SJeff Garzik {
5749c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5750c6fd2807SJeff Garzik 	unsigned int tag;
5751c6fd2807SJeff Garzik 
5752c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5753c6fd2807SJeff Garzik 
5754c6fd2807SJeff Garzik 	qc->flags = 0;
5755c6fd2807SJeff Garzik 	tag = qc->tag;
5756c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5757c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5758c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5759c6fd2807SJeff Garzik 	}
5760c6fd2807SJeff Garzik }
5761c6fd2807SJeff Garzik 
5762c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5763c6fd2807SJeff Garzik {
5764c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
57659af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5766c6fd2807SJeff Garzik 
5767c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5768c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5769c6fd2807SJeff Garzik 
5770c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5771c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5772c6fd2807SJeff Garzik 
5773c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5774da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
57759af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5776da917d69STejun Heo 		if (!link->sactive)
5777da917d69STejun Heo 			ap->nr_active_links--;
5778da917d69STejun Heo 	} else {
57799af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5780da917d69STejun Heo 		ap->nr_active_links--;
5781da917d69STejun Heo 	}
5782da917d69STejun Heo 
5783da917d69STejun Heo 	/* clear exclusive status */
5784da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5785da917d69STejun Heo 		     ap->excl_link == link))
5786da917d69STejun Heo 		ap->excl_link = NULL;
5787c6fd2807SJeff Garzik 
5788c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5789c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5790c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5791c6fd2807SJeff Garzik 	 */
5792c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5793c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5794c6fd2807SJeff Garzik 
5795c6fd2807SJeff Garzik 	/* call completion callback */
5796c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5797c6fd2807SJeff Garzik }
5798c6fd2807SJeff Garzik 
579939599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
580039599a53STejun Heo {
580139599a53STejun Heo 	struct ata_port *ap = qc->ap;
580239599a53STejun Heo 
580339599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
58044742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
580539599a53STejun Heo }
580639599a53STejun Heo 
580700115e0fSTejun Heo static void ata_verify_xfer(struct ata_queued_cmd *qc)
580800115e0fSTejun Heo {
580900115e0fSTejun Heo 	struct ata_device *dev = qc->dev;
581000115e0fSTejun Heo 
581100115e0fSTejun Heo 	if (ata_tag_internal(qc->tag))
581200115e0fSTejun Heo 		return;
581300115e0fSTejun Heo 
581400115e0fSTejun Heo 	if (ata_is_nodata(qc->tf.protocol))
581500115e0fSTejun Heo 		return;
581600115e0fSTejun Heo 
581700115e0fSTejun Heo 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
581800115e0fSTejun Heo 		return;
581900115e0fSTejun Heo 
582000115e0fSTejun Heo 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
582100115e0fSTejun Heo }
582200115e0fSTejun Heo 
5823c6fd2807SJeff Garzik /**
5824c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5825c6fd2807SJeff Garzik  *	@qc: Command to complete
5827c6fd2807SJeff Garzik  *
5828c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5829c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5830c6fd2807SJeff Garzik  *
5831c6fd2807SJeff Garzik  *	LOCKING:
5832cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5833c6fd2807SJeff Garzik  */
5834c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5835c6fd2807SJeff Garzik {
5836c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5837c6fd2807SJeff Garzik 
5838c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5839c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5840c6fd2807SJeff Garzik 	 *
5841c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5842c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5843c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5844c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5845c6fd2807SJeff Garzik 	 *
5846c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5847c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5848c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5849c6fd2807SJeff Garzik 	 * taken care of.
5850c6fd2807SJeff Garzik 	 */
5851c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
58524dbfa39bSTejun Heo 		struct ata_device *dev = qc->dev;
58534dbfa39bSTejun Heo 		struct ata_eh_info *ehi = &dev->link->eh_info;
58544dbfa39bSTejun Heo 
5855c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5856c6fd2807SJeff Garzik 
5857c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5858c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5859c6fd2807SJeff Garzik 
5860c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5861c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5862c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
586339599a53STejun Heo 				fill_result_tf(qc);
5864c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5865c6fd2807SJeff Garzik 				return;
5866c6fd2807SJeff Garzik 			}
5867c6fd2807SJeff Garzik 		}
5868c6fd2807SJeff Garzik 
5869c6fd2807SJeff Garzik 		/* read result TF if requested */
5870c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
587139599a53STejun Heo 			fill_result_tf(qc);
5872c6fd2807SJeff Garzik 
58734dbfa39bSTejun Heo 		/* Some commands need post-processing after successful
58744dbfa39bSTejun Heo 		 * completion.
58754dbfa39bSTejun Heo 		 */
58764dbfa39bSTejun Heo 		switch (qc->tf.command) {
58774dbfa39bSTejun Heo 		case ATA_CMD_SET_FEATURES:
58784dbfa39bSTejun Heo 			if (qc->tf.feature != SETFEATURES_WC_ON &&
58794dbfa39bSTejun Heo 			    qc->tf.feature != SETFEATURES_WC_OFF)
58804dbfa39bSTejun Heo 				break;
58814dbfa39bSTejun Heo 			/* fall through */
58824dbfa39bSTejun Heo 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
58834dbfa39bSTejun Heo 		case ATA_CMD_SET_MULTI: /* multi_count changed */
58844dbfa39bSTejun Heo 			/* revalidate device */
58854dbfa39bSTejun Heo 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
58864dbfa39bSTejun Heo 			ata_port_schedule_eh(ap);
58874dbfa39bSTejun Heo 			break;
5888054a5fbaSTejun Heo 
5889054a5fbaSTejun Heo 		case ATA_CMD_SLEEP:
5890054a5fbaSTejun Heo 			dev->flags |= ATA_DFLAG_SLEEPING;
5891054a5fbaSTejun Heo 			break;
58924dbfa39bSTejun Heo 		}
58934dbfa39bSTejun Heo 
589400115e0fSTejun Heo 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
589500115e0fSTejun Heo 			ata_verify_xfer(qc);
589600115e0fSTejun Heo 
5897c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5898c6fd2807SJeff Garzik 	} else {
5899c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5900c6fd2807SJeff Garzik 			return;
5901c6fd2807SJeff Garzik 
5902c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5903c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
590439599a53STejun Heo 			fill_result_tf(qc);
5905c6fd2807SJeff Garzik 
5906c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5907c6fd2807SJeff Garzik 	}
5908c6fd2807SJeff Garzik }
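
/*
 * Editorial sketch (not part of the original source): the usual way a
 * low-level driver's interrupt path hands a finished command back to
 * libata -- look up the active qc, fold the ending device status into
 * qc->err_mask and call ata_qc_complete() with the host lock held.
 * example_port_intr() and the way "status" is obtained are assumptions
 * for illustration only.
 */
static void example_port_intr(struct ata_port *ap, u8 status)
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (!qc || (qc->tf.flags & ATA_TFLAG_POLLING))
		return;				/* nothing in flight, or polled PIO owns it */

	if (status & (ATA_BUSY | ATA_DRQ))
		return;				/* command has not finished yet */

	qc->err_mask |= ac_err_mask(status);	/* zero if the status looks clean */
	ata_qc_complete(qc);			/* schedules EH itself when err_mask is set */
}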
5909c6fd2807SJeff Garzik 
5910c6fd2807SJeff Garzik /**
5911c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5912c6fd2807SJeff Garzik  *	@ap: port in question
5913c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5914c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5915c6fd2807SJeff Garzik  *
5916c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5917c6fd2807SJeff Garzik  *	called from a low-level driver's interrupt routine to complete
5918c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5919c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5920c6fd2807SJeff Garzik  *
5921c6fd2807SJeff Garzik  *	LOCKING:
5922cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5923c6fd2807SJeff Garzik  *
5924c6fd2807SJeff Garzik  *	RETURNS:
5925c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5926c6fd2807SJeff Garzik  */
5927c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5928c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5929c6fd2807SJeff Garzik {
5930c6fd2807SJeff Garzik 	int nr_done = 0;
5931c6fd2807SJeff Garzik 	u32 done_mask;
5932c6fd2807SJeff Garzik 	int i;
5933c6fd2807SJeff Garzik 
5934c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5935c6fd2807SJeff Garzik 
5936c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5937c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5938c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5939c6fd2807SJeff Garzik 		return -EINVAL;
5940c6fd2807SJeff Garzik 	}
5941c6fd2807SJeff Garzik 
5942c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5943c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5944c6fd2807SJeff Garzik 
5945c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5946c6fd2807SJeff Garzik 			continue;
5947c6fd2807SJeff Garzik 
5948c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5949c6fd2807SJeff Garzik 			if (finish_qc)
5950c6fd2807SJeff Garzik 				finish_qc(qc);
5951c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5952c6fd2807SJeff Garzik 			nr_done++;
5953c6fd2807SJeff Garzik 		}
5954c6fd2807SJeff Garzik 	}
5955c6fd2807SJeff Garzik 
5956c6fd2807SJeff Garzik 	return nr_done;
5957c6fd2807SJeff Garzik }
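
/*
 * Editorial sketch (not part of the original source): how an NCQ-capable
 * LLD's interrupt handler might use ata_qc_complete_multiple().  The
 * controller register that reports which tags are still outstanding is
 * hypothetical (example_read_active_tags()); the point is that the
 * *current* active mask is passed in and the completed set is derived by
 * XOR-ing it against ap->qc_active.
 */
static u32 example_read_active_tags(struct ata_port *ap)
{
	/* placeholder: a real driver reads its controller's tag-status register */
	return ap->qc_active;
}

static void example_ncq_intr(struct ata_port *ap)
{
	u32 qc_active = example_read_active_tags(ap);

	if (ata_qc_complete_multiple(ap, qc_active, NULL) < 0) {
		/* illegal transition: a real driver freezes the port for EH here */
		ata_port_printk(ap, KERN_ERR, "lost track of active tags\n");
	}
}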
5958c6fd2807SJeff Garzik 
5959c6fd2807SJeff Garzik /**
5960c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5961c6fd2807SJeff Garzik  *	@qc: command to issue to device
5962c6fd2807SJeff Garzik  *
5963c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5964c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5965c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5966c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5967c6fd2807SJeff Garzik  *
5968c6fd2807SJeff Garzik  *	LOCKING:
5969cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5970c6fd2807SJeff Garzik  */
5971c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5972c6fd2807SJeff Garzik {
5973c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
59749af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5975405e66b3STejun Heo 	u8 prot = qc->tf.protocol;
5976c6fd2807SJeff Garzik 
5977c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5978c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5979c6fd2807SJeff Garzik 	 * request ATAPI sense.
5980c6fd2807SJeff Garzik 	 */
59819af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5982c6fd2807SJeff Garzik 
59831973a023STejun Heo 	if (ata_is_ncq(prot)) {
59849af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
5985da917d69STejun Heo 
5986da917d69STejun Heo 		if (!link->sactive)
5987da917d69STejun Heo 			ap->nr_active_links++;
59889af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
5989c6fd2807SJeff Garzik 	} else {
59909af5c9c9STejun Heo 		WARN_ON(link->sactive);
5991da917d69STejun Heo 
5992da917d69STejun Heo 		ap->nr_active_links++;
59939af5c9c9STejun Heo 		link->active_tag = qc->tag;
5994c6fd2807SJeff Garzik 	}
5995c6fd2807SJeff Garzik 
5996c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5997c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5998c6fd2807SJeff Garzik 
5999f92a2636STejun Heo 	/* We guarantee to LLDs that they will have at least one
6000f92a2636STejun Heo 	 * non-zero sg if the command is a data command.
6001f92a2636STejun Heo 	 */
6002ff2aeb1eSTejun Heo 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
6003f92a2636STejun Heo 
6004405e66b3STejun Heo 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
6005f92a2636STejun Heo 				 (ap->flags & ATA_FLAG_PIO_DMA)))
6006c6fd2807SJeff Garzik 		if (ata_sg_setup(qc))
6007c6fd2807SJeff Garzik 			goto sg_err;
6008c6fd2807SJeff Garzik 
6009054a5fbaSTejun Heo 	/* if device is sleeping, schedule softreset and abort the link */
6010054a5fbaSTejun Heo 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6011054a5fbaSTejun Heo 		link->eh_info.action |= ATA_EH_SOFTRESET;
6012054a5fbaSTejun Heo 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6013054a5fbaSTejun Heo 		ata_link_abort(link);
6014054a5fbaSTejun Heo 		return;
6015054a5fbaSTejun Heo 	}
6016054a5fbaSTejun Heo 
6017c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
6018c6fd2807SJeff Garzik 
6019c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
6020c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
6021c6fd2807SJeff Garzik 		goto err;
6022c6fd2807SJeff Garzik 	return;
6023c6fd2807SJeff Garzik 
6024c6fd2807SJeff Garzik sg_err:
6025c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
6026c6fd2807SJeff Garzik err:
6027c6fd2807SJeff Garzik 	ata_qc_complete(qc);
6028c6fd2807SJeff Garzik }
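
/*
 * Editorial sketch (not part of the original source): the caller-side
 * contract that the BUG_ON() above enforces -- a data command must reach
 * ata_qc_issue() with a scatterlist and a byte count already attached.
 * example_setup_read() and its arguments are illustrative; in practice
 * the SCSI translation path attaches these before issuing.
 */
static void example_setup_read(struct ata_queued_cmd *qc,
			       struct scatterlist *sg, unsigned int n_elem,
			       unsigned int nbytes)
{
	ata_sg_init(qc, sg, n_elem);	/* attach the S/G table to the qc */
	qc->nbytes = nbytes;		/* total transfer length in bytes */

	/* taskfile and protocol are filled in elsewhere; then, under the
	 * host lock:
	 */
	ata_qc_issue(qc);
}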
6029c6fd2807SJeff Garzik 
6030c6fd2807SJeff Garzik /**
6031c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6032c6fd2807SJeff Garzik  *	@qc: command to issue to device
6033c6fd2807SJeff Garzik  *
6034c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
6035c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
6036c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
6037c6fd2807SJeff Garzik  *	is slightly different.
6038c6fd2807SJeff Garzik  *
6039c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
6040c6fd2807SJeff Garzik  *
6041c6fd2807SJeff Garzik  *	LOCKING:
6042cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6043c6fd2807SJeff Garzik  *
6044c6fd2807SJeff Garzik  *	RETURNS:
6045c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
6046c6fd2807SJeff Garzik  */
6047c6fd2807SJeff Garzik 
6048c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6049c6fd2807SJeff Garzik {
6050c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
6051c6fd2807SJeff Garzik 
6052c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
6053c6fd2807SJeff Garzik 	 * interrupt-driven PIO and the ATAPI CDB interrupt.
6054c6fd2807SJeff Garzik 	 */
6055c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
6056c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
6057c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
6058e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
60590dc36888STejun Heo 		case ATAPI_PROT_PIO:
60600dc36888STejun Heo 		case ATAPI_PROT_NODATA:
6061c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
6062c6fd2807SJeff Garzik 			break;
60630dc36888STejun Heo 		case ATAPI_PROT_DMA:
6064c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6065c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
6066c6fd2807SJeff Garzik 				BUG();
6067c6fd2807SJeff Garzik 			break;
6068c6fd2807SJeff Garzik 		default:
6069c6fd2807SJeff Garzik 			break;
6070c6fd2807SJeff Garzik 		}
6071c6fd2807SJeff Garzik 	}
6072c6fd2807SJeff Garzik 
6073c6fd2807SJeff Garzik 	/* select the device */
6074c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
6075c6fd2807SJeff Garzik 
6076c6fd2807SJeff Garzik 	/* start the command */
6077c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
6078c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
6079c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6080c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6081c6fd2807SJeff Garzik 
6082c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6083c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6084c6fd2807SJeff Garzik 
6085c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6086442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6087c6fd2807SJeff Garzik 
6088c6fd2807SJeff Garzik 		break;
6089c6fd2807SJeff Garzik 
6090c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
6091c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6092c6fd2807SJeff Garzik 
6093c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6094c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6095c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
6096c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6097c6fd2807SJeff Garzik 		break;
6098c6fd2807SJeff Garzik 
6099c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
6100c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6101c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6102c6fd2807SJeff Garzik 
6103c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6104c6fd2807SJeff Garzik 
6105c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
6106c6fd2807SJeff Garzik 			/* PIO data out protocol */
6107c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
6108442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6109c6fd2807SJeff Garzik 
6110c6fd2807SJeff Garzik 			/* always send first data block using
6111c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
6112c6fd2807SJeff Garzik 			 */
6113c6fd2807SJeff Garzik 		} else {
6114c6fd2807SJeff Garzik 			/* PIO data in protocol */
6115c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
6116c6fd2807SJeff Garzik 
6117c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
6118442eacc3SJeff Garzik 				ata_pio_queue_task(ap, qc, 0);
6119c6fd2807SJeff Garzik 
6120c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
6121c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
6122c6fd2807SJeff Garzik 			 */
6123c6fd2807SJeff Garzik 		}
6124c6fd2807SJeff Garzik 
6125c6fd2807SJeff Garzik 		break;
6126c6fd2807SJeff Garzik 
61270dc36888STejun Heo 	case ATAPI_PROT_PIO:
61280dc36888STejun Heo 	case ATAPI_PROT_NODATA:
6129c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6130c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6131c6fd2807SJeff Garzik 
6132c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6133c6fd2807SJeff Garzik 
6134c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6135c6fd2807SJeff Garzik 
6136c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6137c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6138c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
6139442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6140c6fd2807SJeff Garzik 		break;
6141c6fd2807SJeff Garzik 
61420dc36888STejun Heo 	case ATAPI_PROT_DMA:
6143c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6144c6fd2807SJeff Garzik 
6145c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6146c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6147c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6148c6fd2807SJeff Garzik 
6149c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6150c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6151442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6152c6fd2807SJeff Garzik 		break;
6153c6fd2807SJeff Garzik 
6154c6fd2807SJeff Garzik 	default:
6155c6fd2807SJeff Garzik 		WARN_ON(1);
6156c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
6157c6fd2807SJeff Garzik 	}
6158c6fd2807SJeff Garzik 
6159c6fd2807SJeff Garzik 	return 0;
6160c6fd2807SJeff Garzik }
6161c6fd2807SJeff Garzik 
6162c6fd2807SJeff Garzik /**
6163c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
6164c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
6165c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
6166c6fd2807SJeff Garzik  *
6167c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
6168c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
6169c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
6170c6fd2807SJeff Garzik  *
6171c6fd2807SJeff Garzik  *	LOCKING:
6172cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6173c6fd2807SJeff Garzik  *
6174c6fd2807SJeff Garzik  *	RETURNS:
6175c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
6176c6fd2807SJeff Garzik  */
6177c6fd2807SJeff Garzik 
6178c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap,
6179c6fd2807SJeff Garzik 				  struct ata_queued_cmd *qc)
6180c6fd2807SJeff Garzik {
61819af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
6182c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
6183c6fd2807SJeff Garzik 
6184c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
618544877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6186c6fd2807SJeff Garzik 
6187c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
6188c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
6189c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
6190c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
6191c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
6192c6fd2807SJeff Garzik 		 */
6193c6fd2807SJeff Garzik 
6194c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6195405e66b3STejun Heo 		 * The flag was turned on only for atapi devices.  No
6196405e66b3STejun Heo 		 * need to check ata_is_atapi(qc->tf.protocol) again.
6197c6fd2807SJeff Garzik 		 */
6198c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6199c6fd2807SJeff Garzik 			goto idle_irq;
6200c6fd2807SJeff Garzik 		break;
6201c6fd2807SJeff Garzik 	case HSM_ST_LAST:
6202c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
62030dc36888STejun Heo 		    qc->tf.protocol == ATAPI_PROT_DMA) {
6204c6fd2807SJeff Garzik 			/* check status of DMA engine */
6205c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
620644877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
620744877b4eSTejun Heo 				ap->print_id, host_stat);
6208c6fd2807SJeff Garzik 
6209c6fd2807SJeff Garzik 			/* if it's not our irq... */
6210c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
6211c6fd2807SJeff Garzik 				goto idle_irq;
6212c6fd2807SJeff Garzik 
6213c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
6214c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
6215c6fd2807SJeff Garzik 
6216c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
6217c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
6218c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
6219c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
6220c6fd2807SJeff Garzik 			}
6221c6fd2807SJeff Garzik 		}
6222c6fd2807SJeff Garzik 		break;
6223c6fd2807SJeff Garzik 	case HSM_ST:
6224c6fd2807SJeff Garzik 		break;
6225c6fd2807SJeff Garzik 	default:
6226c6fd2807SJeff Garzik 		goto idle_irq;
6227c6fd2807SJeff Garzik 	}
6228c6fd2807SJeff Garzik 
6229c6fd2807SJeff Garzik 	/* check altstatus */
6230c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
6231c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
6232c6fd2807SJeff Garzik 		goto idle_irq;
6233c6fd2807SJeff Garzik 
6234c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
6235c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
6236c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
6237c6fd2807SJeff Garzik 		goto idle_irq;
6238c6fd2807SJeff Garzik 
6239c6fd2807SJeff Garzik 	/* ack bmdma irq events */
6240c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
6241c6fd2807SJeff Garzik 
6242c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
6243ea54763fSTejun Heo 
6244ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
62450dc36888STejun Heo 				       qc->tf.protocol == ATAPI_PROT_DMA))
6246ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6247ea54763fSTejun Heo 
6248c6fd2807SJeff Garzik 	return 1;	/* irq handled */
6249c6fd2807SJeff Garzik 
6250c6fd2807SJeff Garzik idle_irq:
6251c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
6252c6fd2807SJeff Garzik 
6253c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6254c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
62556d32d30fSJeff Garzik 		ata_chk_status(ap);
62566d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
6257c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6258c6fd2807SJeff Garzik 		return 1;
6259c6fd2807SJeff Garzik 	}
6260c6fd2807SJeff Garzik #endif
6261c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
6262c6fd2807SJeff Garzik }
6263c6fd2807SJeff Garzik 
6264c6fd2807SJeff Garzik /**
6265c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
6266c6fd2807SJeff Garzik  *	@irq: irq line (unused)
6267cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
6268c6fd2807SJeff Garzik  *
6269c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
6270c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
6271c6fd2807SJeff Garzik  *
6272c6fd2807SJeff Garzik  *	LOCKING:
6273cca3974eSJeff Garzik  *	Obtains host lock during operation.
6274c6fd2807SJeff Garzik  *
6275c6fd2807SJeff Garzik  *	RETURNS:
6276c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
6277c6fd2807SJeff Garzik  */
6278c6fd2807SJeff Garzik 
62797d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance)
6280c6fd2807SJeff Garzik {
6281cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
6282c6fd2807SJeff Garzik 	unsigned int i;
6283c6fd2807SJeff Garzik 	unsigned int handled = 0;
6284c6fd2807SJeff Garzik 	unsigned long flags;
6285c6fd2807SJeff Garzik 
6286c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6287cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
6288c6fd2807SJeff Garzik 
6289cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6290c6fd2807SJeff Garzik 		struct ata_port *ap;
6291c6fd2807SJeff Garzik 
6292cca3974eSJeff Garzik 		ap = host->ports[i];
6293c6fd2807SJeff Garzik 		if (ap &&
6294c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
6295c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
6296c6fd2807SJeff Garzik 
62979af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
6298c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6299c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
6300c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
6301c6fd2807SJeff Garzik 		}
6302c6fd2807SJeff Garzik 	}
6303c6fd2807SJeff Garzik 
6304cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
6305c6fd2807SJeff Garzik 
6306c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
6307c6fd2807SJeff Garzik }
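
/*
 * Editorial sketch (not part of the original source): a typical way an
 * SFF-style driver wires ata_interrupt() up as its IRQ handler, assuming
 * the host is registered through the ata_host_activate() helper.
 * example_sht and the irq argument are placeholders.
 */
static struct scsi_host_template example_sht;	/* driver's SHT, contents omitted */

static int example_activate(struct ata_host *host, int irq)
{
	/* IRQF_SHARED is safe: ata_interrupt() returns IRQ_NONE for foreign IRQs */
	return ata_host_activate(host, irq, ata_interrupt, IRQF_SHARED,
				 &example_sht);
}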
6308c6fd2807SJeff Garzik 
6309c6fd2807SJeff Garzik /**
6310c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
6311936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
6312c6fd2807SJeff Garzik  *
6313936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
6314c6fd2807SJeff Garzik  *
6315c6fd2807SJeff Garzik  *	LOCKING:
6316c6fd2807SJeff Garzik  *	None.
6317c6fd2807SJeff Garzik  *
6318c6fd2807SJeff Garzik  *	RETURNS:
6319c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
6320c6fd2807SJeff Garzik  */
6321936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
6322c6fd2807SJeff Garzik {
6323936fd732STejun Heo 	struct ata_port *ap = link->ap;
6324936fd732STejun Heo 
6325a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6326c6fd2807SJeff Garzik }
6327c6fd2807SJeff Garzik 
6328c6fd2807SJeff Garzik /**
6329c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
6330936fd732STejun Heo  *	@link: ATA link to read SCR for
6331c6fd2807SJeff Garzik  *	@reg: SCR to read
6332c6fd2807SJeff Garzik  *	@val: Place to store read value
6333c6fd2807SJeff Garzik  *
6334936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
6335633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6336633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6337c6fd2807SJeff Garzik  *
6338c6fd2807SJeff Garzik  *	LOCKING:
6339633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6340c6fd2807SJeff Garzik  *
6341c6fd2807SJeff Garzik  *	RETURNS:
6342c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6343c6fd2807SJeff Garzik  */
6344936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6345c6fd2807SJeff Garzik {
6346633273a3STejun Heo 	if (ata_is_host_link(link)) {
6347936fd732STejun Heo 		struct ata_port *ap = link->ap;
6348936fd732STejun Heo 
6349936fd732STejun Heo 		if (sata_scr_valid(link))
6350da3dbb17STejun Heo 			return ap->ops->scr_read(ap, reg, val);
6351c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6352c6fd2807SJeff Garzik 	}
6353c6fd2807SJeff Garzik 
6354633273a3STejun Heo 	return sata_pmp_scr_read(link, reg, val);
6355633273a3STejun Heo }
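
/*
 * Editorial sketch (not part of the original source): reading SStatus
 * through sata_scr_read() and pulling out the DET (bits 3:0) and SPD
 * (bits 7:4) fields.  The field layout comes from the SATA SStatus
 * definition; the helper name is illustrative.
 */
static void example_dump_sstatus(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;		/* e.g. -EOPNOTSUPP on a PATA-only port */

	ata_link_printk(link, KERN_DEBUG, "SStatus %08x (DET %x SPD %x)\n",
			sstatus, sstatus & 0xf, (sstatus >> 4) & 0xf);
}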
6356633273a3STejun Heo 
6357c6fd2807SJeff Garzik /**
6358c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
6359936fd732STejun Heo  *	@link: ATA link to write SCR for
6360c6fd2807SJeff Garzik  *	@reg: SCR to write
6361c6fd2807SJeff Garzik  *	@val: value to write
6362c6fd2807SJeff Garzik  *
6363936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
6364633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6365633273a3STejun Heo  *	the port is SATA and the port implements ->scr_write.
6366c6fd2807SJeff Garzik  *
6367c6fd2807SJeff Garzik  *	LOCKING:
6368633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6369c6fd2807SJeff Garzik  *
6370c6fd2807SJeff Garzik  *	RETURNS:
6371c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6372c6fd2807SJeff Garzik  */
6373936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
6374c6fd2807SJeff Garzik {
6375633273a3STejun Heo 	if (ata_is_host_link(link)) {
6376936fd732STejun Heo 		struct ata_port *ap = link->ap;
6377936fd732STejun Heo 
6378936fd732STejun Heo 		if (sata_scr_valid(link))
6379da3dbb17STejun Heo 			return ap->ops->scr_write(ap, reg, val);
6380c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6381c6fd2807SJeff Garzik 	}
6382c6fd2807SJeff Garzik 
6383633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6384633273a3STejun Heo }
6385633273a3STejun Heo 
6386c6fd2807SJeff Garzik /**
6387c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6388936fd732STejun Heo  *	@link: ATA link to write SCR for
6389c6fd2807SJeff Garzik  *	@reg: SCR to write
6390c6fd2807SJeff Garzik  *	@val: value to write
6391c6fd2807SJeff Garzik  *
6392c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
6393c6fd2807SJeff Garzik  *	function performs flush after writing to the register.
6394c6fd2807SJeff Garzik  *
6395c6fd2807SJeff Garzik  *	LOCKING:
6396633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6397c6fd2807SJeff Garzik  *
6398c6fd2807SJeff Garzik  *	RETURNS:
6399c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6400c6fd2807SJeff Garzik  */
6401936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6402c6fd2807SJeff Garzik {
6403633273a3STejun Heo 	if (ata_is_host_link(link)) {
6404936fd732STejun Heo 		struct ata_port *ap = link->ap;
6405da3dbb17STejun Heo 		int rc;
6406da3dbb17STejun Heo 
6407936fd732STejun Heo 		if (sata_scr_valid(link)) {
6408da3dbb17STejun Heo 			rc = ap->ops->scr_write(ap, reg, val);
6409da3dbb17STejun Heo 			if (rc == 0)
6410da3dbb17STejun Heo 				rc = ap->ops->scr_read(ap, reg, &val);
6411da3dbb17STejun Heo 			return rc;
6412c6fd2807SJeff Garzik 		}
6413c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6414c6fd2807SJeff Garzik 	}
6415c6fd2807SJeff Garzik 
6416633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6417633273a3STejun Heo }
6418633273a3STejun Heo 
6419c6fd2807SJeff Garzik /**
6420936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6421936fd732STejun Heo  *	@link: ATA link to test
6422c6fd2807SJeff Garzik  *
6423936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6424936fd732STejun Heo  *	0 if online status of @link cannot be obtained, so
6425936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6426c6fd2807SJeff Garzik  *
6427c6fd2807SJeff Garzik  *	LOCKING:
6428c6fd2807SJeff Garzik  *	None.
6429c6fd2807SJeff Garzik  *
6430c6fd2807SJeff Garzik  *	RETURNS:
6431c6fd2807SJeff Garzik  *	1 if the port online status is available and online.
6432c6fd2807SJeff Garzik  */
6433936fd732STejun Heo int ata_link_online(struct ata_link *link)
6434c6fd2807SJeff Garzik {
6435c6fd2807SJeff Garzik 	u32 sstatus;
6436c6fd2807SJeff Garzik 
6437936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6438936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6439c6fd2807SJeff Garzik 		return 1;
6440c6fd2807SJeff Garzik 	return 0;
6441c6fd2807SJeff Garzik }
6442c6fd2807SJeff Garzik 
6443c6fd2807SJeff Garzik /**
6444936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6445936fd732STejun Heo  *	@link: ATA link to test
6446c6fd2807SJeff Garzik  *
6447936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6448936fd732STejun Heo  *	returns 0 if offline status of @link cannot be obtained, so
6449936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6450c6fd2807SJeff Garzik  *
6451c6fd2807SJeff Garzik  *	LOCKING:
6452c6fd2807SJeff Garzik  *	None.
6453c6fd2807SJeff Garzik  *
6454c6fd2807SJeff Garzik  *	RETURNS:
6455c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
6456c6fd2807SJeff Garzik  */
6457936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6458c6fd2807SJeff Garzik {
6459c6fd2807SJeff Garzik 	u32 sstatus;
6460c6fd2807SJeff Garzik 
6461936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6462936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6463c6fd2807SJeff Garzik 		return 1;
6464c6fd2807SJeff Garzik 	return 0;
6465c6fd2807SJeff Garzik }
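
/*
 * Editorial sketch (not part of the original source): because both helpers
 * return 0 when SStatus cannot be read, "not online" is not the same as
 * "offline".  Callers that care should handle the unknown case explicitly,
 * roughly like this (helper name illustrative):
 */
static void example_classify_link(struct ata_link *link)
{
	if (ata_link_online(link))
		ata_link_printk(link, KERN_DEBUG, "PHY reports a device\n");
	else if (ata_link_offline(link))
		ata_link_printk(link, KERN_DEBUG, "PHY reports no device\n");
	else
		ata_link_printk(link, KERN_DEBUG, "link state unknown (no SCR access)\n");
}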
6466c6fd2807SJeff Garzik 
6467c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6468c6fd2807SJeff Garzik {
6469c6fd2807SJeff Garzik 	unsigned int err_mask;
6470c6fd2807SJeff Garzik 	u8 cmd;
6471c6fd2807SJeff Garzik 
6472c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6473c6fd2807SJeff Garzik 		return 0;
6474c6fd2807SJeff Garzik 
64756fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6476c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6477c6fd2807SJeff Garzik 	else
6478c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6479c6fd2807SJeff Garzik 
64804f34337bSAlan Cox 	/* This is wrong. On a failed flush we get back the LBA of the lost
64814f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
64824f34337bSAlan Cox 	   a further flush command to continue the writeback until it
64834f34337bSAlan Cox 	   does not error */
6484c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6485c6fd2807SJeff Garzik 	if (err_mask) {
6486c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6487c6fd2807SJeff Garzik 		return -EIO;
6488c6fd2807SJeff Garzik 	}
6489c6fd2807SJeff Garzik 
6490c6fd2807SJeff Garzik 	return 0;
6491c6fd2807SJeff Garzik }
6492c6fd2807SJeff Garzik 
64936ffa01d8STejun Heo #ifdef CONFIG_PM
6494cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6495cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6496cca3974eSJeff Garzik 			       int wait)
6497c6fd2807SJeff Garzik {
6498c6fd2807SJeff Garzik 	unsigned long flags;
6499c6fd2807SJeff Garzik 	int i, rc;
6500c6fd2807SJeff Garzik 
6501cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6502cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6503e3667ebfSTejun Heo 		struct ata_link *link;
6504c6fd2807SJeff Garzik 
6505c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6506c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6507c6fd2807SJeff Garzik 		 */
6508c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6509c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6510c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6511c6fd2807SJeff Garzik 		}
6512c6fd2807SJeff Garzik 
6513c6fd2807SJeff Garzik 		/* request PM ops to EH */
6514c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6515c6fd2807SJeff Garzik 
6516c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6517c6fd2807SJeff Garzik 		if (wait) {
6518c6fd2807SJeff Garzik 			rc = 0;
6519c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6520c6fd2807SJeff Garzik 		}
6521c6fd2807SJeff Garzik 
6522c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6523e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6524e3667ebfSTejun Heo 			link->eh_info.action |= action;
6525e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6526e3667ebfSTejun Heo 		}
6527c6fd2807SJeff Garzik 
6528c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6529c6fd2807SJeff Garzik 
6530c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6531c6fd2807SJeff Garzik 
6532c6fd2807SJeff Garzik 		/* wait and check result */
6533c6fd2807SJeff Garzik 		if (wait) {
6534c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6535c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6536c6fd2807SJeff Garzik 			if (rc)
6537c6fd2807SJeff Garzik 				return rc;
6538c6fd2807SJeff Garzik 		}
6539c6fd2807SJeff Garzik 	}
6540c6fd2807SJeff Garzik 
6541c6fd2807SJeff Garzik 	return 0;
6542c6fd2807SJeff Garzik }
6543c6fd2807SJeff Garzik 
6544c6fd2807SJeff Garzik /**
6545cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6546cca3974eSJeff Garzik  *	@host: host to suspend
6547c6fd2807SJeff Garzik  *	@mesg: PM message
6548c6fd2807SJeff Garzik  *
6549cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6550c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6551c6fd2807SJeff Garzik  *	to finish.
6552c6fd2807SJeff Garzik  *
6553c6fd2807SJeff Garzik  *	LOCKING:
6554c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6555c6fd2807SJeff Garzik  *
6556c6fd2807SJeff Garzik  *	RETURNS:
6557c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6558c6fd2807SJeff Garzik  */
6559cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6560c6fd2807SJeff Garzik {
65619666f400STejun Heo 	int rc;
6562c6fd2807SJeff Garzik 
6563ca77329fSKristen Carlson Accardi 	/*
6564ca77329fSKristen Carlson Accardi 	 * disable link pm on all ports before requesting
6565ca77329fSKristen Carlson Accardi 	 * any pm activity
6566ca77329fSKristen Carlson Accardi 	 */
6567ca77329fSKristen Carlson Accardi 	ata_lpm_enable(host);
6568ca77329fSKristen Carlson Accardi 
6569cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
657072ad6ec4SJeff Garzik 	if (rc == 0)
657172ad6ec4SJeff Garzik 		host->dev->power.power_state = mesg;
6572c6fd2807SJeff Garzik 	return rc;
6573c6fd2807SJeff Garzik }
6574c6fd2807SJeff Garzik 
6575c6fd2807SJeff Garzik /**
6576cca3974eSJeff Garzik  *	ata_host_resume - resume host
6577cca3974eSJeff Garzik  *	@host: host to resume
6578c6fd2807SJeff Garzik  *
6579cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6580c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6581c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
6582c6fd2807SJeff Garzik  *
6583c6fd2807SJeff Garzik  *	LOCKING:
6584c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6585c6fd2807SJeff Garzik  */
6586cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6587c6fd2807SJeff Garzik {
6588cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6589c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
659072ad6ec4SJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6591ca77329fSKristen Carlson Accardi 
6592ca77329fSKristen Carlson Accardi 	/* reenable link pm */
6593ca77329fSKristen Carlson Accardi 	ata_lpm_disable(host);
6594c6fd2807SJeff Garzik }
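
/*
 * Editorial sketch (not part of the original source): how a non-PCI LLD
 * might route its bus suspend/resume hooks through the helpers above;
 * PCI drivers normally use the ata_pci_device_suspend()/resume()
 * wrappers instead.  The function names and the controller re-init step
 * are placeholders.
 */
static int example_drv_suspend(struct device *dev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(dev);

	return ata_host_suspend(host, mesg);	/* EH quiesces all ports */
}

static int example_drv_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	/* reprogram controller registers here if the hardware lost them */
	ata_host_resume(host);			/* EH resets and revalidates */
	return 0;
}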
65956ffa01d8STejun Heo #endif
6596c6fd2807SJeff Garzik 
6597c6fd2807SJeff Garzik /**
6598c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6599c6fd2807SJeff Garzik  *	@ap: Port to initialize
6600c6fd2807SJeff Garzik  *
6601c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6602c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
6603c6fd2807SJeff Garzik  *
6604c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6605c6fd2807SJeff Garzik  *
6606c6fd2807SJeff Garzik  *	LOCKING:
6607c6fd2807SJeff Garzik  *	Inherited from caller.
6608c6fd2807SJeff Garzik  */
6609c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6610c6fd2807SJeff Garzik {
6611c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6612c6fd2807SJeff Garzik 
6613f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6614f0d36efdSTejun Heo 				      GFP_KERNEL);
6615c6fd2807SJeff Garzik 	if (!ap->prd)
6616c6fd2807SJeff Garzik 		return -ENOMEM;
6617c6fd2807SJeff Garzik 
6618c6fd2807SJeff Garzik 	return 0;
6619c6fd2807SJeff Garzik }
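
/*
 * Editorial sketch (not part of the original source): ata_port_start()
 * slotted in as the ->port_start hook of a BMDMA-style driver's port
 * operations.  Only a few fields are shown; a real driver also fills in
 * the taskfile, BMDMA and IRQ hooks.  example_port_ops is a placeholder
 * name.
 */
static const struct ata_port_operations example_port_ops = {
	.qc_prep	= ata_qc_prep,		/* fills the PRD table allocated above */
	.qc_issue	= ata_qc_issue_prot,	/* generic protocol dispatch */
	.port_start	= ata_port_start,	/* devm-managed PRD allocation */
	/* ... tf_load, bmdma_setup, irq_clear, etc. omitted ... */
};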
6620c6fd2807SJeff Garzik 
6621c6fd2807SJeff Garzik /**
6622c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6623c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6624c6fd2807SJeff Garzik  *
6625c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6626c6fd2807SJeff Garzik  *
6627c6fd2807SJeff Garzik  *	LOCKING:
6628c6fd2807SJeff Garzik  *	Inherited from caller.
6629c6fd2807SJeff Garzik  */
6630c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6631c6fd2807SJeff Garzik {
66329af5c9c9STejun Heo 	struct ata_link *link = dev->link;
66339af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6634c6fd2807SJeff Garzik 	unsigned long flags;
6635c6fd2807SJeff Garzik 
6636c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
66379af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
66389af5c9c9STejun Heo 	link->sata_spd = 0;
6639c6fd2807SJeff Garzik 
6640c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6641c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6642cca3974eSJeff Garzik 	 * host lock.
6643c6fd2807SJeff Garzik 	 */
6644c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6645c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
66463dcc323fSTejun Heo 	dev->horkage = 0;
6647c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6648c6fd2807SJeff Garzik 
6649c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6650c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6651c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6652c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6653c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6654c6fd2807SJeff Garzik }
6655c6fd2807SJeff Garzik 
6656c6fd2807SJeff Garzik /**
66574fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
66584fb37a25STejun Heo  *	@ap: ATA port link is attached to
66594fb37a25STejun Heo  *	@link: Link structure to initialize
66608989805dSTejun Heo  *	@pmp: Port multiplier port number
66614fb37a25STejun Heo  *
66624fb37a25STejun Heo  *	Initialize @link.
66634fb37a25STejun Heo  *
66644fb37a25STejun Heo  *	LOCKING:
66654fb37a25STejun Heo  *	Kernel thread context (may sleep)
66664fb37a25STejun Heo  */
6667fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
66684fb37a25STejun Heo {
66694fb37a25STejun Heo 	int i;
66704fb37a25STejun Heo 
66714fb37a25STejun Heo 	/* clear everything except for devices */
66724fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
66734fb37a25STejun Heo 
66744fb37a25STejun Heo 	link->ap = ap;
66758989805dSTejun Heo 	link->pmp = pmp;
66764fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
66774fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
66784fb37a25STejun Heo 
66794fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
66804fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
66814fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
66824fb37a25STejun Heo 
66834fb37a25STejun Heo 		dev->link = link;
66844fb37a25STejun Heo 		dev->devno = dev - link->device;
66854fb37a25STejun Heo 		ata_dev_init(dev);
66864fb37a25STejun Heo 	}
66874fb37a25STejun Heo }
66884fb37a25STejun Heo 
66894fb37a25STejun Heo /**
66904fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
66914fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
66924fb37a25STejun Heo  *
66934fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
66944fb37a25STejun Heo  *	configured value.
66954fb37a25STejun Heo  *
66964fb37a25STejun Heo  *	LOCKING:
66974fb37a25STejun Heo  *	Kernel thread context (may sleep).
66984fb37a25STejun Heo  *
66994fb37a25STejun Heo  *	RETURNS:
67004fb37a25STejun Heo  *	0 on success, -errno on failure.
67014fb37a25STejun Heo  */
6702fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
67034fb37a25STejun Heo {
670433267325STejun Heo 	u32 scontrol;
670533267325STejun Heo 	u8 spd;
67064fb37a25STejun Heo 	int rc;
67074fb37a25STejun Heo 
67084fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
67094fb37a25STejun Heo 	if (rc)
67104fb37a25STejun Heo 		return rc;
67114fb37a25STejun Heo 
67124fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
67134fb37a25STejun Heo 	if (spd)
67144fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
67154fb37a25STejun Heo 
671633267325STejun Heo 	ata_force_spd_limit(link);
671733267325STejun Heo 
67184fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
67194fb37a25STejun Heo 
67204fb37a25STejun Heo 	return 0;
67214fb37a25STejun Heo }
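
/*
 * Editorial note (not part of the original source): a worked example of
 * the bit arithmetic above.  If SControl reads back as 0x0320, the SPD
 * field is (0x0320 >> 4) & 0xf == 2 ("limit to Gen2"), so
 * hw_sata_spd_limit is masked with (1 << 2) - 1 == 0x3 and only the
 * 1.5 Gbps and 3.0 Gbps bits remain set.  An SPD field of 0 means "no
 * limit" and leaves hw_sata_spd_limit untouched.
 */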
67224fb37a25STejun Heo 
67234fb37a25STejun Heo /**
6724f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6725f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6726c6fd2807SJeff Garzik  *
6727f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6728f3187195STejun Heo  *
6729f3187195STejun Heo  *	RETURNS:
6730f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6731c6fd2807SJeff Garzik  *
6732c6fd2807SJeff Garzik  *	LOCKING:
6733f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6734c6fd2807SJeff Garzik  */
6735f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6736c6fd2807SJeff Garzik {
6737f3187195STejun Heo 	struct ata_port *ap;
6738c6fd2807SJeff Garzik 
6739f3187195STejun Heo 	DPRINTK("ENTER\n");
6740f3187195STejun Heo 
6741f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6742f3187195STejun Heo 	if (!ap)
6743f3187195STejun Heo 		return NULL;
6744f3187195STejun Heo 
6745f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6746cca3974eSJeff Garzik 	ap->lock = &host->lock;
6747c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6748f3187195STejun Heo 	ap->print_id = -1;
6749c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6750cca3974eSJeff Garzik 	ap->host = host;
6751f3187195STejun Heo 	ap->dev = host->dev;
6752c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6753c6fd2807SJeff Garzik 
6754c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6755c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6756c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6757c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6758c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6759c6fd2807SJeff Garzik #else
6760c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6761c6fd2807SJeff Garzik #endif
6762c6fd2807SJeff Garzik 
6763442eacc3SJeff Garzik 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
676465f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
676565f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6766c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6767c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
67685ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
67695ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
67705ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6771c6fd2807SJeff Garzik 
6772c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6773c6fd2807SJeff Garzik 
67748989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6775c6fd2807SJeff Garzik 
6776c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6777c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6778c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6779c6fd2807SJeff Garzik #endif
6780c6fd2807SJeff Garzik 	return ap;
6781c6fd2807SJeff Garzik }
6782c6fd2807SJeff Garzik 
6783f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6784f0d36efdSTejun Heo {
6785f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6786f0d36efdSTejun Heo 	int i;
6787f0d36efdSTejun Heo 
6788f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6789f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6790f0d36efdSTejun Heo 
6791ecef7253STejun Heo 		if (!ap)
6792ecef7253STejun Heo 			continue;
6793ecef7253STejun Heo 
67944911487aSTejun Heo 		if (ap->scsi_host)
67951aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
67961aa506e4STejun Heo 
6797633273a3STejun Heo 		kfree(ap->pmp_link);
67984911487aSTejun Heo 		kfree(ap);
67991aa506e4STejun Heo 		host->ports[i] = NULL;
68001aa506e4STejun Heo 	}
68011aa506e4STejun Heo 
68021aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6803f0d36efdSTejun Heo }
6804f0d36efdSTejun Heo 
6805c6fd2807SJeff Garzik /**
6806f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6807f3187195STejun Heo  *	@dev: generic device this host is associated with
6808f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6809f3187195STejun Heo  *
6810f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  LLD calls
6811f3187195STejun Heo  *	this function to allocate a host, initializes it fully and
6812f3187195STejun Heo  *	attaches it using ata_host_register().
6813f3187195STejun Heo  *
6814f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6815f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6816f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6817f3187195STejun Heo  *	ports will be automatically freed on registration.
6818f3187195STejun Heo  *
6819f3187195STejun Heo  *	RETURNS:
6820f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6821f3187195STejun Heo  *
6822f3187195STejun Heo  *	LOCKING:
6823f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6824f3187195STejun Heo  */
6825f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6826f3187195STejun Heo {
6827f3187195STejun Heo 	struct ata_host *host;
6828f3187195STejun Heo 	size_t sz;
6829f3187195STejun Heo 	int i;
6830f3187195STejun Heo 
6831f3187195STejun Heo 	DPRINTK("ENTER\n");
6832f3187195STejun Heo 
6833f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6834f3187195STejun Heo 		return NULL;
6835f3187195STejun Heo 
6836f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6837f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6838f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6840f3187195STejun Heo 	if (!host)
6841f3187195STejun Heo 		goto err_out;
6842f3187195STejun Heo 
6843f3187195STejun Heo 	devres_add(dev, host);
6844f3187195STejun Heo 	dev_set_drvdata(dev, host);
6845f3187195STejun Heo 
6846f3187195STejun Heo 	spin_lock_init(&host->lock);
6847f3187195STejun Heo 	host->dev = dev;
6848f3187195STejun Heo 	host->n_ports = max_ports;
6849f3187195STejun Heo 
6850f3187195STejun Heo 	/* allocate ports bound to this host */
6851f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6852f3187195STejun Heo 		struct ata_port *ap;
6853f3187195STejun Heo 
6854f3187195STejun Heo 		ap = ata_port_alloc(host);
6855f3187195STejun Heo 		if (!ap)
6856f3187195STejun Heo 			goto err_out;
6857f3187195STejun Heo 
6858f3187195STejun Heo 		ap->port_no = i;
6859f3187195STejun Heo 		host->ports[i] = ap;
6860f3187195STejun Heo 	}
6861f3187195STejun Heo 
6862f3187195STejun Heo 	devres_remove_group(dev, NULL);
6863f3187195STejun Heo 	return host;
6864f3187195STejun Heo 
6865f3187195STejun Heo  err_out:
6866f3187195STejun Heo 	devres_release_group(dev, NULL);
6867f3187195STejun Heo 	return NULL;
6868f3187195STejun Heo }
6869f3187195STejun Heo 
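/*
 * Illustrative sketch, not part of libata: how a hypothetical LLD might
 * use ata_host_alloc() when the final port count is only known after
 * poking the hardware.  FOO_MAX_PORTS, foo_count_ports() and
 * foo_port_ops are assumptions for illustration only.
 *
 *	struct ata_host *host;
 *	int i, nr;
 *
 *	host = ata_host_alloc(&pdev->dev, FOO_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *
 *	nr = foo_count_ports(pdev);	(hypothetical helper)
 *	host->n_ports = nr;		(shrink before ata_host_register())
 *	for (i = 0; i < nr; i++)
 *		host->ports[i]->ops = &foo_port_ops;
 *
 * Ports beyond the reduced n_ports are freed automatically on
 * registration, as described above.
 */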
6870f3187195STejun Heo /**
6871f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6872f5cda257STejun Heo  *	@dev: generic device this host is associated with
6873f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6874f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6875f5cda257STejun Heo  *
6876f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6877f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6878f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6879f5cda257STejun Heo  *
6880f5cda257STejun Heo  *	RETURNS:
6881f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6882f5cda257STejun Heo  *
6883f5cda257STejun Heo  *	LOCKING:
6884f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6885f5cda257STejun Heo  */
6886f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6887f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6888f5cda257STejun Heo 				      int n_ports)
6889f5cda257STejun Heo {
6890f5cda257STejun Heo 	const struct ata_port_info *pi;
6891f5cda257STejun Heo 	struct ata_host *host;
6892f5cda257STejun Heo 	int i, j;
6893f5cda257STejun Heo 
6894f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6895f5cda257STejun Heo 	if (!host)
6896f5cda257STejun Heo 		return NULL;
6897f5cda257STejun Heo 
6898f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6899f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6900f5cda257STejun Heo 
6901f5cda257STejun Heo 		if (ppi[j])
6902f5cda257STejun Heo 			pi = ppi[j++];
6903f5cda257STejun Heo 
6904f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6905f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6906f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6907f5cda257STejun Heo 		ap->flags |= pi->flags;
69080c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6909f5cda257STejun Heo 		ap->ops = pi->port_ops;
6910f5cda257STejun Heo 
6911f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6912f5cda257STejun Heo 			host->ops = pi->port_ops;
6913f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6914f5cda257STejun Heo 			host->private_data = pi->private_data;
6915f5cda257STejun Heo 	}
6916f5cda257STejun Heo 
6917f5cda257STejun Heo 	return host;
6918f5cda257STejun Heo }
6919f5cda257STejun Heo 
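/*
 * Illustrative sketch, not part of libata: a hypothetical two-port
 * controller whose ports share a single ata_port_info.  Because @ppi is
 * NULL terminated, the one entry below is reused for the second port.
 * foo_port_info and foo_port_ops are assumptions for illustration only.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */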
692032ebbc0cSTejun Heo static void ata_host_stop(struct device *gendev, void *res)
692132ebbc0cSTejun Heo {
692232ebbc0cSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
692332ebbc0cSTejun Heo 	int i;
692432ebbc0cSTejun Heo 
692532ebbc0cSTejun Heo 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
692632ebbc0cSTejun Heo 
692732ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
692832ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
692932ebbc0cSTejun Heo 
693032ebbc0cSTejun Heo 		if (ap->ops->port_stop)
693132ebbc0cSTejun Heo 			ap->ops->port_stop(ap);
693232ebbc0cSTejun Heo 	}
693332ebbc0cSTejun Heo 
693432ebbc0cSTejun Heo 	if (host->ops->host_stop)
693532ebbc0cSTejun Heo 		host->ops->host_stop(host);
693632ebbc0cSTejun Heo }
693732ebbc0cSTejun Heo 
6938f5cda257STejun Heo /**
6939ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6940ecef7253STejun Heo  *	@host: ATA host to start ports for
6941ecef7253STejun Heo  *
6942ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6943ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6944ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6945f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6946f3187195STejun Heo  *	first non-dummy port ops.
6947ecef7253STejun Heo  *
6948ecef7253STejun Heo  *	LOCKING:
6949ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6950ecef7253STejun Heo  *
6951ecef7253STejun Heo  *	RETURNS:
6952ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6953ecef7253STejun Heo  */
6954ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6955ecef7253STejun Heo {
695632ebbc0cSTejun Heo 	int have_stop = 0;
695732ebbc0cSTejun Heo 	void *start_dr = NULL;
6958ecef7253STejun Heo 	int i, rc;
6959ecef7253STejun Heo 
6960ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6961ecef7253STejun Heo 		return 0;
6962ecef7253STejun Heo 
6963ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6964ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6965ecef7253STejun Heo 
6966f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6967f3187195STejun Heo 			host->ops = ap->ops;
6968f3187195STejun Heo 
696932ebbc0cSTejun Heo 		if (ap->ops->port_stop)
697032ebbc0cSTejun Heo 			have_stop = 1;
697132ebbc0cSTejun Heo 	}
697232ebbc0cSTejun Heo 
697332ebbc0cSTejun Heo 	if (host->ops->host_stop)
697432ebbc0cSTejun Heo 		have_stop = 1;
697532ebbc0cSTejun Heo 
697632ebbc0cSTejun Heo 	if (have_stop) {
697732ebbc0cSTejun Heo 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
697832ebbc0cSTejun Heo 		if (!start_dr)
697932ebbc0cSTejun Heo 			return -ENOMEM;
698032ebbc0cSTejun Heo 	}
698132ebbc0cSTejun Heo 
698232ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
698332ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
698432ebbc0cSTejun Heo 
6985ecef7253STejun Heo 		if (ap->ops->port_start) {
6986ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6987ecef7253STejun Heo 			if (rc) {
69880f9fe9b7SAlan Cox 				if (rc != -ENODEV)
69890f757743SAndrew Morton 					dev_printk(KERN_ERR, host->dev,
69900f757743SAndrew Morton 						"failed to start port %d "
69910f757743SAndrew Morton 						"(errno=%d)\n", i, rc);
6992ecef7253STejun Heo 				goto err_out;
6993ecef7253STejun Heo 			}
6994ecef7253STejun Heo 		}
6995ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6996ecef7253STejun Heo 	}
6997ecef7253STejun Heo 
699832ebbc0cSTejun Heo 	if (start_dr)
699932ebbc0cSTejun Heo 		devres_add(host->dev, start_dr);
7000ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
7001ecef7253STejun Heo 	return 0;
7002ecef7253STejun Heo 
7003ecef7253STejun Heo  err_out:
7004ecef7253STejun Heo 	while (--i >= 0) {
7005ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
7006ecef7253STejun Heo 
7007ecef7253STejun Heo 		if (ap->ops->port_stop)
7008ecef7253STejun Heo 			ap->ops->port_stop(ap);
7009ecef7253STejun Heo 	}
701032ebbc0cSTejun Heo 	devres_free(start_dr);
7011ecef7253STejun Heo 	return rc;
7012ecef7253STejun Heo }
7013ecef7253STejun Heo 
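/*
 * Illustrative sketch, not part of libata: an LLD that cannot use
 * ata_host_activate() (for example because it needs extra setup between
 * starting and registering, or requests more than one IRQ) drives the
 * two steps itself.  foo_sht is an assumption for illustration only.
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *
 *	(driver-specific setup may go here)
 *
 *	return ata_host_register(host, &foo_sht);
 *
 * Calling ata_host_start() again later is harmless since the started
 * state is recorded in host->flags.
 */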
7014ecef7253STejun Heo /**
7015cca3974eSJeff Garzik  *	ata_host_init - Initialize a host struct
7016cca3974eSJeff Garzik  *	@host:	host to initialize
7017cca3974eSJeff Garzik  *	@dev:	device host is attached to
7018cca3974eSJeff Garzik  *	@flags:	host flags
7019c6fd2807SJeff Garzik  *	@ops:	port_ops
7020c6fd2807SJeff Garzik  *
7021c6fd2807SJeff Garzik  *	LOCKING:
7022c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
7023c6fd2807SJeff Garzik  *
7024c6fd2807SJeff Garzik  */
7025f3187195STejun Heo /* KILLME - the only user left is ipr */
7026cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
7027cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
7028c6fd2807SJeff Garzik {
7029cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
7030cca3974eSJeff Garzik 	host->dev = dev;
7031cca3974eSJeff Garzik 	host->flags = flags;
7032cca3974eSJeff Garzik 	host->ops = ops;
7033c6fd2807SJeff Garzik }
7034c6fd2807SJeff Garzik 
7035c6fd2807SJeff Garzik /**
7036f3187195STejun Heo  *	ata_host_register - register initialized ATA host
7037f3187195STejun Heo  *	@host: ATA host to register
7038f3187195STejun Heo  *	@sht: template for SCSI host
7039c6fd2807SJeff Garzik  *
7040f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
7041f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
7042f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
7043f3187195STejun Heo  *	probes attached devices.
7044c6fd2807SJeff Garzik  *
7045c6fd2807SJeff Garzik  *	LOCKING:
7046f3187195STejun Heo  *	Inherited from calling layer (may sleep).
7047c6fd2807SJeff Garzik  *
7048c6fd2807SJeff Garzik  *	RETURNS:
7049f3187195STejun Heo  *	0 on success, -errno otherwise.
7050c6fd2807SJeff Garzik  */
7051f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7052c6fd2807SJeff Garzik {
7053f3187195STejun Heo 	int i, rc;
7054c6fd2807SJeff Garzik 
7055f3187195STejun Heo 	/* host must have been started */
7056f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
7057f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
7058f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
7059f3187195STejun Heo 		WARN_ON(1);
7060f3187195STejun Heo 		return -EINVAL;
706102f076aaSAlan Cox 	}
7062f0d36efdSTejun Heo 
7063f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
7064f3187195STejun Heo 	 * determine the exact number of ports to allocate at
7065f3187195STejun Heo 	 * allocation time.
7066f3187195STejun Heo 	 */
7067f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
7068f3187195STejun Heo 		kfree(host->ports[i]);
7069f0d36efdSTejun Heo 
7070f3187195STejun Heo 	/* give ports names and add SCSI hosts */
7071f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
7072f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
7073c6fd2807SJeff Garzik 
7074f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
7075ecef7253STejun Heo 	if (rc)
7076f3187195STejun Heo 		return rc;
7077ecef7253STejun Heo 
7078fafbae87STejun Heo 	/* associate with ACPI nodes */
7079fafbae87STejun Heo 	ata_acpi_associate(host);
7080fafbae87STejun Heo 
7081f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
7082cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7083cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7084f3187195STejun Heo 		unsigned long xfer_mask;
7085f3187195STejun Heo 
7086f3187195STejun Heo 		/* set SATA cable type if still unset */
7087f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7088f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
7089c6fd2807SJeff Garzik 
7090c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
70914fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
7092c6fd2807SJeff Garzik 
7093cbcdd875STejun Heo 		/* print per-port info to dmesg */
7094f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7095f3187195STejun Heo 					      ap->udma_mask);
7096f3187195STejun Heo 
7097abf6e8edSTejun Heo 		if (!ata_port_is_dummy(ap)) {
7098cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
7099cbcdd875STejun Heo 					"%cATA max %s %s\n",
7100a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7101f3187195STejun Heo 					ata_mode_string(xfer_mask),
7102cbcdd875STejun Heo 					ap->link.eh_info.desc);
7103abf6e8edSTejun Heo 			ata_ehi_clear_desc(&ap->link.eh_info);
7104abf6e8edSTejun Heo 		} else
7105f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7106c6fd2807SJeff Garzik 	}
7107c6fd2807SJeff Garzik 
7108f3187195STejun Heo 	/* perform each probe synchronously */
7109f3187195STejun Heo 	DPRINTK("probe begin\n");
7110f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
7111f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
7112f3187195STejun Heo 
7113f3187195STejun Heo 		/* probe */
7114c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
71159af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
7116c6fd2807SJeff Garzik 			unsigned long flags;
7117c6fd2807SJeff Garzik 
7118c6fd2807SJeff Garzik 			ata_port_probe(ap);
7119c6fd2807SJeff Garzik 
7120c6fd2807SJeff Garzik 			/* kick EH for boot probing */
7121c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
7122c6fd2807SJeff Garzik 
7123f58229f8STejun Heo 			ehi->probe_mask =
7124f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
7125c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
7126c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7127c6fd2807SJeff Garzik 
7128f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7129c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
7130c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
7131c6fd2807SJeff Garzik 
7132c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
7133c6fd2807SJeff Garzik 
7134c6fd2807SJeff Garzik 			/* wait for EH to finish */
7135c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
7136c6fd2807SJeff Garzik 		} else {
713744877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7138c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
713944877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
7140c6fd2807SJeff Garzik 
7141c6fd2807SJeff Garzik 			if (rc) {
7142c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
7143c6fd2807SJeff Garzik 				 * Current libata behavior will
7144c6fd2807SJeff Garzik 				 * tear down everything when
7145c6fd2807SJeff Garzik 				 * the module is removed
7146c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
7147c6fd2807SJeff Garzik 				 */
7148c6fd2807SJeff Garzik 			}
7149c6fd2807SJeff Garzik 		}
7150c6fd2807SJeff Garzik 	}
7151c6fd2807SJeff Garzik 
7152c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
7153c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
7154cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7155cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7156c6fd2807SJeff Garzik 
71571ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
7158ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
7159c6fd2807SJeff Garzik 	}
7160c6fd2807SJeff Garzik 
7161f3187195STejun Heo 	return 0;
7162f3187195STejun Heo }
7163f3187195STejun Heo 
7164f3187195STejun Heo /**
7165f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
7166f5cda257STejun Heo  *	@host: target ATA host
7167f5cda257STejun Heo  *	@irq: IRQ to request
7168f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
7169f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
7170f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
7171f5cda257STejun Heo  *
7172f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
7173f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
7174f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
7175f5cda257STejun Heo  *	arguments and performs the three steps in one go.
7176f5cda257STejun Heo  *
71773d46b2e2SPaul Mundt  *	An invalid IRQ skips the IRQ registration and expects the host to
71783d46b2e2SPaul Mundt  *	have set polling mode on the port. In this case, @irq_handler
71793d46b2e2SPaul Mundt  *	should be NULL.
71803d46b2e2SPaul Mundt  *
7181f5cda257STejun Heo  *	LOCKING:
7182f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
7183f5cda257STejun Heo  *
7184f5cda257STejun Heo  *	RETURNS:
7185f5cda257STejun Heo  *	0 on success, -errno otherwise.
7186f5cda257STejun Heo  */
7187f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
7188f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
7189f5cda257STejun Heo 		      struct scsi_host_template *sht)
7190f5cda257STejun Heo {
7191cbcdd875STejun Heo 	int i, rc;
7192f5cda257STejun Heo 
7193f5cda257STejun Heo 	rc = ata_host_start(host);
7194f5cda257STejun Heo 	if (rc)
7195f5cda257STejun Heo 		return rc;
7196f5cda257STejun Heo 
71973d46b2e2SPaul Mundt 	/* Special case for polling mode */
71983d46b2e2SPaul Mundt 	if (!irq) {
71993d46b2e2SPaul Mundt 		WARN_ON(irq_handler);
72003d46b2e2SPaul Mundt 		return ata_host_register(host, sht);
72013d46b2e2SPaul Mundt 	}
72023d46b2e2SPaul Mundt 
7203f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7204f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
7205f5cda257STejun Heo 	if (rc)
7206f5cda257STejun Heo 		return rc;
7207f5cda257STejun Heo 
7208cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
7209cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
72104031826bSTejun Heo 
7211f5cda257STejun Heo 	rc = ata_host_register(host, sht);
7212f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
7213f5cda257STejun Heo 	if (rc)
7214f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
7215f5cda257STejun Heo 
7216f5cda257STejun Heo 	return rc;
7217f5cda257STejun Heo }
7218f5cda257STejun Heo 
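/*
 * Illustrative sketch, not part of libata: the usual tail of a
 * hypothetical LLD probe routine.  foo_interrupt and foo_sht are
 * assumptions for illustration only.
 *
 *	return ata_host_activate(host, pdev->irq, foo_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 *
 * For polling-only hardware, pass irq == 0 and a NULL handler as
 * described above:
 *
 *	return ata_host_activate(host, 0, NULL, 0, &foo_sht);
 */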
7219f5cda257STejun Heo /**
7220c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation of device removal
7221c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
7222c6fd2807SJeff Garzik  *
7223c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
7224c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
7225c6fd2807SJeff Garzik  *	be quiescent on return from this function.
7226c6fd2807SJeff Garzik  *
7227c6fd2807SJeff Garzik  *	LOCKING:
7228c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
7229c6fd2807SJeff Garzik  */
7230741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap)
7231c6fd2807SJeff Garzik {
7232c6fd2807SJeff Garzik 	unsigned long flags;
723341bda9c9STejun Heo 	struct ata_link *link;
7234f58229f8STejun Heo 	struct ata_device *dev;
7235c6fd2807SJeff Garzik 
7236c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
7237c6fd2807SJeff Garzik 		goto skip_eh;
7238c6fd2807SJeff Garzik 
7239c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
7240c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7241c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
7242c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7243c6fd2807SJeff Garzik 
7244c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
7245c6fd2807SJeff Garzik 
72467f9ad9b8STejun Heo 	/* EH is now guaranteed to see UNLOADING - EH context belongs
72477f9ad9b8STejun Heo 	 * to us.  Disable all existing devices.
7248c6fd2807SJeff Garzik 	 */
724941bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
725041bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
7251f58229f8STejun Heo 			ata_dev_disable(dev);
725241bda9c9STejun Heo 	}
7253c6fd2807SJeff Garzik 
7254c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
7255c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
7256c6fd2807SJeff Garzik 	 * target.
7257c6fd2807SJeff Garzik 	 */
7258c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7259c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
7260c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7261c6fd2807SJeff Garzik 
7262c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
726345a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
7264c6fd2807SJeff Garzik 
7265c6fd2807SJeff Garzik  skip_eh:
7266c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
7267cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
7268c6fd2807SJeff Garzik }
7269c6fd2807SJeff Garzik 
7270c6fd2807SJeff Garzik /**
72710529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
72720529c159STejun Heo  *	@host: Host to detach
72730529c159STejun Heo  *
72740529c159STejun Heo  *	Detach all ports of @host.
72750529c159STejun Heo  *
72760529c159STejun Heo  *	LOCKING:
72770529c159STejun Heo  *	Kernel thread context (may sleep).
72780529c159STejun Heo  */
72790529c159STejun Heo void ata_host_detach(struct ata_host *host)
72800529c159STejun Heo {
72810529c159STejun Heo 	int i;
72820529c159STejun Heo 
72830529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
72840529c159STejun Heo 		ata_port_detach(host->ports[i]);
7285562f0c2dSTejun Heo 
7286562f0c2dSTejun Heo 	/* the host is dead now, dissociate ACPI */
7287562f0c2dSTejun Heo 	ata_acpi_dissociate(host);
72880529c159STejun Heo }
72890529c159STejun Heo 
7290c6fd2807SJeff Garzik /**
7291c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
7292c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
7293c6fd2807SJeff Garzik  *
7294c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
7295c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7296c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
7297c6fd2807SJeff Garzik  *	relative to cmd_addr.
7298c6fd2807SJeff Garzik  *
7299c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7300c6fd2807SJeff Garzik  */
7301c6fd2807SJeff Garzik 
7302c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
7303c6fd2807SJeff Garzik {
7304c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7305c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7306c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7307c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7308c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7309c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7310c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7311c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7312c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7313c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7314c6fd2807SJeff Garzik }
7315c6fd2807SJeff Garzik 
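/*
 * Illustrative sketch, not part of libata: a hypothetical LLD fills in
 * the addresses ata_std_ports() does not derive and lets it compute the
 * standard taskfile register offsets.  cmd_base and ctl_base stand for
 * already mapped register blocks and are assumptions for illustration.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->ctl_addr = ctl_base;
 *	ioaddr->altstatus_addr = ctl_base;
 *	ata_std_ports(ioaddr);
 */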
7316c6fd2807SJeff Garzik 
7317c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7318c6fd2807SJeff Garzik 
7319c6fd2807SJeff Garzik /**
7320c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
7321c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
7322c6fd2807SJeff Garzik  *
7323b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
7324b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
7325b878ca5dSTejun Heo  *	release is handled via devres.
7326c6fd2807SJeff Garzik  *
7327c6fd2807SJeff Garzik  *	LOCKING:
7328c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
7329c6fd2807SJeff Garzik  */
7330c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
7331c6fd2807SJeff Garzik {
73322855568bSJeff Garzik 	struct device *dev = &pdev->dev;
7333cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
7334c6fd2807SJeff Garzik 
7335f0d36efdSTejun Heo 	ata_host_detach(host);
7336c6fd2807SJeff Garzik }
7337c6fd2807SJeff Garzik 
7338c6fd2807SJeff Garzik /* move to PCI subsystem */
7339c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7340c6fd2807SJeff Garzik {
7341c6fd2807SJeff Garzik 	unsigned long tmp = 0;
7342c6fd2807SJeff Garzik 
7343c6fd2807SJeff Garzik 	switch (bits->width) {
7344c6fd2807SJeff Garzik 	case 1: {
7345c6fd2807SJeff Garzik 		u8 tmp8 = 0;
7346c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
7347c6fd2807SJeff Garzik 		tmp = tmp8;
7348c6fd2807SJeff Garzik 		break;
7349c6fd2807SJeff Garzik 	}
7350c6fd2807SJeff Garzik 	case 2: {
7351c6fd2807SJeff Garzik 		u16 tmp16 = 0;
7352c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
7353c6fd2807SJeff Garzik 		tmp = tmp16;
7354c6fd2807SJeff Garzik 		break;
7355c6fd2807SJeff Garzik 	}
7356c6fd2807SJeff Garzik 	case 4: {
7357c6fd2807SJeff Garzik 		u32 tmp32 = 0;
7358c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
7359c6fd2807SJeff Garzik 		tmp = tmp32;
7360c6fd2807SJeff Garzik 		break;
7361c6fd2807SJeff Garzik 	}
7362c6fd2807SJeff Garzik 
7363c6fd2807SJeff Garzik 	default:
7364c6fd2807SJeff Garzik 		return -EINVAL;
7365c6fd2807SJeff Garzik 	}
7366c6fd2807SJeff Garzik 
7367c6fd2807SJeff Garzik 	tmp &= bits->mask;
7368c6fd2807SJeff Garzik 
7369c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
7370c6fd2807SJeff Garzik }
7371c6fd2807SJeff Garzik 
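/*
 * Illustrative sketch, not part of libata: PATA LLDs commonly call
 * pci_test_config_bits() from their prereset hook to skip channels that
 * are disabled in PCI config space.  The register offsets and bits in
 * foo_enable_bits are made up for illustration only.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	(reg, width, mask, val)
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */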
73726ffa01d8STejun Heo #ifdef CONFIG_PM
7373c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7374c6fd2807SJeff Garzik {
7375c6fd2807SJeff Garzik 	pci_save_state(pdev);
7376c6fd2807SJeff Garzik 	pci_disable_device(pdev);
73774c90d971STejun Heo 
73783a2d5b70SRafael J. Wysocki 	if (mesg.event & PM_EVENT_SLEEP)
7379c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
7380c6fd2807SJeff Garzik }
7381c6fd2807SJeff Garzik 
7382553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
7383c6fd2807SJeff Garzik {
7384553c4aa6STejun Heo 	int rc;
7385553c4aa6STejun Heo 
7386c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
7387c6fd2807SJeff Garzik 	pci_restore_state(pdev);
7388553c4aa6STejun Heo 
7389f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
7390553c4aa6STejun Heo 	if (rc) {
7391553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
7392553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
7393553c4aa6STejun Heo 		return rc;
7394553c4aa6STejun Heo 	}
7395553c4aa6STejun Heo 
7396c6fd2807SJeff Garzik 	pci_set_master(pdev);
7397553c4aa6STejun Heo 	return 0;
7398c6fd2807SJeff Garzik }
7399c6fd2807SJeff Garzik 
7400c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7401c6fd2807SJeff Garzik {
7402cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7403c6fd2807SJeff Garzik 	int rc = 0;
7404c6fd2807SJeff Garzik 
7405cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
7406c6fd2807SJeff Garzik 	if (rc)
7407c6fd2807SJeff Garzik 		return rc;
7408c6fd2807SJeff Garzik 
7409c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
7410c6fd2807SJeff Garzik 
7411c6fd2807SJeff Garzik 	return 0;
7412c6fd2807SJeff Garzik }
7413c6fd2807SJeff Garzik 
7414c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
7415c6fd2807SJeff Garzik {
7416cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7417553c4aa6STejun Heo 	int rc;
7418c6fd2807SJeff Garzik 
7419553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
7420553c4aa6STejun Heo 	if (rc == 0)
7421cca3974eSJeff Garzik 		ata_host_resume(host);
7422553c4aa6STejun Heo 	return rc;
7423c6fd2807SJeff Garzik }
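/*
 * Illustrative sketch, not part of libata: a hypothetical PCI LLD with
 * no special power-management needs can point its pci_driver hooks
 * straight at these helpers.  foo_pci_driver, foo_pci_ids and
 * foo_init_one are assumptions for illustration only.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */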
74246ffa01d8STejun Heo #endif /* CONFIG_PM */
74256ffa01d8STejun Heo 
7426c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7427c6fd2807SJeff Garzik 
742833267325STejun Heo static int __init ata_parse_force_one(char **cur,
742933267325STejun Heo 				      struct ata_force_ent *force_ent,
743033267325STejun Heo 				      const char **reason)
743133267325STejun Heo {
743233267325STejun Heo 	/* FIXME: Currently, there's no way to tag init const data and
743333267325STejun Heo 	 * using __initdata causes build failure on some versions of
743433267325STejun Heo 	 * gcc.  Once __initdataconst is implemented, add const to the
743533267325STejun Heo 	 * following structure.
743633267325STejun Heo 	 */
743733267325STejun Heo 	static struct ata_force_param force_tbl[] __initdata = {
743833267325STejun Heo 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
743933267325STejun Heo 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
744033267325STejun Heo 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
744133267325STejun Heo 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
744233267325STejun Heo 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
744333267325STejun Heo 		{ "sata",	.cbl		= ATA_CBL_SATA },
744433267325STejun Heo 		{ "1.5Gbps",	.spd_limit	= 1 },
744533267325STejun Heo 		{ "3.0Gbps",	.spd_limit	= 2 },
744633267325STejun Heo 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
744733267325STejun Heo 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
744833267325STejun Heo 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
744933267325STejun Heo 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
745033267325STejun Heo 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
745133267325STejun Heo 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
745233267325STejun Heo 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
745333267325STejun Heo 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
745433267325STejun Heo 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
745533267325STejun Heo 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
745633267325STejun Heo 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
745733267325STejun Heo 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
745833267325STejun Heo 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
745933267325STejun Heo 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
746033267325STejun Heo 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
746133267325STejun Heo 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
746233267325STejun Heo 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
746333267325STejun Heo 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
746433267325STejun Heo 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
746533267325STejun Heo 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
746633267325STejun Heo 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
746733267325STejun Heo 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
746833267325STejun Heo 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
746933267325STejun Heo 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
747033267325STejun Heo 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
747133267325STejun Heo 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
747233267325STejun Heo 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
747333267325STejun Heo 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
747433267325STejun Heo 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
747533267325STejun Heo 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
747633267325STejun Heo 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
747733267325STejun Heo 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
747833267325STejun Heo 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
747933267325STejun Heo 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
748033267325STejun Heo 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
748133267325STejun Heo 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
748233267325STejun Heo 	};
748333267325STejun Heo 	char *start = *cur, *p = *cur;
748433267325STejun Heo 	char *id, *val, *endp;
748533267325STejun Heo 	const struct ata_force_param *match_fp = NULL;
748633267325STejun Heo 	int nr_matches = 0, i;
748733267325STejun Heo 
748833267325STejun Heo 	/* find where this param ends and update *cur */
748933267325STejun Heo 	while (*p != '\0' && *p != ',')
749033267325STejun Heo 		p++;
749133267325STejun Heo 
749233267325STejun Heo 	if (*p == '\0')
749333267325STejun Heo 		*cur = p;
749433267325STejun Heo 	else
749533267325STejun Heo 		*cur = p + 1;
749633267325STejun Heo 
749733267325STejun Heo 	*p = '\0';
749833267325STejun Heo 
749933267325STejun Heo 	/* parse */
750033267325STejun Heo 	p = strchr(start, ':');
750133267325STejun Heo 	if (!p) {
750233267325STejun Heo 		val = strstrip(start);
750333267325STejun Heo 		goto parse_val;
750433267325STejun Heo 	}
750533267325STejun Heo 	*p = '\0';
750633267325STejun Heo 
750733267325STejun Heo 	id = strstrip(start);
750833267325STejun Heo 	val = strstrip(p + 1);
750933267325STejun Heo 
751033267325STejun Heo 	/* parse id */
751133267325STejun Heo 	p = strchr(id, '.');
751233267325STejun Heo 	if (p) {
751333267325STejun Heo 		*p++ = '\0';
751433267325STejun Heo 		force_ent->device = simple_strtoul(p, &endp, 10);
751533267325STejun Heo 		if (p == endp || *endp != '\0') {
751633267325STejun Heo 			*reason = "invalid device";
751733267325STejun Heo 			return -EINVAL;
751833267325STejun Heo 		}
751933267325STejun Heo 	}
752033267325STejun Heo 
752133267325STejun Heo 	force_ent->port = simple_strtoul(id, &endp, 10);
752233267325STejun Heo 	if (id == endp || *endp != '\0') {
752333267325STejun Heo 		*reason = "invalid port/link";
752433267325STejun Heo 		return -EINVAL;
752533267325STejun Heo 	}
752633267325STejun Heo 
752733267325STejun Heo  parse_val:
752833267325STejun Heo 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
752933267325STejun Heo 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
753033267325STejun Heo 		const struct ata_force_param *fp = &force_tbl[i];
753133267325STejun Heo 
753233267325STejun Heo 		if (strncasecmp(val, fp->name, strlen(val)))
753333267325STejun Heo 			continue;
753433267325STejun Heo 
753533267325STejun Heo 		nr_matches++;
753633267325STejun Heo 		match_fp = fp;
753733267325STejun Heo 
753833267325STejun Heo 		if (strcasecmp(val, fp->name) == 0) {
753933267325STejun Heo 			nr_matches = 1;
754033267325STejun Heo 			break;
754133267325STejun Heo 		}
754233267325STejun Heo 	}
754333267325STejun Heo 
754433267325STejun Heo 	if (!nr_matches) {
754533267325STejun Heo 		*reason = "unknown value";
754633267325STejun Heo 		return -EINVAL;
754733267325STejun Heo 	}
754833267325STejun Heo 	if (nr_matches > 1) {
754933267325STejun Heo 		*reason = "ambiguous value";
755033267325STejun Heo 		return -EINVAL;
755133267325STejun Heo 	}
755233267325STejun Heo 
755333267325STejun Heo 	force_ent->param = *match_fp;
755433267325STejun Heo 
755533267325STejun Heo 	return 0;
755633267325STejun Heo }
755733267325STejun Heo 
755833267325STejun Heo static void __init ata_parse_force_param(void)
755933267325STejun Heo {
756033267325STejun Heo 	int idx = 0, size = 1;
756133267325STejun Heo 	int last_port = -1, last_device = -1;
756233267325STejun Heo 	char *p, *cur, *next;
756333267325STejun Heo 
756433267325STejun Heo 	/* calculate maximum number of params and allocate force_tbl */
756533267325STejun Heo 	for (p = ata_force_param_buf; *p; p++)
756633267325STejun Heo 		if (*p == ',')
756733267325STejun Heo 			size++;
756833267325STejun Heo 
756933267325STejun Heo 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
757033267325STejun Heo 	if (!ata_force_tbl) {
757133267325STejun Heo 		printk(KERN_WARNING "ata: failed to extend force table, "
757233267325STejun Heo 		       "libata.force ignored\n");
757333267325STejun Heo 		return;
757433267325STejun Heo 	}
757533267325STejun Heo 
757633267325STejun Heo 	/* parse and populate the table */
757733267325STejun Heo 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
757833267325STejun Heo 		const char *reason = "";
757933267325STejun Heo 		struct ata_force_ent te = { .port = -1, .device = -1 };
758033267325STejun Heo 
758133267325STejun Heo 		next = cur;
758233267325STejun Heo 		if (ata_parse_force_one(&next, &te, &reason)) {
758333267325STejun Heo 			printk(KERN_WARNING "ata: failed to parse force "
758433267325STejun Heo 			       "parameter \"%s\" (%s)\n",
758533267325STejun Heo 			       cur, reason);
758633267325STejun Heo 			continue;
758733267325STejun Heo 		}
758833267325STejun Heo 
758933267325STejun Heo 		if (te.port == -1) {
759033267325STejun Heo 			te.port = last_port;
759133267325STejun Heo 			te.device = last_device;
759233267325STejun Heo 		}
759333267325STejun Heo 
759433267325STejun Heo 		ata_force_tbl[idx++] = te;
759533267325STejun Heo 
759633267325STejun Heo 		last_port = te.port;
759733267325STejun Heo 		last_device = te.device;
759833267325STejun Heo 	}
759933267325STejun Heo 
760033267325STejun Heo 	ata_force_tbl_size = idx;
760133267325STejun Heo }
7602c6fd2807SJeff Garzik 
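/*
 * Illustrative note derived from the parser above: libata.force takes a
 * comma separated list of "[ID:]VAL" entries where ID is PORT[.DEVICE]
 * and VAL is one of the force_tbl names (a unique prefix such as "1.5"
 * for "1.5Gbps" also matches).  Example values, assuming a two-port
 * machine:
 *
 *	libata.force=1.5Gbps		limit all links to 1.5Gbps
 *	libata.force=2:noncq		turn off NCQ on port 2
 *	libata.force=1:80c,2.00:udma4	per-port cable, per-device mode
 */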
7603c6fd2807SJeff Garzik static int __init ata_init(void)
7604c6fd2807SJeff Garzik {
7605c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
760633267325STejun Heo 
760733267325STejun Heo 	ata_parse_force_param();
760833267325STejun Heo 
7609c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7610c6fd2807SJeff Garzik 	if (!ata_wq)
7611c6fd2807SJeff Garzik 		return -ENOMEM;
7612c6fd2807SJeff Garzik 
7613c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7614c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7615c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7616c6fd2807SJeff Garzik 		return -ENOMEM;
7617c6fd2807SJeff Garzik 	}
7618c6fd2807SJeff Garzik 
7619c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7620c6fd2807SJeff Garzik 	return 0;
7621c6fd2807SJeff Garzik }
7622c6fd2807SJeff Garzik 
7623c6fd2807SJeff Garzik static void __exit ata_exit(void)
7624c6fd2807SJeff Garzik {
762533267325STejun Heo 	kfree(ata_force_tbl);
7626c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7627c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7628c6fd2807SJeff Garzik }
7629c6fd2807SJeff Garzik 
7630a4625085SBrian King subsys_initcall(ata_init);
7631c6fd2807SJeff Garzik module_exit(ata_exit);
7632c6fd2807SJeff Garzik 
7633c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7634c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7635c6fd2807SJeff Garzik 
7636c6fd2807SJeff Garzik int ata_ratelimit(void)
7637c6fd2807SJeff Garzik {
7638c6fd2807SJeff Garzik 	int rc;
7639c6fd2807SJeff Garzik 	unsigned long flags;
7640c6fd2807SJeff Garzik 
7641c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7642c6fd2807SJeff Garzik 
7643c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7644c6fd2807SJeff Garzik 		rc = 1;
7645c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7646c6fd2807SJeff Garzik 	} else
7647c6fd2807SJeff Garzik 		rc = 0;
7648c6fd2807SJeff Garzik 
7649c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7650c6fd2807SJeff Garzik 
7651c6fd2807SJeff Garzik 	return rc;
7652c6fd2807SJeff Garzik }
7653c6fd2807SJeff Garzik 
7654c6fd2807SJeff Garzik /**
7655c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7656c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7657c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7658c6fd2807SJeff Garzik  *	@val: Wait condition
7659c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7660c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7661c6fd2807SJeff Garzik  *
7662c6fd2807SJeff Garzik  *	Waiting for some bits of register to change is a common
7663c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
7664c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
7665c6fd2807SJeff Garzik  *
7666c6fd2807SJeff Garzik  *	(*@reg & mask) != val
7667c6fd2807SJeff Garzik  *
7668c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7669c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7670c6fd2807SJeff Garzik  *
7671c6fd2807SJeff Garzik  *	LOCKING:
7672c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7673c6fd2807SJeff Garzik  *
7674c6fd2807SJeff Garzik  *	RETURNS:
7675c6fd2807SJeff Garzik  *	The final register value.
7676c6fd2807SJeff Garzik  */
7677c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7678c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7679c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7680c6fd2807SJeff Garzik {
7681c6fd2807SJeff Garzik 	unsigned long timeout;
7682c6fd2807SJeff Garzik 	u32 tmp;
7683c6fd2807SJeff Garzik 
7684c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7685c6fd2807SJeff Garzik 
7686c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7687c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7688c6fd2807SJeff Garzik 	 * eat away the timeout.
7689c6fd2807SJeff Garzik 	 */
7690c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7691c6fd2807SJeff Garzik 
7692c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7693c6fd2807SJeff Garzik 		msleep(interval_msec);
7694c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7695c6fd2807SJeff Garzik 	}
7696c6fd2807SJeff Garzik 
7697c6fd2807SJeff Garzik 	return tmp;
7698c6fd2807SJeff Garzik }
7699c6fd2807SJeff Garzik 
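/*
 * Illustrative sketch, not part of libata: waiting up to a second for a
 * hypothetical controller to drop its BUSY bit, polling every 10ms.
 * mmio, FOO_CTL and FOO_CTL_BUSY are assumptions for illustration only.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + FOO_CTL, FOO_CTL_BUSY,
 *				   FOO_CTL_BUSY, 10, 1000);
 *	if (status & FOO_CTL_BUSY)
 *		return -EBUSY;
 */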
7700c6fd2807SJeff Garzik /*
7701c6fd2807SJeff Garzik  * Dummy port_ops
7702c6fd2807SJeff Garzik  */
7703c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7704c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7705c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7706c6fd2807SJeff Garzik 
7707c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7708c6fd2807SJeff Garzik {
7709c6fd2807SJeff Garzik 	return ATA_DRDY;
7710c6fd2807SJeff Garzik }
7711c6fd2807SJeff Garzik 
7712c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7713c6fd2807SJeff Garzik {
7714c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7715c6fd2807SJeff Garzik }
7716c6fd2807SJeff Garzik 
7717c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7718c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7719c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7720c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7721c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7722c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7723c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7724c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7725c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7726c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7727c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7728c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7729c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7730c6fd2807SJeff Garzik };
7731c6fd2807SJeff Garzik 
773221b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
773321b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
773421b0ad4fSTejun Heo };
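/*
 * Illustrative sketch, not part of libata: a hypothetical controller
 * whose second channel is unusable can hand ata_dummy_port_info to
 * ata_host_alloc_pinfo() so that the port is reported as "DUMMY" and
 * any command issued to it fails with AC_ERR_SYSTEM.  foo_port_info is
 * an assumption for illustration only.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */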
773521b0ad4fSTejun Heo 
7736c6fd2807SJeff Garzik /*
7737c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7738c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7739c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7740c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7741c6fd2807SJeff Garzik  */
7742c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7743c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7744c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7745c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
774621b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7747c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7748c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7749cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7750f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7751f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7752ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7753f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7754f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
77550529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7756c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7757c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7758c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7759c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7760c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7761c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7762c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7763c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7764c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
776543727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7766c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7767c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
77686357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_pack_xfermask);
77696357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
77706357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
77716357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
77726357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
77736357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_mode_string);
77746357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_id_xfermask);
7775c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7776c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7777c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7778c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7779d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7780c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
778104351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
77820d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
77830d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
778431cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7785c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7786d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7787c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7788c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7789c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7790c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7791c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7792c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7793c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7794c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7795c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7796c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7797c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7798c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
779910305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7800c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7801936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7802936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7803c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7804c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7805c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7806cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7807c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7808c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7809c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7810c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7811c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7812c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7813c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7814c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
781588ff6eafSTejun Heo EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7816d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7817c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7818c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7819c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7820c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7821c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7822c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7823c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7824c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7825c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7826c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7827936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7828936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
78296ffa01d8STejun Heo #ifdef CONFIG_PM
7830cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7831cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
78326ffa01d8STejun Heo #endif /* CONFIG_PM */
7833c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7834c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
7835c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7836c6fd2807SJeff Garzik 
7837c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
78386357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7839c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7840c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7841a0f79b92STejun Heo EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7842c6fd2807SJeff Garzik 
7843c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7844c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7845d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
78461626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7847d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
78484e6b79faSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
7849c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7850c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
78516ffa01d8STejun Heo #ifdef CONFIG_PM
7852c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7853c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7854c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7855c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
78566ffa01d8STejun Heo #endif /* CONFIG_PM */
7857c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7858c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7859c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7860c6fd2807SJeff Garzik 
786131f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
78623af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
78633af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
78643af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
78653af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
78663af9a77aSTejun Heo 
7867b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7868b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7869b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7870cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7871cbcdd875STejun Heo #ifdef CONFIG_PCI
7872cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7873cbcdd875STejun Heo #endif /* CONFIG_PCI */
7874c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7875dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7876c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7877c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
78787d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7879c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7880c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7881c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7882c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7883c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
788483625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7885a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7886be0d18dfSAlan Cox 
7887be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7888be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7889be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7890c88f90c3STejun Heo EXPORT_SYMBOL_GPL(ata_cable_ignore);
7891be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7892