xref: /openbmc/linux/drivers/ata/libata-core.c (revision 55dba312)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
3392c52c52SAlan Cox  *  Standards documents from:
3492c52c52SAlan Cox  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
3592c52c52SAlan Cox  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
3692c52c52SAlan Cox  *	http://www.sata-io.org (SATA)
3792c52c52SAlan Cox  *	http://www.compactflash.org (CF)
3892c52c52SAlan Cox  *	http://www.qic.org (QIC157 - Tape and DSC)
3992c52c52SAlan Cox  *	http://www.ce-ata.org (CE-ATA: not supported)
4092c52c52SAlan Cox  *
41c6fd2807SJeff Garzik  */
42c6fd2807SJeff Garzik 
43c6fd2807SJeff Garzik #include <linux/kernel.h>
44c6fd2807SJeff Garzik #include <linux/module.h>
45c6fd2807SJeff Garzik #include <linux/pci.h>
46c6fd2807SJeff Garzik #include <linux/init.h>
47c6fd2807SJeff Garzik #include <linux/list.h>
48c6fd2807SJeff Garzik #include <linux/mm.h>
49c6fd2807SJeff Garzik #include <linux/highmem.h>
50c6fd2807SJeff Garzik #include <linux/spinlock.h>
51c6fd2807SJeff Garzik #include <linux/blkdev.h>
52c6fd2807SJeff Garzik #include <linux/delay.h>
53c6fd2807SJeff Garzik #include <linux/timer.h>
54c6fd2807SJeff Garzik #include <linux/interrupt.h>
55c6fd2807SJeff Garzik #include <linux/completion.h>
56c6fd2807SJeff Garzik #include <linux/suspend.h>
57c6fd2807SJeff Garzik #include <linux/workqueue.h>
58c6fd2807SJeff Garzik #include <linux/jiffies.h>
59c6fd2807SJeff Garzik #include <linux/scatterlist.h>
602dcb407eSJeff Garzik #include <linux/io.h>
61c6fd2807SJeff Garzik #include <scsi/scsi.h>
62c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
63c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
64c6fd2807SJeff Garzik #include <linux/libata.h>
65c6fd2807SJeff Garzik #include <asm/semaphore.h>
66c6fd2807SJeff Garzik #include <asm/byteorder.h>
67140b5e59STejun Heo #include <linux/cdrom.h>
68c6fd2807SJeff Garzik 
69c6fd2807SJeff Garzik #include "libata.h"
70c6fd2807SJeff Garzik 
71fda0efc5SJeff Garzik 
72c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
73c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
74c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
75c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
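
/*
 * Explanatory note (not part of the original file, describing how the
 * debounce helpers elsewhere in libata consume these tables): the link
 * status is polled every "interval" ms and must stay stable for
 * "duration" ms before it is trusted; the whole debounce attempt is
 * abandoned after "timeout" ms.
 */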
76c6fd2807SJeff Garzik 
77c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
78c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
79c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
80218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev,
81218f3d30SJeff Garzik 					u8 enable, u8 feature);
82c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
8375683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
84c6fd2807SJeff Garzik 
85f3187195STejun Heo unsigned int ata_print_id = 1;
86c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
87c6fd2807SJeff Garzik 
88c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
89c6fd2807SJeff Garzik 
90c6fd2807SJeff Garzik int atapi_enabled = 1;
91c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
92c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
93c6fd2807SJeff Garzik 
94c6fd2807SJeff Garzik int atapi_dmadir = 0;
95c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
96c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
97c6fd2807SJeff Garzik 
98baf4fdfaSMark Lord int atapi_passthru16 = 1;
99baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444);
100baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
101baf4fdfaSMark Lord 
102c6fd2807SJeff Garzik int libata_fua = 0;
103c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
104c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
105c6fd2807SJeff Garzik 
1062dcb407eSJeff Garzik static int ata_ignore_hpa;
1071e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
1081e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
1091e999736SAlan Cox 
110b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
111b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444);
112b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
113b3a70601SAlan Cox 
114c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
115c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
116c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
117c6fd2807SJeff Garzik 
1186ebe9d86SJeff Garzik int libata_noacpi = 0;
119d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
1206ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
12111ef697bSKristen Carlson Accardi 
122ae8d4ee7SAlan Cox int libata_allow_tpm = 0;
123ae8d4ee7SAlan Cox module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
124ae8d4ee7SAlan Cox MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
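
/*
 * Illustrative note (not part of the original file): these are ordinary
 * module parameters, so with libata built in they can be set on the
 * kernel command line, e.g. "libata.dma=0x3 libata.noacpi=1", and with
 * libata built as a module they can be passed to modprobe, e.g.
 * "modprobe libata atapi_dmadir=1".  The 0444/0644 permissions above
 * also expose them under /sys/module/libata/parameters/.
 */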
125ae8d4ee7SAlan Cox 
126c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
127c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
128c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
129c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
130c6fd2807SJeff Garzik 
131c6fd2807SJeff Garzik 
132c6fd2807SJeff Garzik /**
133c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
134c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
135c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
1369977126cSTejun Heo  *	@is_cmd: This FIS is for a command
1379977126cSTejun Heo  *	@fis: Buffer into which data will be output
138c6fd2807SJeff Garzik  *
139c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
140c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
141c6fd2807SJeff Garzik  *
142c6fd2807SJeff Garzik  *	LOCKING:
143c6fd2807SJeff Garzik  *	Inherited from caller.
144c6fd2807SJeff Garzik  */
1459977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
146c6fd2807SJeff Garzik {
147c6fd2807SJeff Garzik 	fis[0] = 0x27;			/* Register - Host to Device FIS */
1489977126cSTejun Heo 	fis[1] = pmp & 0xf;		/* Port multiplier number */
1499977126cSTejun Heo 	if (is_cmd)
1509977126cSTejun Heo 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
1519977126cSTejun Heo 
152c6fd2807SJeff Garzik 	fis[2] = tf->command;
153c6fd2807SJeff Garzik 	fis[3] = tf->feature;
154c6fd2807SJeff Garzik 
155c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
156c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
157c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
158c6fd2807SJeff Garzik 	fis[7] = tf->device;
159c6fd2807SJeff Garzik 
160c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
161c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
162c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
163c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
164c6fd2807SJeff Garzik 
165c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
166c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
167c6fd2807SJeff Garzik 	fis[14] = 0;
168c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
169c6fd2807SJeff Garzik 
170c6fd2807SJeff Garzik 	fis[16] = 0;
171c6fd2807SJeff Garzik 	fis[17] = 0;
172c6fd2807SJeff Garzik 	fis[18] = 0;
173c6fd2807SJeff Garzik 	fis[19] = 0;
174c6fd2807SJeff Garzik }
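
/*
 * Illustrative sketch (not part of the original file): how a SATA LLD
 * might use ata_tf_to_fis() to build the 20-byte Register - Host to
 * Device FIS for a queued command.  The helper name is hypothetical.
 */
static void example_fill_cmd_fis(struct ata_queued_cmd *qc, u8 *fis_buf)
{
	/* is_cmd=1 sets bit 7 of FIS byte 1, marking this as a Command FIS */
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis_buf);
}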
175c6fd2807SJeff Garzik 
176c6fd2807SJeff Garzik /**
177c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
178c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
179c6fd2807SJeff Garzik  *	@tf: Taskfile to output
180c6fd2807SJeff Garzik  *
181c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
182c6fd2807SJeff Garzik  *
183c6fd2807SJeff Garzik  *	LOCKING:
184c6fd2807SJeff Garzik  *	Inherited from caller.
185c6fd2807SJeff Garzik  */
186c6fd2807SJeff Garzik 
187c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
188c6fd2807SJeff Garzik {
189c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
190c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
191c6fd2807SJeff Garzik 
192c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
193c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
194c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
195c6fd2807SJeff Garzik 	tf->device	= fis[7];
196c6fd2807SJeff Garzik 
197c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
198c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
199c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
200c6fd2807SJeff Garzik 
201c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
202c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
203c6fd2807SJeff Garzik }
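
/*
 * Illustrative sketch (not part of the original file): decoding a
 * received Register - Device to Host FIS with ata_tf_from_fis().  In a
 * D2H FIS byte 2 carries the Status register and byte 3 the Error
 * register, which land in tf.command and tf.feature respectively.
 */
static u8 example_d2h_fis_status(const u8 *d2h_fis)
{
	struct ata_taskfile tf;

	ata_tf_from_fis(d2h_fis, &tf);
	return tf.command;	/* Status register value */
}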
204c6fd2807SJeff Garzik 
205c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
206c6fd2807SJeff Garzik 	/* pio multi */
207c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
208c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
209c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
210c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
211c6fd2807SJeff Garzik 	0,
212c6fd2807SJeff Garzik 	0,
213c6fd2807SJeff Garzik 	0,
214c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
215c6fd2807SJeff Garzik 	/* pio */
216c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
217c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
218c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
219c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
220c6fd2807SJeff Garzik 	0,
221c6fd2807SJeff Garzik 	0,
222c6fd2807SJeff Garzik 	0,
223c6fd2807SJeff Garzik 	0,
224c6fd2807SJeff Garzik 	/* dma */
225c6fd2807SJeff Garzik 	ATA_CMD_READ,
226c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
227c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
228c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
229c6fd2807SJeff Garzik 	0,
230c6fd2807SJeff Garzik 	0,
231c6fd2807SJeff Garzik 	0,
232c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
233c6fd2807SJeff Garzik };
234c6fd2807SJeff Garzik 
235c6fd2807SJeff Garzik /**
236c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
237bd056d7eSTejun Heo  *	@tf: command to examine and configure
238bd056d7eSTejun Heo  *	@dev: device tf belongs to
239c6fd2807SJeff Garzik  *
240c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
241c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
242c6fd2807SJeff Garzik  *
243c6fd2807SJeff Garzik  *	LOCKING:
244c6fd2807SJeff Garzik  *	caller.
245c6fd2807SJeff Garzik  */
246bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
247c6fd2807SJeff Garzik {
248c6fd2807SJeff Garzik 	u8 cmd;
249c6fd2807SJeff Garzik 
250c6fd2807SJeff Garzik 	int index, fua, lba48, write;
251c6fd2807SJeff Garzik 
252c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
253c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
254c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
255c6fd2807SJeff Garzik 
256c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
257c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
258c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
2599af5c9c9STejun Heo 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
260c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
261c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
262c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
263c6fd2807SJeff Garzik 	} else {
264c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
265c6fd2807SJeff Garzik 		index = 16;
266c6fd2807SJeff Garzik 	}
267c6fd2807SJeff Garzik 
268c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
269c6fd2807SJeff Garzik 	if (cmd) {
270c6fd2807SJeff Garzik 		tf->command = cmd;
271c6fd2807SJeff Garzik 		return 0;
272c6fd2807SJeff Garzik 	}
273c6fd2807SJeff Garzik 	return -1;
274c6fd2807SJeff Garzik }
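
/*
 * Worked example (not part of the original file): ata_rw_cmds[] is
 * indexed as base + fua + lba48 + write, where the flags contribute 4,
 * 2 and 1 and base is 0 for PIO multi-sector, 8 for plain PIO and 16
 * for DMA.  An LBA48 FUA DMA write therefore selects
 * ata_rw_cmds[16 + 4 + 2 + 1] == ATA_CMD_WRITE_FUA_EXT, while a plain
 * LBA28 PIO read selects ata_rw_cmds[8] == ATA_CMD_PIO_READ.
 */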
275c6fd2807SJeff Garzik 
276c6fd2807SJeff Garzik /**
27735b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
27835b649feSTejun Heo  *	@tf: ATA taskfile of interest
27935b649feSTejun Heo  *	@dev: ATA device @tf belongs to
28035b649feSTejun Heo  *
28135b649feSTejun Heo  *	LOCKING:
28235b649feSTejun Heo  *	None.
28335b649feSTejun Heo  *
28435b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
28535b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
28635b649feSTejun Heo  *	flags select the address format to use.
28735b649feSTejun Heo  *
28835b649feSTejun Heo  *	RETURNS:
28935b649feSTejun Heo  *	Block address read from @tf.
29035b649feSTejun Heo  */
29135b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
29235b649feSTejun Heo {
29335b649feSTejun Heo 	u64 block = 0;
29435b649feSTejun Heo 
29535b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
29635b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
29735b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
29835b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
29935b649feSTejun Heo 			block |= (u64)tf->hob_lbal << 24;	/* avoid sign extension */
30035b649feSTejun Heo 		} else
30135b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
30235b649feSTejun Heo 
30335b649feSTejun Heo 		block |= tf->lbah << 16;
30435b649feSTejun Heo 		block |= tf->lbam << 8;
30535b649feSTejun Heo 		block |= tf->lbal;
30635b649feSTejun Heo 	} else {
30735b649feSTejun Heo 		u32 cyl, head, sect;
30835b649feSTejun Heo 
30935b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
31035b649feSTejun Heo 		head = tf->device & 0xf;
31135b649feSTejun Heo 		sect = tf->lbal;
31235b649feSTejun Heo 
31335b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;	/* sect is 1-based */
31435b649feSTejun Heo 	}
31535b649feSTejun Heo 
31635b649feSTejun Heo 	return block;
31735b649feSTejun Heo }
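
/*
 * Worked example (not part of the original file): for an LBA48 taskfile
 * with hob_lbam = 0x01, hob_lbal = 0x02, lbah = 0x03, lbam = 0x04 and
 * lbal = 0x05, ata_tf_read_block() returns
 * (0x01 << 32) | (0x02 << 24) | (0x03 << 16) | (0x04 << 8) | 0x05 ==
 * 0x0102030405.  The CHS branch is the inverse of the conversion done
 * in ata_build_rw_tf() below.
 */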
31835b649feSTejun Heo 
31935b649feSTejun Heo /**
320bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
321bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
322bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
323bd056d7eSTejun Heo  *	@block: Block address
324bd056d7eSTejun Heo  *	@n_block: Number of blocks
325bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
326bd056d7eSTejun Heo  *	@tag: tag
327bd056d7eSTejun Heo  *
328bd056d7eSTejun Heo  *	LOCKING:
329bd056d7eSTejun Heo  *	None.
330bd056d7eSTejun Heo  *
331bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
332bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
333bd056d7eSTejun Heo  *
334bd056d7eSTejun Heo  *	RETURNS:
335bd056d7eSTejun Heo  *
336bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
337bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
338bd056d7eSTejun Heo  */
339bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
340bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
341bd056d7eSTejun Heo 		    unsigned int tag)
342bd056d7eSTejun Heo {
343bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
344bd056d7eSTejun Heo 	tf->flags |= tf_flags;
345bd056d7eSTejun Heo 
3466d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
347bd056d7eSTejun Heo 		/* yay, NCQ */
348bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
349bd056d7eSTejun Heo 			return -ERANGE;
350bd056d7eSTejun Heo 
351bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
352bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
353bd056d7eSTejun Heo 
354bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
355bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
356bd056d7eSTejun Heo 		else
357bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
358bd056d7eSTejun Heo 
359bd056d7eSTejun Heo 		tf->nsect = tag << 3;
360bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
361bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
362bd056d7eSTejun Heo 
363bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
364bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
365bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
366bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
367bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
368bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
369bd056d7eSTejun Heo 
370bd056d7eSTejun Heo 		tf->device = 1 << 6;
371bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
372bd056d7eSTejun Heo 			tf->device |= 1 << 7;
373bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
374bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
375bd056d7eSTejun Heo 
376bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
377bd056d7eSTejun Heo 			/* use LBA28 */
378bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
379bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
380bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
381bd056d7eSTejun Heo 				return -ERANGE;
382bd056d7eSTejun Heo 
383bd056d7eSTejun Heo 			/* use LBA48 */
384bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
385bd056d7eSTejun Heo 
386bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
387bd056d7eSTejun Heo 
388bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
389bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
390bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
391bd056d7eSTejun Heo 		} else
392bd056d7eSTejun Heo 			/* request too large even for LBA48 */
393bd056d7eSTejun Heo 			return -ERANGE;
394bd056d7eSTejun Heo 
395bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
396bd056d7eSTejun Heo 			return -EINVAL;
397bd056d7eSTejun Heo 
398bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
399bd056d7eSTejun Heo 
400bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
401bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
402bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
403bd056d7eSTejun Heo 
404bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
405bd056d7eSTejun Heo 	} else {
406bd056d7eSTejun Heo 		/* CHS */
407bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
408bd056d7eSTejun Heo 
409bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
410bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
411bd056d7eSTejun Heo 			return -ERANGE;
412bd056d7eSTejun Heo 
413bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
414bd056d7eSTejun Heo 			return -EINVAL;
415bd056d7eSTejun Heo 
416bd056d7eSTejun Heo 		/* Convert LBA to CHS */
417bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
418bd056d7eSTejun Heo 		cyl   = track / dev->heads;
419bd056d7eSTejun Heo 		head  = track % dev->heads;
420bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
421bd056d7eSTejun Heo 
422bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
423bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
424bd056d7eSTejun Heo 
425bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
426bd056d7eSTejun Heo 		   Cylinder: 0-65535
427bd056d7eSTejun Heo 		   Head: 0-15
428bd056d7eSTejun Heo 		   Sector: 1-255 */
429bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
430bd056d7eSTejun Heo 			return -ERANGE;
431bd056d7eSTejun Heo 
432bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
433bd056d7eSTejun Heo 		tf->lbal = sect;
434bd056d7eSTejun Heo 		tf->lbam = cyl;
435bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
436bd056d7eSTejun Heo 		tf->device |= head;
437bd056d7eSTejun Heo 	}
438bd056d7eSTejun Heo 
439bd056d7eSTejun Heo 	return 0;
440bd056d7eSTejun Heo }
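
/*
 * Illustrative sketch (not part of the original file): building a
 * taskfile for a 1024-sector FUA write at LBA 0x12345678.  Depending on
 * the device this becomes an NCQ or LBA48 command; ata_build_rw_tf()
 * returns -ERANGE if the device cannot address the request.  The helper
 * name is hypothetical.
 */
static int example_build_write_tf(struct ata_device *dev,
				  struct ata_taskfile *tf, unsigned int tag)
{
	ata_tf_init(dev, tf);
	return ata_build_rw_tf(tf, dev, 0x12345678ULL, 1024,
			       ATA_TFLAG_WRITE | ATA_TFLAG_FUA, tag);
}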
441bd056d7eSTejun Heo 
442bd056d7eSTejun Heo /**
443c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
444c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
445c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
446c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
447c6fd2807SJeff Garzik  *
448c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
449c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
450c6fd2807SJeff Garzik  *
451c6fd2807SJeff Garzik  *	LOCKING:
452c6fd2807SJeff Garzik  *	None.
453c6fd2807SJeff Garzik  *
454c6fd2807SJeff Garzik  *	RETURNS:
455c6fd2807SJeff Garzik  *	Packed xfer_mask.
456c6fd2807SJeff Garzik  */
4577dc951aeSTejun Heo unsigned long ata_pack_xfermask(unsigned long pio_mask,
4587dc951aeSTejun Heo 				unsigned long mwdma_mask,
4597dc951aeSTejun Heo 				unsigned long udma_mask)
460c6fd2807SJeff Garzik {
461c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
462c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
463c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
464c6fd2807SJeff Garzik }
465c6fd2807SJeff Garzik 
466c6fd2807SJeff Garzik /**
467c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
468c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
469c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
470c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
471c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
472c6fd2807SJeff Garzik  *
473c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
474c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
475c6fd2807SJeff Garzik  */
4767dc951aeSTejun Heo void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
4777dc951aeSTejun Heo 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
478c6fd2807SJeff Garzik {
479c6fd2807SJeff Garzik 	if (pio_mask)
480c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
481c6fd2807SJeff Garzik 	if (mwdma_mask)
482c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
483c6fd2807SJeff Garzik 	if (udma_mask)
484c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
485c6fd2807SJeff Garzik }
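
/*
 * Illustrative sketch (not part of the original file): packing the
 * per-type masks a driver typically advertises (PIO0-4, MWDMA0-2,
 * UDMA0-6) into a single xfer_mask and splitting it up again.
 */
static void example_xfermask_roundtrip(void)
{
	unsigned long xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x7f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x7f again */
}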
486c6fd2807SJeff Garzik 
487c6fd2807SJeff Garzik static const struct ata_xfer_ent {
488c6fd2807SJeff Garzik 	int shift, bits;
489c6fd2807SJeff Garzik 	u8 base;
490c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
49170cd071eSTejun Heo 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
49270cd071eSTejun Heo 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
49370cd071eSTejun Heo 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
494c6fd2807SJeff Garzik 	{ -1, },
495c6fd2807SJeff Garzik };
496c6fd2807SJeff Garzik 
497c6fd2807SJeff Garzik /**
498c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
499c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
500c6fd2807SJeff Garzik  *
501c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
502c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
503c6fd2807SJeff Garzik  *
504c6fd2807SJeff Garzik  *	LOCKING:
505c6fd2807SJeff Garzik  *	None.
506c6fd2807SJeff Garzik  *
507c6fd2807SJeff Garzik  *	RETURNS:
50870cd071eSTejun Heo  *	Matching XFER_* value, 0xff if no match found.
509c6fd2807SJeff Garzik  */
5107dc951aeSTejun Heo u8 ata_xfer_mask2mode(unsigned long xfer_mask)
511c6fd2807SJeff Garzik {
512c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
513c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
514c6fd2807SJeff Garzik 
515c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
517c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
51870cd071eSTejun Heo 	return 0xff;
519c6fd2807SJeff Garzik }
520c6fd2807SJeff Garzik 
521c6fd2807SJeff Garzik /**
522c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
523c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
524c6fd2807SJeff Garzik  *
525c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
526c6fd2807SJeff Garzik  *
527c6fd2807SJeff Garzik  *	LOCKING:
528c6fd2807SJeff Garzik  *	None.
529c6fd2807SJeff Garzik  *
530c6fd2807SJeff Garzik  *	RETURNS:
531c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
532c6fd2807SJeff Garzik  */
5337dc951aeSTejun Heo unsigned long ata_xfer_mode2mask(u8 xfer_mode)
534c6fd2807SJeff Garzik {
535c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
536c6fd2807SJeff Garzik 
537c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
53970cd071eSTejun Heo 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
54070cd071eSTejun Heo 				& ~((1 << ent->shift) - 1);
541c6fd2807SJeff Garzik 	return 0;
542c6fd2807SJeff Garzik }
543c6fd2807SJeff Garzik 
544c6fd2807SJeff Garzik /**
545c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
546c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
547c6fd2807SJeff Garzik  *
548c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
549c6fd2807SJeff Garzik  *
550c6fd2807SJeff Garzik  *	LOCKING:
551c6fd2807SJeff Garzik  *	None.
552c6fd2807SJeff Garzik  *
553c6fd2807SJeff Garzik  *	RETURNS:
554c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
555c6fd2807SJeff Garzik  */
5567dc951aeSTejun Heo int ata_xfer_mode2shift(unsigned long xfer_mode)
557c6fd2807SJeff Garzik {
558c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
559c6fd2807SJeff Garzik 
560c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
561c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
562c6fd2807SJeff Garzik 			return ent->shift;
563c6fd2807SJeff Garzik 	return -1;
564c6fd2807SJeff Garzik }
565c6fd2807SJeff Garzik 
566c6fd2807SJeff Garzik /**
567c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
568c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
569c6fd2807SJeff Garzik  *
570c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
571c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
572c6fd2807SJeff Garzik  *
573c6fd2807SJeff Garzik  *	LOCKING:
574c6fd2807SJeff Garzik  *	None.
575c6fd2807SJeff Garzik  *
576c6fd2807SJeff Garzik  *	RETURNS:
577c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
578c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
579c6fd2807SJeff Garzik  */
5807dc951aeSTejun Heo const char *ata_mode_string(unsigned long xfer_mask)
581c6fd2807SJeff Garzik {
582c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
583c6fd2807SJeff Garzik 		"PIO0",
584c6fd2807SJeff Garzik 		"PIO1",
585c6fd2807SJeff Garzik 		"PIO2",
586c6fd2807SJeff Garzik 		"PIO3",
587c6fd2807SJeff Garzik 		"PIO4",
588b352e57dSAlan Cox 		"PIO5",
589b352e57dSAlan Cox 		"PIO6",
590c6fd2807SJeff Garzik 		"MWDMA0",
591c6fd2807SJeff Garzik 		"MWDMA1",
592c6fd2807SJeff Garzik 		"MWDMA2",
593b352e57dSAlan Cox 		"MWDMA3",
594b352e57dSAlan Cox 		"MWDMA4",
595c6fd2807SJeff Garzik 		"UDMA/16",
596c6fd2807SJeff Garzik 		"UDMA/25",
597c6fd2807SJeff Garzik 		"UDMA/33",
598c6fd2807SJeff Garzik 		"UDMA/44",
599c6fd2807SJeff Garzik 		"UDMA/66",
600c6fd2807SJeff Garzik 		"UDMA/100",
601c6fd2807SJeff Garzik 		"UDMA/133",
602c6fd2807SJeff Garzik 		"UDMA7",
603c6fd2807SJeff Garzik 	};
604c6fd2807SJeff Garzik 	int highbit;
605c6fd2807SJeff Garzik 
606c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
607c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
608c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
609c6fd2807SJeff Garzik 	return "<n/a>";
610c6fd2807SJeff Garzik }
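
/*
 * Illustrative sketch (not part of the original file): the xfer_mask
 * helpers above compose naturally.  With a UDMA0-5 mask the highest bit
 * wins, so this should report "UDMA/100" and XFER_UDMA_5 (0x45).
 */
static void example_report_best_mode(void)
{
	unsigned long xfer_mask = ata_pack_xfermask(0, 0, 0x3f);

	printk(KERN_DEBUG "best mode %s (XFER 0x%x)\n",
	       ata_mode_string(xfer_mask), ata_xfer_mask2mode(xfer_mask));
}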
611c6fd2807SJeff Garzik 
612c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
613c6fd2807SJeff Garzik {
614c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
615c6fd2807SJeff Garzik 		"1.5 Gbps",
616c6fd2807SJeff Garzik 		"3.0 Gbps",
617c6fd2807SJeff Garzik 	};
618c6fd2807SJeff Garzik 
619c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
620c6fd2807SJeff Garzik 		return "<unknown>";
621c6fd2807SJeff Garzik 	return spd_str[spd - 1];
622c6fd2807SJeff Garzik }
623c6fd2807SJeff Garzik 
624c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
625c6fd2807SJeff Garzik {
62609d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
6279af5c9c9STejun Heo 		if (ata_msg_drv(dev->link->ap))
628c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
629562f0c2dSTejun Heo 		ata_acpi_on_disable(dev);
6304ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6314ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
632c6fd2807SJeff Garzik 		dev->class++;
633c6fd2807SJeff Garzik 	}
634c6fd2807SJeff Garzik }
635c6fd2807SJeff Garzik 
636ca77329fSKristen Carlson Accardi static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
637ca77329fSKristen Carlson Accardi {
638ca77329fSKristen Carlson Accardi 	struct ata_link *link = dev->link;
639ca77329fSKristen Carlson Accardi 	struct ata_port *ap = link->ap;
640ca77329fSKristen Carlson Accardi 	u32 scontrol;
641ca77329fSKristen Carlson Accardi 	unsigned int err_mask;
642ca77329fSKristen Carlson Accardi 	int rc;
643ca77329fSKristen Carlson Accardi 
644ca77329fSKristen Carlson Accardi 	/*
645ca77329fSKristen Carlson Accardi 	 * disallow DIPM for drivers which haven't set
646ca77329fSKristen Carlson Accardi 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
647ca77329fSKristen Carlson Accardi 	 * PHY ready will be set in the interrupt status on
648ca77329fSKristen Carlson Accardi 	 * state changes, which will cause some drivers to
649ca77329fSKristen Carlson Accardi 	 * think there are errors; additionally, drivers will
650ca77329fSKristen Carlson Accardi 	 * need to disable hot plug.
651ca77329fSKristen Carlson Accardi 	 */
652ca77329fSKristen Carlson Accardi 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
653ca77329fSKristen Carlson Accardi 		ap->pm_policy = NOT_AVAILABLE;
654ca77329fSKristen Carlson Accardi 		return -EINVAL;
655ca77329fSKristen Carlson Accardi 	}
656ca77329fSKristen Carlson Accardi 
657ca77329fSKristen Carlson Accardi 	/*
658ca77329fSKristen Carlson Accardi 	 * For DIPM, we will only enable it for the
659ca77329fSKristen Carlson Accardi 	 * min_power setting.
660ca77329fSKristen Carlson Accardi 	 *
661ca77329fSKristen Carlson Accardi 	 * Why?  Because disks are too stupid to know that
662ca77329fSKristen Carlson Accardi 	 * if the host rejects a request to go to SLUMBER
663ca77329fSKristen Carlson Accardi 	 * they should retry at PARTIAL; instead they just
664ca77329fSKristen Carlson Accardi 	 * give up.  So, for medium_power to work at all,
665ca77329fSKristen Carlson Accardi 	 * we need to only allow HIPM.
666ca77329fSKristen Carlson Accardi 	 */
667ca77329fSKristen Carlson Accardi 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
668ca77329fSKristen Carlson Accardi 	if (rc)
669ca77329fSKristen Carlson Accardi 		return rc;
670ca77329fSKristen Carlson Accardi 
671ca77329fSKristen Carlson Accardi 	switch (policy) {
672ca77329fSKristen Carlson Accardi 	case MIN_POWER:
673ca77329fSKristen Carlson Accardi 		/* no restrictions on IPM transitions */
674ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x3 << 8);
675ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
676ca77329fSKristen Carlson Accardi 		if (rc)
677ca77329fSKristen Carlson Accardi 			return rc;
678ca77329fSKristen Carlson Accardi 
679ca77329fSKristen Carlson Accardi 		/* enable DIPM */
680ca77329fSKristen Carlson Accardi 		if (dev->flags & ATA_DFLAG_DIPM)
681ca77329fSKristen Carlson Accardi 			err_mask = ata_dev_set_feature(dev,
682ca77329fSKristen Carlson Accardi 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
683ca77329fSKristen Carlson Accardi 		break;
684ca77329fSKristen Carlson Accardi 	case MEDIUM_POWER:
685ca77329fSKristen Carlson Accardi 		/* allow IPM to PARTIAL */
686ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x1 << 8);
687ca77329fSKristen Carlson Accardi 		scontrol |= (0x2 << 8);
688ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
689ca77329fSKristen Carlson Accardi 		if (rc)
690ca77329fSKristen Carlson Accardi 			return rc;
691ca77329fSKristen Carlson Accardi 
692f5456b63SKristen Carlson Accardi 		/*
693f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since IPM flags
694f5456b63SKristen Carlson Accardi 		 * disallow transitions to SLUMBER, which effectively
695f5456b63SKristen Carlson Accardi 		 * disables DIPM if the device does not support PARTIAL
696f5456b63SKristen Carlson Accardi 		 */
697ca77329fSKristen Carlson Accardi 		break;
698ca77329fSKristen Carlson Accardi 	case NOT_AVAILABLE:
699ca77329fSKristen Carlson Accardi 	case MAX_PERFORMANCE:
700ca77329fSKristen Carlson Accardi 		/* disable all IPM transitions */
701ca77329fSKristen Carlson Accardi 		scontrol |= (0x3 << 8);
702ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
703ca77329fSKristen Carlson Accardi 		if (rc)
704ca77329fSKristen Carlson Accardi 			return rc;
705ca77329fSKristen Carlson Accardi 
706f5456b63SKristen Carlson Accardi 		/*
707f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since IPM flags
708f5456b63SKristen Carlson Accardi 		 * disallow all transitions, which effectively
709f5456b63SKristen Carlson Accardi 		 * disables DIPM anyway.
710f5456b63SKristen Carlson Accardi 		 */
711ca77329fSKristen Carlson Accardi 		break;
712ca77329fSKristen Carlson Accardi 	}
713ca77329fSKristen Carlson Accardi 
714ca77329fSKristen Carlson Accardi 	/* FIXME: handle SET FEATURES failure */
715ca77329fSKristen Carlson Accardi 	(void) err_mask;
716ca77329fSKristen Carlson Accardi 
717ca77329fSKristen Carlson Accardi 	return 0;
718ca77329fSKristen Carlson Accardi }
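
/*
 * Explanatory note (not part of the original file): bits 11:8 of
 * SControl form the IPM field manipulated above.  Setting bit 8
 * disallows transitions to PARTIAL and setting bit 9 disallows
 * transitions to SLUMBER.  MIN_POWER clears both, MEDIUM_POWER leaves
 * only SLUMBER blocked, and MAX_PERFORMANCE/NOT_AVAILABLE block both.
 */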
719ca77329fSKristen Carlson Accardi 
720ca77329fSKristen Carlson Accardi /**
721ca77329fSKristen Carlson Accardi  *	ata_dev_enable_pm - enable SATA interface power management
72248166fd9SStephen Hemminger  *	@dev:  device to enable power management
72348166fd9SStephen Hemminger  *	@policy: the link power management policy
724ca77329fSKristen Carlson Accardi  *
725ca77329fSKristen Carlson Accardi  *	Enable SATA Interface power management.  This will enable
726ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) for the min_power
727ca77329fSKristen Carlson Accardi  * 	policy, and then call the driver-specific callbacks for
728ca77329fSKristen Carlson Accardi  *	enabling Host Initiated Power Management.
729ca77329fSKristen Carlson Accardi  *
730ca77329fSKristen Carlson Accardi  *	Locking: Caller.
731ca77329fSKristen Carlson Accardi  *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
732ca77329fSKristen Carlson Accardi  */
733ca77329fSKristen Carlson Accardi void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
734ca77329fSKristen Carlson Accardi {
735ca77329fSKristen Carlson Accardi 	int rc = 0;
736ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
737ca77329fSKristen Carlson Accardi 
738ca77329fSKristen Carlson Accardi 	/* set HIPM first, then DIPM */
739ca77329fSKristen Carlson Accardi 	if (ap->ops->enable_pm)
740ca77329fSKristen Carlson Accardi 		rc = ap->ops->enable_pm(ap, policy);
741ca77329fSKristen Carlson Accardi 	if (rc)
742ca77329fSKristen Carlson Accardi 		goto enable_pm_out;
743ca77329fSKristen Carlson Accardi 	rc = ata_dev_set_dipm(dev, policy);
744ca77329fSKristen Carlson Accardi 
745ca77329fSKristen Carlson Accardi enable_pm_out:
746ca77329fSKristen Carlson Accardi 	if (rc)
747ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
748ca77329fSKristen Carlson Accardi 	else
749ca77329fSKristen Carlson Accardi 		ap->pm_policy = policy;
750ca77329fSKristen Carlson Accardi 	return /* rc */;	/* hopefully we can use 'rc' eventually */
751ca77329fSKristen Carlson Accardi }
752ca77329fSKristen Carlson Accardi 
7531992a5edSStephen Rothwell #ifdef CONFIG_PM
754ca77329fSKristen Carlson Accardi /**
755ca77329fSKristen Carlson Accardi  *	ata_dev_disable_pm - disable SATA interface power management
75648166fd9SStephen Hemminger  *	@dev: device to disable power management
757ca77329fSKristen Carlson Accardi  *
758ca77329fSKristen Carlson Accardi  *	Disable SATA Interface power management.  This will disable
759ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) without changing the
760ca77329fSKristen Carlson Accardi  * 	policy, and call the driver-specific callbacks for disabling Host
761ca77329fSKristen Carlson Accardi  * 	Initiated Power Management.
762ca77329fSKristen Carlson Accardi  *
763ca77329fSKristen Carlson Accardi  *	Locking: Caller.
764ca77329fSKristen Carlson Accardi  *	Returns: void
765ca77329fSKristen Carlson Accardi  */
766ca77329fSKristen Carlson Accardi static void ata_dev_disable_pm(struct ata_device *dev)
767ca77329fSKristen Carlson Accardi {
768ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
769ca77329fSKristen Carlson Accardi 
770ca77329fSKristen Carlson Accardi 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
771ca77329fSKristen Carlson Accardi 	if (ap->ops->disable_pm)
772ca77329fSKristen Carlson Accardi 		ap->ops->disable_pm(ap);
773ca77329fSKristen Carlson Accardi }
7741992a5edSStephen Rothwell #endif	/* CONFIG_PM */
775ca77329fSKristen Carlson Accardi 
776ca77329fSKristen Carlson Accardi void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
777ca77329fSKristen Carlson Accardi {
778ca77329fSKristen Carlson Accardi 	ap->pm_policy = policy;
779ca77329fSKristen Carlson Accardi 	ap->link.eh_info.action |= ATA_EHI_LPM;
780ca77329fSKristen Carlson Accardi 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
781ca77329fSKristen Carlson Accardi 	ata_port_schedule_eh(ap);
782ca77329fSKristen Carlson Accardi }
783ca77329fSKristen Carlson Accardi 
7841992a5edSStephen Rothwell #ifdef CONFIG_PM
785ca77329fSKristen Carlson Accardi static void ata_lpm_enable(struct ata_host *host)
786ca77329fSKristen Carlson Accardi {
787ca77329fSKristen Carlson Accardi 	struct ata_link *link;
788ca77329fSKristen Carlson Accardi 	struct ata_port *ap;
789ca77329fSKristen Carlson Accardi 	struct ata_device *dev;
790ca77329fSKristen Carlson Accardi 	int i;
791ca77329fSKristen Carlson Accardi 
792ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
793ca77329fSKristen Carlson Accardi 		ap = host->ports[i];
794ca77329fSKristen Carlson Accardi 		ata_port_for_each_link(link, ap) {
795ca77329fSKristen Carlson Accardi 			ata_link_for_each_dev(dev, link)
796ca77329fSKristen Carlson Accardi 				ata_dev_disable_pm(dev);
797ca77329fSKristen Carlson Accardi 		}
798ca77329fSKristen Carlson Accardi 	}
799ca77329fSKristen Carlson Accardi }
800ca77329fSKristen Carlson Accardi 
801ca77329fSKristen Carlson Accardi static void ata_lpm_disable(struct ata_host *host)
802ca77329fSKristen Carlson Accardi {
803ca77329fSKristen Carlson Accardi 	int i;
804ca77329fSKristen Carlson Accardi 
805ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
806ca77329fSKristen Carlson Accardi 		struct ata_port *ap = host->ports[i];
807ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
808ca77329fSKristen Carlson Accardi 	}
809ca77329fSKristen Carlson Accardi }
8101992a5edSStephen Rothwell #endif	/* CONFIG_PM */
811ca77329fSKristen Carlson Accardi 
812ca77329fSKristen Carlson Accardi 
813c6fd2807SJeff Garzik /**
814c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
815c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
816c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
817c6fd2807SJeff Garzik  *
8180d5ff566STejun Heo  *	This technique was originally described in
8190d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
8200d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
8210d5ff566STejun Heo  *
8220d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
8230d5ff566STejun Heo  *	and if a device is present, it will respond by
8240d5ff566STejun Heo  *	correctly storing and echoing back the
8250d5ff566STejun Heo  *	ATA shadow register contents.
826c6fd2807SJeff Garzik  *
827c6fd2807SJeff Garzik  *	LOCKING:
828c6fd2807SJeff Garzik  *	caller.
829c6fd2807SJeff Garzik  */
830c6fd2807SJeff Garzik 
8310d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
832c6fd2807SJeff Garzik {
8330d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
8340d5ff566STejun Heo 	u8 nsect, lbal;
8350d5ff566STejun Heo 
8360d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
8370d5ff566STejun Heo 
8380d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8390d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8400d5ff566STejun Heo 
8410d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
8420d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
8430d5ff566STejun Heo 
8440d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8450d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8460d5ff566STejun Heo 
8470d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
8480d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
8490d5ff566STejun Heo 
8500d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
8510d5ff566STejun Heo 		return 1;	/* we found a device */
8520d5ff566STejun Heo 
8530d5ff566STejun Heo 	return 0;		/* nothing found */
854c6fd2807SJeff Garzik }
855c6fd2807SJeff Garzik 
856c6fd2807SJeff Garzik /**
857c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
858c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
859c6fd2807SJeff Garzik  *
860c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
861c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
862c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
863c6fd2807SJeff Garzik  *
864c6fd2807SJeff Garzik  *	LOCKING:
865c6fd2807SJeff Garzik  *	None.
866c6fd2807SJeff Garzik  *
867c6fd2807SJeff Garzik  *	RETURNS:
868633273a3STejun Heo  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
869633273a3STejun Heo  *	%ATA_DEV_UNKNOWN in the event of failure.
870c6fd2807SJeff Garzik  */
871c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
872c6fd2807SJeff Garzik {
873c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
874c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
875c6fd2807SJeff Garzik 	 * so we only check those.  It's sufficient for uniqueness.
876633273a3STejun Heo 	 *
877633273a3STejun Heo 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
878633273a3STejun Heo 	 * signatures for ATA and ATAPI devices attached on SerialATA,
879633273a3STejun Heo 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
880633273a3STejun Heo 	 * spec has never mentioned using different signatures
881633273a3STejun Heo 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
882633273a3STejun Heo 	 * Multiplier specification began to use 0x69/0x96 to identify
883633273a3STejun Heo 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
884633273a3STejun Heo 	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
885633273a3STejun Heo 	 * 0x69/0x96 shortly afterwards and described them as reserved for
886633273a3STejun Heo 	 * SerialATA.
887633273a3STejun Heo 	 *
888633273a3STejun Heo 	 * We follow the current spec and consider that 0x69/0x96
889633273a3STejun Heo 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
890c6fd2807SJeff Garzik 	 */
891633273a3STejun Heo 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
892c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
893c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
894c6fd2807SJeff Garzik 	}
895c6fd2807SJeff Garzik 
896633273a3STejun Heo 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
897c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
898c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
899c6fd2807SJeff Garzik 	}
900c6fd2807SJeff Garzik 
901633273a3STejun Heo 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
902633273a3STejun Heo 		DPRINTK("found PMP device by sig\n");
903633273a3STejun Heo 		return ATA_DEV_PMP;
904633273a3STejun Heo 	}
905633273a3STejun Heo 
906633273a3STejun Heo 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
9072dcb407eSJeff Garzik 		printk(KERN_INFO "ata: SEMB device ignored\n");
908633273a3STejun Heo 		return ATA_DEV_SEMB_UNSUP; /* not yet */
909633273a3STejun Heo 	}
910633273a3STejun Heo 
911c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
912c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
913c6fd2807SJeff Garzik }
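
/*
 * Illustrative sketch (not part of the original file): classifying a
 * device from the signature left in the shadow registers after reset.
 * The ATAPI signature 0x14/0xeb in LBA mid/high yields ATA_DEV_ATAPI.
 */
static unsigned int example_classify_atapi_sig(void)
{
	struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };

	return ata_dev_classify(&tf);	/* ATA_DEV_ATAPI */
}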
914c6fd2807SJeff Garzik 
915c6fd2807SJeff Garzik /**
916c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
9173f19859eSTejun Heo  *	@dev: ATA device to classify
9183f19859eSTejun Heo  *	@present: device seems present
919c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
920c6fd2807SJeff Garzik  *
921c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
922c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
923c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
924c6fd2807SJeff Garzik  *	and diagnostics.
925c6fd2807SJeff Garzik  *
926c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
927c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
928c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
929c6fd2807SJeff Garzik  *
930c6fd2807SJeff Garzik  *	LOCKING:
931c6fd2807SJeff Garzik  *	caller.
932c6fd2807SJeff Garzik  *
933c6fd2807SJeff Garzik  *	RETURNS:
934c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
935c6fd2807SJeff Garzik  */
9363f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
9373f19859eSTejun Heo 				  u8 *r_err)
938c6fd2807SJeff Garzik {
9393f19859eSTejun Heo 	struct ata_port *ap = dev->link->ap;
940c6fd2807SJeff Garzik 	struct ata_taskfile tf;
941c6fd2807SJeff Garzik 	unsigned int class;
942c6fd2807SJeff Garzik 	u8 err;
943c6fd2807SJeff Garzik 
9443f19859eSTejun Heo 	ap->ops->dev_select(ap, dev->devno);
945c6fd2807SJeff Garzik 
946c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
947c6fd2807SJeff Garzik 
948c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
949c6fd2807SJeff Garzik 	err = tf.feature;
950c6fd2807SJeff Garzik 	if (r_err)
951c6fd2807SJeff Garzik 		*r_err = err;
952c6fd2807SJeff Garzik 
95393590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
9543f19859eSTejun Heo 	if (err == 0 && dev->devno == 0)
95593590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
9563f19859eSTejun Heo 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
95793590859SAlan Cox 	else if (err == 1)
958c6fd2807SJeff Garzik 		/* do nothing */ ;
9593f19859eSTejun Heo 	else if ((dev->devno == 0) && (err == 0x81))
960c6fd2807SJeff Garzik 		/* do nothing */ ;
961c6fd2807SJeff Garzik 	else
962c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
963c6fd2807SJeff Garzik 
964c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
965c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
966c6fd2807SJeff Garzik 
967d7fbee05STejun Heo 	if (class == ATA_DEV_UNKNOWN) {
968d7fbee05STejun Heo 		/* If the device failed diagnostic, it's likely to
969d7fbee05STejun Heo 		 * have reported incorrect device signature too.
970d7fbee05STejun Heo 		 * Assume ATA device if the device seems present but
971d7fbee05STejun Heo 		 * device signature is invalid with diagnostic
972d7fbee05STejun Heo 		 * failure.
973d7fbee05STejun Heo 		 */
974d7fbee05STejun Heo 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
975d7fbee05STejun Heo 			class = ATA_DEV_ATA;
976d7fbee05STejun Heo 		else
977d7fbee05STejun Heo 			class = ATA_DEV_NONE;
978d7fbee05STejun Heo 	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
979d7fbee05STejun Heo 		class = ATA_DEV_NONE;
980d7fbee05STejun Heo 
981c6fd2807SJeff Garzik 	return class;
982c6fd2807SJeff Garzik }
983c6fd2807SJeff Garzik 
984c6fd2807SJeff Garzik /**
985c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
986c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
987c6fd2807SJeff Garzik  *	@s: string into which data is output
988c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
989c6fd2807SJeff Garzik  *	@len: length of string to return. must be an even number.
990c6fd2807SJeff Garzik  *
991c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
992c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
993c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
994c6fd2807SJeff Garzik  *
995c6fd2807SJeff Garzik  *	LOCKING:
996c6fd2807SJeff Garzik  *	caller.
997c6fd2807SJeff Garzik  */
998c6fd2807SJeff Garzik 
999c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
1000c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
1001c6fd2807SJeff Garzik {
1002c6fd2807SJeff Garzik 	unsigned int c;
1003c6fd2807SJeff Garzik 
1004c6fd2807SJeff Garzik 	while (len > 0) {
1005c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
1006c6fd2807SJeff Garzik 		*s = c;
1007c6fd2807SJeff Garzik 		s++;
1008c6fd2807SJeff Garzik 
1009c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
1010c6fd2807SJeff Garzik 		*s = c;
1011c6fd2807SJeff Garzik 		s++;
1012c6fd2807SJeff Garzik 
1013c6fd2807SJeff Garzik 		ofs++;
1014c6fd2807SJeff Garzik 		len -= 2;
1015c6fd2807SJeff Garzik 	}
1016c6fd2807SJeff Garzik }
1017c6fd2807SJeff Garzik 
1018c6fd2807SJeff Garzik /**
1019c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1020c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
1021c6fd2807SJeff Garzik  *	@s: string into which data is output
1022c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
1023c6fd2807SJeff Garzik  *	@len: length of string to return. must be an odd number.
1024c6fd2807SJeff Garzik  *
1025c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
1026c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
1027c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
1028c6fd2807SJeff Garzik  *
1029c6fd2807SJeff Garzik  *	LOCKING:
1030c6fd2807SJeff Garzik  *	caller.
1031c6fd2807SJeff Garzik  */
1032c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
1033c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
1034c6fd2807SJeff Garzik {
1035c6fd2807SJeff Garzik 	unsigned char *p;
1036c6fd2807SJeff Garzik 
1037c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
1038c6fd2807SJeff Garzik 
1039c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
1040c6fd2807SJeff Garzik 
1041c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
1042c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
1043c6fd2807SJeff Garzik 		p--;
1044c6fd2807SJeff Garzik 	*p = '\0';
1045c6fd2807SJeff Garzik }
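
/*
 * Illustrative sketch (not part of the original file): extracting the
 * model string from IDENTIFY data.  Words 27-46 hold the 40-byte model
 * number, so the buffer needs one extra byte for the terminating NUL
 * (and the odd length ata_id_c_string() expects).
 */
static void example_print_model(const u16 *id)
{
	unsigned char model[40 + 1];

	ata_id_c_string(id, model, 27, sizeof(model));
	printk(KERN_DEBUG "model: %s\n", model);
}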
1046c6fd2807SJeff Garzik 
1047db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id)
1048db6f8759STejun Heo {
1049db6f8759STejun Heo 	if (ata_id_has_lba(id)) {
1050db6f8759STejun Heo 		if (ata_id_has_lba48(id))
1051db6f8759STejun Heo 			return ata_id_u64(id, 100);
1052db6f8759STejun Heo 		else
1053db6f8759STejun Heo 			return ata_id_u32(id, 60);
1054db6f8759STejun Heo 	} else {
1055db6f8759STejun Heo 		if (ata_id_current_chs_valid(id))
1056db6f8759STejun Heo 			return ata_id_u32(id, 57);
1057db6f8759STejun Heo 		else
1058db6f8759STejun Heo 			return id[1] * id[3] * id[6];
1059db6f8759STejun Heo 	}
1060db6f8759STejun Heo }
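
/*
 * Explanatory note (not part of the original file): the IDENTIFY words
 * read above are, per the ATA spec, words 100-103 (total LBA48
 * sectors), words 60-61 (total LBA28 sectors), words 57-58 (current
 * CHS capacity in sectors) and words 1/3/6 (default cylinders, heads
 * and sectors per track).
 */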
1061db6f8759STejun Heo 
10621e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
10631e999736SAlan Cox {
10641e999736SAlan Cox 	u64 sectors = 0;
10651e999736SAlan Cox 
10661e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
10671e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
10681e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;	/* avoid sign extension */
10691e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10701e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10711e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10721e999736SAlan Cox 
10731e999736SAlan Cox 	return ++sectors;
10741e999736SAlan Cox }
10751e999736SAlan Cox 
10761e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
10771e999736SAlan Cox {
10781e999736SAlan Cox 	u64 sectors = 0;
10791e999736SAlan Cox 
10801e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
10811e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10821e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10831e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10841e999736SAlan Cox 
10851e999736SAlan Cox 	return ++sectors;
10861e999736SAlan Cox }
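
/*
 * Explanatory note (not part of the original file): READ NATIVE MAX
 * (EXT) and SET MAX (EXT) deal in the highest addressable LBA rather
 * than a sector count, which is why the two helpers above add one
 * before returning and ata_set_max_sectors() below subtracts one
 * before issuing the command.
 */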
10871e999736SAlan Cox 
10881e999736SAlan Cox /**
1089c728a914STejun Heo  *	ata_read_native_max_address - Read native max address
1090c728a914STejun Heo  *	@dev: target device
1091c728a914STejun Heo  *	@max_sectors: out parameter for the result native max address
10921e999736SAlan Cox  *
1093c728a914STejun Heo  *	Perform an LBA48 or LBA28 native size query upon the device in
1094c728a914STejun Heo  *	question.
1095c728a914STejun Heo  *
1096c728a914STejun Heo  *	RETURNS:
1097c728a914STejun Heo  *	0 on success, -EACCES if command is aborted by the drive.
1098c728a914STejun Heo  *	-EIO on other errors.
10991e999736SAlan Cox  */
1100c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
11011e999736SAlan Cox {
1102c728a914STejun Heo 	unsigned int err_mask;
11031e999736SAlan Cox 	struct ata_taskfile tf;
1104c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
11051e999736SAlan Cox 
11061e999736SAlan Cox 	ata_tf_init(dev, &tf);
11071e999736SAlan Cox 
1108c728a914STejun Heo 	/* always clear all address registers */
11091e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1110c728a914STejun Heo 
1111c728a914STejun Heo 	if (lba48) {
1112c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1113c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
1114c728a914STejun Heo 	} else
1115c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1116c728a914STejun Heo 
11171e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1118c728a914STejun Heo 	tf.device |= ATA_LBA;
11191e999736SAlan Cox 
11202b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1121c728a914STejun Heo 	if (err_mask) {
1122c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1123c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1124c728a914STejun Heo 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1125c728a914STejun Heo 			return -EACCES;
1126c728a914STejun Heo 		return -EIO;
1127c728a914STejun Heo 	}
1128c728a914STejun Heo 
1129c728a914STejun Heo 	if (lba48)
1130c728a914STejun Heo 		*max_sectors = ata_tf_to_lba48(&tf);
1131c728a914STejun Heo 	else
1132c728a914STejun Heo 		*max_sectors = ata_tf_to_lba(&tf);
113393328e11SAlan Cox 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
113493328e11SAlan Cox 		(*max_sectors)--;
11351e999736SAlan Cox 	return 0;
11361e999736SAlan Cox }
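/*
 * Illustrative usage sketch (the caller shown here is hypothetical;
 * ata_hpa_resize() below is the real in-file user):
 *
 *	u64 native_sectors;
 *
 *	if (ata_read_native_max_address(dev, &native_sectors) == 0)
 *		ata_dev_printk(dev, KERN_INFO, "native max: %llu\n",
 *			       (unsigned long long)native_sectors);
 */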
11371e999736SAlan Cox 
11381e999736SAlan Cox /**
1139c728a914STejun Heo  *	ata_set_max_sectors - Set max sectors
1140c728a914STejun Heo  *	@dev: target device
11416b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
11421e999736SAlan Cox  *
1143c728a914STejun Heo  *	Set max sectors of @dev to @new_sectors.
1144c728a914STejun Heo  *
1145c728a914STejun Heo  *	RETURNS:
1146c728a914STejun Heo  *	0 on success, -EACCES if command is aborted or denied (due to
1147c728a914STejun Heo  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1148c728a914STejun Heo  *	errors.
11491e999736SAlan Cox  */
115005027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
11511e999736SAlan Cox {
1152c728a914STejun Heo 	unsigned int err_mask;
11531e999736SAlan Cox 	struct ata_taskfile tf;
1154c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
11551e999736SAlan Cox 
11561e999736SAlan Cox 	new_sectors--;
11571e999736SAlan Cox 
11581e999736SAlan Cox 	ata_tf_init(dev, &tf);
11591e999736SAlan Cox 
1160c728a914STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
11611e999736SAlan Cox 
1162c728a914STejun Heo 	if (lba48) {
1163c728a914STejun Heo 		tf.command = ATA_CMD_SET_MAX_EXT;
1164c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
11651e999736SAlan Cox 
11661e999736SAlan Cox 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
11671e999736SAlan Cox 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
11681e999736SAlan Cox 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
11691e582ba4STejun Heo 	} else {
11701e999736SAlan Cox 		tf.command = ATA_CMD_SET_MAX;
1171c728a914STejun Heo 
11721e582ba4STejun Heo 		tf.device |= (new_sectors >> 24) & 0xf;
11731e582ba4STejun Heo 	}
11741e582ba4STejun Heo 
11751e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1176c728a914STejun Heo 	tf.device |= ATA_LBA;
11771e999736SAlan Cox 
11781e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
11791e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
11801e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
11811e999736SAlan Cox 
11822b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1183c728a914STejun Heo 	if (err_mask) {
1184c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1185c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1186c728a914STejun Heo 		if (err_mask == AC_ERR_DEV &&
1187c728a914STejun Heo 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1188c728a914STejun Heo 			return -EACCES;
1189c728a914STejun Heo 		return -EIO;
1190c728a914STejun Heo 	}
1191c728a914STejun Heo 
11921e999736SAlan Cox 	return 0;
11931e999736SAlan Cox }
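/*
 * Worked example (illustrative numbers): for an LBA28 device, a
 * decremented new_sectors of 0xABCDEF1 is split as device |= 0xA
 * (bits 27:24), lbah = 0xBC, lbam = 0xDE and lbal = 0xF1; the LBA48
 * path instead carries bits 47:24 in hob_lbah/hob_lbam/hob_lbal.
 */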
11941e999736SAlan Cox 
11951e999736SAlan Cox /**
11961e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
11971e999736SAlan Cox  *	@dev: Device to resize
11981e999736SAlan Cox  *
11991e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
12001e999736SAlan Cox  *	it, if required, to the full size of the media.  The caller must check
12011e999736SAlan Cox  *	that the drive has the HPA feature set enabled.
120205027adcSTejun Heo  *
120305027adcSTejun Heo  *	RETURNS:
120405027adcSTejun Heo  *	0 on success, -errno on failure.
12051e999736SAlan Cox  */
120605027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev)
12071e999736SAlan Cox {
120805027adcSTejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
120905027adcSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
121005027adcSTejun Heo 	u64 sectors = ata_id_n_sectors(dev->id);
121105027adcSTejun Heo 	u64 native_sectors;
1212c728a914STejun Heo 	int rc;
12131e999736SAlan Cox 
121405027adcSTejun Heo 	/* do we need to do it? */
121505027adcSTejun Heo 	if (dev->class != ATA_DEV_ATA ||
121605027adcSTejun Heo 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
121705027adcSTejun Heo 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1218c728a914STejun Heo 		return 0;
12191e999736SAlan Cox 
122005027adcSTejun Heo 	/* read native max address */
122105027adcSTejun Heo 	rc = ata_read_native_max_address(dev, &native_sectors);
122205027adcSTejun Heo 	if (rc) {
122305027adcSTejun Heo 		/* If HPA isn't going to be unlocked, skip HPA
122405027adcSTejun Heo 		 * resizing from the next try.
122505027adcSTejun Heo 		 */
122605027adcSTejun Heo 		if (!ata_ignore_hpa) {
122705027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
122805027adcSTejun Heo 				       "broken, will skip HPA handling\n");
122905027adcSTejun Heo 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
123005027adcSTejun Heo 
123105027adcSTejun Heo 			/* we can continue if device aborted the command */
123205027adcSTejun Heo 			if (rc == -EACCES)
123305027adcSTejun Heo 				rc = 0;
123405027adcSTejun Heo 		}
123505027adcSTejun Heo 
123605027adcSTejun Heo 		return rc;
123705027adcSTejun Heo 	}
123805027adcSTejun Heo 
123905027adcSTejun Heo 	/* nothing to do? */
124005027adcSTejun Heo 	if (native_sectors <= sectors || !ata_ignore_hpa) {
124105027adcSTejun Heo 		if (!print_info || native_sectors == sectors)
124205027adcSTejun Heo 			return 0;
124305027adcSTejun Heo 
124405027adcSTejun Heo 		if (native_sectors > sectors)
12451e999736SAlan Cox 			ata_dev_printk(dev, KERN_INFO,
124605027adcSTejun Heo 				"HPA detected: current %llu, native %llu\n",
124705027adcSTejun Heo 				(unsigned long long)sectors,
124805027adcSTejun Heo 				(unsigned long long)native_sectors);
124905027adcSTejun Heo 		else if (native_sectors < sectors)
125005027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING,
125105027adcSTejun Heo 				"native sectors (%llu) is smaller than "
125205027adcSTejun Heo 				"sectors (%llu)\n",
125305027adcSTejun Heo 				(unsigned long long)native_sectors,
125405027adcSTejun Heo 				(unsigned long long)sectors);
125505027adcSTejun Heo 		return 0;
12561e999736SAlan Cox 	}
125737301a55STejun Heo 
125805027adcSTejun Heo 	/* let's unlock HPA */
125905027adcSTejun Heo 	rc = ata_set_max_sectors(dev, native_sectors);
126005027adcSTejun Heo 	if (rc == -EACCES) {
126105027adcSTejun Heo 		/* if device aborted the command, skip HPA resizing */
126205027adcSTejun Heo 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
126305027adcSTejun Heo 			       "(%llu -> %llu), skipping HPA handling\n",
126405027adcSTejun Heo 			       (unsigned long long)sectors,
126505027adcSTejun Heo 			       (unsigned long long)native_sectors);
126605027adcSTejun Heo 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
126705027adcSTejun Heo 		return 0;
126805027adcSTejun Heo 	} else if (rc)
126905027adcSTejun Heo 		return rc;
127005027adcSTejun Heo 
127105027adcSTejun Heo 	/* re-read IDENTIFY data */
127205027adcSTejun Heo 	rc = ata_dev_reread_id(dev, 0);
127305027adcSTejun Heo 	if (rc) {
127405027adcSTejun Heo 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
127505027adcSTejun Heo 			       "data after HPA resizing\n");
127605027adcSTejun Heo 		return rc;
127705027adcSTejun Heo 	}
127805027adcSTejun Heo 
127905027adcSTejun Heo 	if (print_info) {
128005027adcSTejun Heo 		u64 new_sectors = ata_id_n_sectors(dev->id);
128105027adcSTejun Heo 		ata_dev_printk(dev, KERN_INFO,
128205027adcSTejun Heo 			"HPA unlocked: %llu -> %llu, native %llu\n",
128305027adcSTejun Heo 			(unsigned long long)sectors,
128405027adcSTejun Heo 			(unsigned long long)new_sectors,
128505027adcSTejun Heo 			(unsigned long long)native_sectors);
128605027adcSTejun Heo 	}
128705027adcSTejun Heo 
128805027adcSTejun Heo 	return 0;
12891e999736SAlan Cox }
12901e999736SAlan Cox 
1291c6fd2807SJeff Garzik /**
1292c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1293c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1294c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1295c6fd2807SJeff Garzik  *
1296c6fd2807SJeff Garzik  *	This function performs no operation.
1297c6fd2807SJeff Garzik  *
1298c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1299c6fd2807SJeff Garzik  *
1300c6fd2807SJeff Garzik  *	LOCKING:
1301c6fd2807SJeff Garzik  *	caller.
1302c6fd2807SJeff Garzik  */
1303c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1304c6fd2807SJeff Garzik {
1305c6fd2807SJeff Garzik }
1306c6fd2807SJeff Garzik 
1307c6fd2807SJeff Garzik 
1308c6fd2807SJeff Garzik /**
1309c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1310c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1311c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1312c6fd2807SJeff Garzik  *
1313c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1314c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1315c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1316c6fd2807SJeff Garzik  *
1317c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1318c6fd2807SJeff Garzik  *
1319c6fd2807SJeff Garzik  *	LOCKING:
1320c6fd2807SJeff Garzik  *	caller.
1321c6fd2807SJeff Garzik  */
1322c6fd2807SJeff Garzik 
1323c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1324c6fd2807SJeff Garzik {
1325c6fd2807SJeff Garzik 	u8 tmp;
1326c6fd2807SJeff Garzik 
1327c6fd2807SJeff Garzik 	if (device == 0)
1328c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1329c6fd2807SJeff Garzik 	else
1330c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1331c6fd2807SJeff Garzik 
13320d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1333c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1334c6fd2807SJeff Garzik }
1335c6fd2807SJeff Garzik 
1336c6fd2807SJeff Garzik /**
1337c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1338c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1339c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1340c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1341c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1342c6fd2807SJeff Garzik  *
1343c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1344c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1345c6fd2807SJeff Garzik  *	ATA channel.
1346c6fd2807SJeff Garzik  *
1347c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1348c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1349c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1350c6fd2807SJeff Garzik  *
1351c6fd2807SJeff Garzik  *	LOCKING:
1352c6fd2807SJeff Garzik  *	caller.
1353c6fd2807SJeff Garzik  */
1354c6fd2807SJeff Garzik 
1355c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1356c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1357c6fd2807SJeff Garzik {
1358c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
135944877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
136044877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1361c6fd2807SJeff Garzik 
1362c6fd2807SJeff Garzik 	if (wait)
1363c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1364c6fd2807SJeff Garzik 
1365c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1366c6fd2807SJeff Garzik 
1367c6fd2807SJeff Garzik 	if (wait) {
13689af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1369c6fd2807SJeff Garzik 			msleep(150);
1370c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1371c6fd2807SJeff Garzik 	}
1372c6fd2807SJeff Garzik }
1373c6fd2807SJeff Garzik 
1374c6fd2807SJeff Garzik /**
1375c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1376c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1377c6fd2807SJeff Garzik  *
1378c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1379c6fd2807SJeff Garzik  *	page.
1380c6fd2807SJeff Garzik  *
1381c6fd2807SJeff Garzik  *	LOCKING:
1382c6fd2807SJeff Garzik  *	caller.
1383c6fd2807SJeff Garzik  */
1384c6fd2807SJeff Garzik 
1385c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1386c6fd2807SJeff Garzik {
1387c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1388c6fd2807SJeff Garzik 		"53==0x%04x  "
1389c6fd2807SJeff Garzik 		"63==0x%04x  "
1390c6fd2807SJeff Garzik 		"64==0x%04x  "
1391c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1392c6fd2807SJeff Garzik 		id[49],
1393c6fd2807SJeff Garzik 		id[53],
1394c6fd2807SJeff Garzik 		id[63],
1395c6fd2807SJeff Garzik 		id[64],
1396c6fd2807SJeff Garzik 		id[75]);
1397c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1398c6fd2807SJeff Garzik 		"81==0x%04x  "
1399c6fd2807SJeff Garzik 		"82==0x%04x  "
1400c6fd2807SJeff Garzik 		"83==0x%04x  "
1401c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1402c6fd2807SJeff Garzik 		id[80],
1403c6fd2807SJeff Garzik 		id[81],
1404c6fd2807SJeff Garzik 		id[82],
1405c6fd2807SJeff Garzik 		id[83],
1406c6fd2807SJeff Garzik 		id[84]);
1407c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1408c6fd2807SJeff Garzik 		"93==0x%04x\n",
1409c6fd2807SJeff Garzik 		id[88],
1410c6fd2807SJeff Garzik 		id[93]);
1411c6fd2807SJeff Garzik }
1412c6fd2807SJeff Garzik 
1413c6fd2807SJeff Garzik /**
1414c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1415c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1416c6fd2807SJeff Garzik  *
1417c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1418c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1419c6fd2807SJeff Garzik  *
1420c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1421c6fd2807SJeff Garzik  *
1422c6fd2807SJeff Garzik  *	LOCKING:
1423c6fd2807SJeff Garzik  *	None.
1424c6fd2807SJeff Garzik  *
1425c6fd2807SJeff Garzik  *	RETURNS:
1426c6fd2807SJeff Garzik  *	Computed xfermask
1427c6fd2807SJeff Garzik  */
14287dc951aeSTejun Heo unsigned long ata_id_xfermask(const u16 *id)
1429c6fd2807SJeff Garzik {
14307dc951aeSTejun Heo 	unsigned long pio_mask, mwdma_mask, udma_mask;
1431c6fd2807SJeff Garzik 
1432c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1433c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1434c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1435c6fd2807SJeff Garzik 		pio_mask <<= 3;
1436c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1437c6fd2807SJeff Garzik 	} else {
1438c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1439c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1440c6fd2807SJeff Garzik 		 * a mask.
1441c6fd2807SJeff Garzik 		 */
14427a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
144346767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
144446767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
144546767aebSAlan Cox 		else
144646767aebSAlan Cox 			pio_mask = 1;
1447c6fd2807SJeff Garzik 
1448c6fd2807SJeff Garzik 		/* But wait... there's more.  Design your standards by
1449c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1450c6fd2807SJeff Garzik 		 * process.  However it's the speeds, not the modes, that
1451c6fd2807SJeff Garzik 		 * are supported...  Note that drivers using the timing API
1452c6fd2807SJeff Garzik 		 * will get this right anyway.
1453c6fd2807SJeff Garzik 		 */
1454c6fd2807SJeff Garzik 	}
1455c6fd2807SJeff Garzik 
1456c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1457c6fd2807SJeff Garzik 
1458b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1459b352e57dSAlan Cox 		/*
1460b352e57dSAlan Cox 		 *	Process compact flash extended modes
1461b352e57dSAlan Cox 		 */
1462b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1463b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1464b352e57dSAlan Cox 
1465b352e57dSAlan Cox 		if (pio)
1466b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1467b352e57dSAlan Cox 		if (pio > 1)
1468b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1469b352e57dSAlan Cox 		if (dma)
1470b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1471b352e57dSAlan Cox 		if (dma > 1)
1472b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1473b352e57dSAlan Cox 	}
1474b352e57dSAlan Cox 
1475c6fd2807SJeff Garzik 	udma_mask = 0;
1476c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1477c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1478c6fd2807SJeff Garzik 
1479c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1480c6fd2807SJeff Garzik }
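/*
 * Worked example (illustrative IDENTIFY values): with word 53 bits 1-2
 * set, word 64 = 0x0003 (PIO3-4), word 63 = 0x0007 (MWDMA0-2) and the
 * low byte of word 88 = 0x3f (UDMA0-5), this yields pio_mask = 0x1f,
 * mwdma_mask = 0x07 and udma_mask = 0x3f before they are combined by
 * ata_pack_xfermask().
 */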
1481c6fd2807SJeff Garzik 
1482c6fd2807SJeff Garzik /**
1483c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1484c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1485c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
148665f27f38SDavid Howells  *	@data: data for @fn to use
1487c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1488c6fd2807SJeff Garzik  *
1489c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1490c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1491c6fd2807SJeff Garzik  *	user's (i.e. the low level driver's) responsibility to make sure that only
1492c6fd2807SJeff Garzik  *	one task is active at any given time.
1493c6fd2807SJeff Garzik  *
1494c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1495c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1496c6fd2807SJeff Garzik  *	synchronization.
1497c6fd2807SJeff Garzik  *
1498c6fd2807SJeff Garzik  *	LOCKING:
1499c6fd2807SJeff Garzik  *	Inherited from caller.
1500c6fd2807SJeff Garzik  */
150165f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1502c6fd2807SJeff Garzik 			 unsigned long delay)
1503c6fd2807SJeff Garzik {
150465f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
150565f27f38SDavid Howells 	ap->port_task_data = data;
1506c6fd2807SJeff Garzik 
150745a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
150845a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1509c6fd2807SJeff Garzik }
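/*
 * Illustrative usage sketch; my_pio_poll_task is a hypothetical LLD
 * work function, not something defined in libata:
 *
 *	ata_port_queue_task(ap, my_pio_poll_task, qc, msecs_to_jiffies(10));
 *
 * runs my_pio_poll_task on ata_wq after roughly 10 ms, with the queued
 * command retrievable from ap->port_task_data inside the work function.
 */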
1510c6fd2807SJeff Garzik 
1511c6fd2807SJeff Garzik /**
1512c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1513c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1514c6fd2807SJeff Garzik  *
1515c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1516c6fd2807SJeff Garzik  *	be running or scheduled.
1517c6fd2807SJeff Garzik  *
1518c6fd2807SJeff Garzik  *	LOCKING:
1519c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1520c6fd2807SJeff Garzik  */
1521c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1522c6fd2807SJeff Garzik {
1523c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1524c6fd2807SJeff Garzik 
152545a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1526c6fd2807SJeff Garzik 
1527c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1528c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1529c6fd2807SJeff Garzik }
1530c6fd2807SJeff Garzik 
15317102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1532c6fd2807SJeff Garzik {
1533c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1534c6fd2807SJeff Garzik 
1535c6fd2807SJeff Garzik 	complete(waiting);
1536c6fd2807SJeff Garzik }
1537c6fd2807SJeff Garzik 
1538c6fd2807SJeff Garzik /**
15392432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1540c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1541c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1542c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1543c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
15445c1ad8b3SRandy Dunlap  *	@sgl: sg list for the data buffer of the command
15452432697bSTejun Heo  *	@n_elem: Number of sg entries
15462b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
1547c6fd2807SJeff Garzik  *
1548c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1549c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1550c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1551c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1552c6fd2807SJeff Garzik  *	clean up after timeout.
1553c6fd2807SJeff Garzik  *
1554c6fd2807SJeff Garzik  *	LOCKING:
1555c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1556c6fd2807SJeff Garzik  *
1557c6fd2807SJeff Garzik  *	RETURNS:
1558c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1559c6fd2807SJeff Garzik  */
15602432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1561c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
156287260216SJens Axboe 			      int dma_dir, struct scatterlist *sgl,
15632b789108STejun Heo 			      unsigned int n_elem, unsigned long timeout)
1564c6fd2807SJeff Garzik {
15659af5c9c9STejun Heo 	struct ata_link *link = dev->link;
15669af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1567c6fd2807SJeff Garzik 	u8 command = tf->command;
1568c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1569c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1570c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1571da917d69STejun Heo 	int preempted_nr_active_links;
1572c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1573c6fd2807SJeff Garzik 	unsigned long flags;
1574c6fd2807SJeff Garzik 	unsigned int err_mask;
1575c6fd2807SJeff Garzik 	int rc;
1576c6fd2807SJeff Garzik 
1577c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1578c6fd2807SJeff Garzik 
1579c6fd2807SJeff Garzik 	/* no internal command while frozen */
1580c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1581c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1582c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1583c6fd2807SJeff Garzik 	}
1584c6fd2807SJeff Garzik 
1585c6fd2807SJeff Garzik 	/* initialize internal qc */
1586c6fd2807SJeff Garzik 
1587c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1588c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1589c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1590c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1591c6fd2807SJeff Garzik 	 */
1592c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1593c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1594c6fd2807SJeff Garzik 	else
1595c6fd2807SJeff Garzik 		tag = 0;
1596c6fd2807SJeff Garzik 
1597c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1598c6fd2807SJeff Garzik 		BUG();
1599c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1600c6fd2807SJeff Garzik 
1601c6fd2807SJeff Garzik 	qc->tag = tag;
1602c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1603c6fd2807SJeff Garzik 	qc->ap = ap;
1604c6fd2807SJeff Garzik 	qc->dev = dev;
1605c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1606c6fd2807SJeff Garzik 
16079af5c9c9STejun Heo 	preempted_tag = link->active_tag;
16089af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1609c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1610da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
16119af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
16129af5c9c9STejun Heo 	link->sactive = 0;
1613c6fd2807SJeff Garzik 	ap->qc_active = 0;
1614da917d69STejun Heo 	ap->nr_active_links = 0;
1615c6fd2807SJeff Garzik 
1616c6fd2807SJeff Garzik 	/* prepare & issue qc */
1617c6fd2807SJeff Garzik 	qc->tf = *tf;
1618c6fd2807SJeff Garzik 	if (cdb)
1619c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1620c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1621c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1622c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
16232432697bSTejun Heo 		unsigned int i, buflen = 0;
162487260216SJens Axboe 		struct scatterlist *sg;
16252432697bSTejun Heo 
162687260216SJens Axboe 		for_each_sg(sgl, sg, n_elem, i)
162787260216SJens Axboe 			buflen += sg->length;
16282432697bSTejun Heo 
162987260216SJens Axboe 		ata_sg_init(qc, sgl, n_elem);
163049c80429SBrian King 		qc->nbytes = buflen;
1631c6fd2807SJeff Garzik 	}
1632c6fd2807SJeff Garzik 
1633c6fd2807SJeff Garzik 	qc->private_data = &wait;
1634c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1635c6fd2807SJeff Garzik 
1636c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1637c6fd2807SJeff Garzik 
1638c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1639c6fd2807SJeff Garzik 
16402b789108STejun Heo 	if (!timeout)
16412b789108STejun Heo 		timeout = ata_probe_timeout * 1000 / HZ;
16422b789108STejun Heo 
16432b789108STejun Heo 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1644c6fd2807SJeff Garzik 
1645c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1646c6fd2807SJeff Garzik 
1647c6fd2807SJeff Garzik 	if (!rc) {
1648c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1649c6fd2807SJeff Garzik 
1650c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1651c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1652c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1653c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1654c6fd2807SJeff Garzik 		 */
1655c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1656c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1657c6fd2807SJeff Garzik 
1658c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1659c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1660c6fd2807SJeff Garzik 			else
1661c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1662c6fd2807SJeff Garzik 
1663c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1664c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1665c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1666c6fd2807SJeff Garzik 		}
1667c6fd2807SJeff Garzik 
1668c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1669c6fd2807SJeff Garzik 	}
1670c6fd2807SJeff Garzik 
1671c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1672c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1673c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1674c6fd2807SJeff Garzik 
1675a51d644aSTejun Heo 	/* perform minimal error analysis */
1676a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1677a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1678a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1679a51d644aSTejun Heo 
1680a51d644aSTejun Heo 		if (!qc->err_mask)
1681c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1682a51d644aSTejun Heo 
1683a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1684a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1685c6fd2807SJeff Garzik 	}
1686c6fd2807SJeff Garzik 
1687c6fd2807SJeff Garzik 	/* finish up */
1688c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1689c6fd2807SJeff Garzik 
1690c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1691c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1692c6fd2807SJeff Garzik 
1693c6fd2807SJeff Garzik 	ata_qc_free(qc);
16949af5c9c9STejun Heo 	link->active_tag = preempted_tag;
16959af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1696c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1697da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1698c6fd2807SJeff Garzik 
1699c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1700c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1701c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1702c6fd2807SJeff Garzik 	 * port.
1703c6fd2807SJeff Garzik 	 *
1704c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1705c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1706c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1707c6fd2807SJeff Garzik 	 *
1708c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1709c6fd2807SJeff Garzik 	 */
1710c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1711c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1712c6fd2807SJeff Garzik 		ata_port_probe(ap);
1713c6fd2807SJeff Garzik 	}
1714c6fd2807SJeff Garzik 
1715c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1716c6fd2807SJeff Garzik 
1717c6fd2807SJeff Garzik 	return err_mask;
1718c6fd2807SJeff Garzik }
1719c6fd2807SJeff Garzik 
1720c6fd2807SJeff Garzik /**
172133480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
17222432697bSTejun Heo  *	@dev: Device to which the command is sent
17232432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
17242432697bSTejun Heo  *	@cdb: CDB for packet command
17252432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
17262432697bSTejun Heo  *	@buf: Data buffer of the command
17272432697bSTejun Heo  *	@buflen: Length of data buffer
17282b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
17292432697bSTejun Heo  *
17302432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
17312432697bSTejun Heo  *	buffer instead of sg list.
17322432697bSTejun Heo  *
17332432697bSTejun Heo  *	LOCKING:
17342432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
17352432697bSTejun Heo  *
17362432697bSTejun Heo  *	RETURNS:
17372432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
17382432697bSTejun Heo  */
17392432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
17402432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
17412b789108STejun Heo 			   int dma_dir, void *buf, unsigned int buflen,
17422b789108STejun Heo 			   unsigned long timeout)
17432432697bSTejun Heo {
174433480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
174533480a0eSTejun Heo 	unsigned int n_elem = 0;
17462432697bSTejun Heo 
174733480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
174833480a0eSTejun Heo 		WARN_ON(!buf);
17492432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
175033480a0eSTejun Heo 		psg = &sg;
175133480a0eSTejun Heo 		n_elem++;
175233480a0eSTejun Heo 	}
17532432697bSTejun Heo 
17542b789108STejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
17552b789108STejun Heo 				    timeout);
17562432697bSTejun Heo }
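/*
 * Illustrative sketch of a data-in invocation, similar to what
 * ata_dev_read_id() does later in this file for IDENTIFY:
 *
 *	struct ata_taskfile tf;
 *	u16 buf[ATA_ID_WORDS];
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     buf, sizeof(buf), 0);
 */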
17572432697bSTejun Heo 
17582432697bSTejun Heo /**
1759c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1760c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1761c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1762c6fd2807SJeff Garzik  *
1763c6fd2807SJeff Garzik  *	Execute a 'simple' command that consists only of the opcode
1764c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers.
1765c6fd2807SJeff Garzik  *
1766c6fd2807SJeff Garzik  *	LOCKING:
1767c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1768c6fd2807SJeff Garzik  *
1769c6fd2807SJeff Garzik  *	RETURNS:
1770c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1771c6fd2807SJeff Garzik  */
1772c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1773c6fd2807SJeff Garzik {
1774c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1775c6fd2807SJeff Garzik 
1776c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1777c6fd2807SJeff Garzik 
1778c6fd2807SJeff Garzik 	tf.command = cmd;
1779c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1780c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1781c6fd2807SJeff Garzik 
17822b789108STejun Heo 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1783c6fd2807SJeff Garzik }
1784c6fd2807SJeff Garzik 
1785c6fd2807SJeff Garzik /**
1786c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1787c6fd2807SJeff Garzik  *	@adev: ATA device
1788c6fd2807SJeff Garzik  *
1789c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1790c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1791c6fd2807SJeff Garzik  */
1792c6fd2807SJeff Garzik 
1793c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1794c6fd2807SJeff Garzik {
1795432729f0SAlan Cox 	/* Controller doesn't support IORDY.  Probably a pointless check
1796432729f0SAlan Cox 	   as the caller should know this */
17979af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1798c6fd2807SJeff Garzik 		return 0;
1799432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1800432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1801c6fd2807SJeff Garzik 		return 1;
1802432729f0SAlan Cox 	/* We turn it on when possible */
1803432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1804432729f0SAlan Cox 		return 1;
1805432729f0SAlan Cox 	return 0;
1806432729f0SAlan Cox }
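/*
 * For example, a device running at XFER_PIO_4 always needs IORDY since
 * PIO3 and above make it mandatory (assuming the controller supports
 * IORDY at all), while a PIO2 device needs it only if its IDENTIFY
 * data advertises IORDY support.
 */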
1807c6fd2807SJeff Garzik 
1808432729f0SAlan Cox /**
1809432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1810432729f0SAlan Cox  *	@adev: ATA device
1811432729f0SAlan Cox  *
1812432729f0SAlan Cox  *	Compute the highest PIO modes possible when IORDY is not in use
1813432729f0SAlan Cox  *	and return them as a PIO xfer mask.
1814432729f0SAlan Cox  */
1815432729f0SAlan Cox 
1816432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1817432729f0SAlan Cox {
1818c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1819c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1820432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1821c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1822c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non-IORDY? */
1823c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1824c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1825432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1826432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1827c6fd2807SJeff Garzik 		}
1828c6fd2807SJeff Garzik 	}
1829432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1830c6fd2807SJeff Garzik }
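/*
 * Worked example (illustrative timing): an EIDE drive whose word 67
 * (ATA_ID_EIDE_PIO) reports a 383 ns minimum non-IORDY cycle time is
 * slower than the 240 ns of PIO2, so the mask is limited to
 * 3 << ATA_SHIFT_PIO (PIO0-1); a drive reporting a nonzero 240 ns or
 * less gets 7 << ATA_SHIFT_PIO (PIO0-2).
 */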
1831c6fd2807SJeff Garzik 
1832c6fd2807SJeff Garzik /**
1833c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1834c6fd2807SJeff Garzik  *	@dev: target device
1835c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1836bff04647STejun Heo  *	@flags: ATA_READID_* flags
1837c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1838c6fd2807SJeff Garzik  *
1839c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1840c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1841c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1842c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1843c6fd2807SJeff Garzik  *
184450a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
184550a99018SAlan Cox  *	now we abort if we hit that case.
184650a99018SAlan Cox  *
1847c6fd2807SJeff Garzik  *	LOCKING:
1848c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1849c6fd2807SJeff Garzik  *
1850c6fd2807SJeff Garzik  *	RETURNS:
1851c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1852c6fd2807SJeff Garzik  */
1853c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1854bff04647STejun Heo 		    unsigned int flags, u16 *id)
1855c6fd2807SJeff Garzik {
18569af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1857c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1858c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1859c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1860c6fd2807SJeff Garzik 	const char *reason;
186154936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1862c6fd2807SJeff Garzik 	int rc;
1863c6fd2807SJeff Garzik 
1864c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
186544877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1866c6fd2807SJeff Garzik 
1867c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1868c6fd2807SJeff Garzik  retry:
1869c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1870c6fd2807SJeff Garzik 
1871c6fd2807SJeff Garzik 	switch (class) {
1872c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1873c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1874c6fd2807SJeff Garzik 		break;
1875c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1876c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1877c6fd2807SJeff Garzik 		break;
1878c6fd2807SJeff Garzik 	default:
1879c6fd2807SJeff Garzik 		rc = -ENODEV;
1880c6fd2807SJeff Garzik 		reason = "unsupported class";
1881c6fd2807SJeff Garzik 		goto err_out;
1882c6fd2807SJeff Garzik 	}
1883c6fd2807SJeff Garzik 
1884c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
188581afe893STejun Heo 
188681afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
188781afe893STejun Heo 	 * sure those are properly initialized.
188881afe893STejun Heo 	 */
188981afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
189081afe893STejun Heo 
189181afe893STejun Heo 	/* Device presence detection is unreliable on some
189281afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
189381afe893STejun Heo 	 */
189481afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1895c6fd2807SJeff Garzik 
1896c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
18972b789108STejun Heo 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1898c6fd2807SJeff Garzik 	if (err_mask) {
1899800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
190055a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
190144877b4eSTejun Heo 				ap->print_id, dev->devno);
190255a8e2c8STejun Heo 			return -ENOENT;
190355a8e2c8STejun Heo 		}
190455a8e2c8STejun Heo 
190554936f8bSTejun Heo 		/* Device or controller might have reported the wrong
190654936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
190754936f8bSTejun Heo 		 * the current one is aborted by the device.
190854936f8bSTejun Heo 		 */
190954936f8bSTejun Heo 		if (may_fallback &&
191054936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
191154936f8bSTejun Heo 			may_fallback = 0;
191254936f8bSTejun Heo 
191354936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
191454936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
191554936f8bSTejun Heo 			else
191654936f8bSTejun Heo 				class = ATA_DEV_ATA;
191754936f8bSTejun Heo 			goto retry;
191854936f8bSTejun Heo 		}
191954936f8bSTejun Heo 
1920c6fd2807SJeff Garzik 		rc = -EIO;
1921c6fd2807SJeff Garzik 		reason = "I/O error";
1922c6fd2807SJeff Garzik 		goto err_out;
1923c6fd2807SJeff Garzik 	}
1924c6fd2807SJeff Garzik 
192554936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
192654936f8bSTejun Heo 	 * successfully at least once.
192754936f8bSTejun Heo 	 */
192854936f8bSTejun Heo 	may_fallback = 0;
192954936f8bSTejun Heo 
1930c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1931c6fd2807SJeff Garzik 
1932c6fd2807SJeff Garzik 	/* sanity check */
1933c6fd2807SJeff Garzik 	rc = -EINVAL;
19346070068bSAlan Cox 	reason = "device reports invalid type";
19354a3381feSJeff Garzik 
19364a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
19374a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
19384a3381feSJeff Garzik 			goto err_out;
19394a3381feSJeff Garzik 	} else {
19404a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1941c6fd2807SJeff Garzik 			goto err_out;
1942c6fd2807SJeff Garzik 	}
1943c6fd2807SJeff Garzik 
1944169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1945169439c2SMark Lord 		tried_spinup = 1;
1946169439c2SMark Lord 		/*
1947169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1948169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1949169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1950169439c2SMark Lord 		 */
1951218f3d30SJeff Garzik 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1952fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
1953169439c2SMark Lord 			rc = -EIO;
1954169439c2SMark Lord 			reason = "SPINUP failed";
1955169439c2SMark Lord 			goto err_out;
1956169439c2SMark Lord 		}
1957169439c2SMark Lord 		/*
1958169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1959169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1960169439c2SMark Lord 		 */
1961169439c2SMark Lord 		if (id[2] == 0x37c8)
1962169439c2SMark Lord 			goto retry;
1963169439c2SMark Lord 	}
1964169439c2SMark Lord 
1965bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1966c6fd2807SJeff Garzik 		/*
1967c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1968c6fd2807SJeff Garzik 		 * SRST RESET
196950a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
197050a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1971c6fd2807SJeff Garzik 		 * anything else..
1972c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
197350a99018SAlan Cox 		 *
197450a99018SAlan Cox 		 * Note that ATA4 says lba is mandatory so the second check
197550a99018SAlan Cox 		 * should never trigger.
1976c6fd2807SJeff Garzik 		 */
1977c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1978c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1979c6fd2807SJeff Garzik 			if (err_mask) {
1980c6fd2807SJeff Garzik 				rc = -EIO;
1981c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1982c6fd2807SJeff Garzik 				goto err_out;
1983c6fd2807SJeff Garzik 			}
1984c6fd2807SJeff Garzik 
1985c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1986c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1987c6fd2807SJeff Garzik 			 */
1988bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1989c6fd2807SJeff Garzik 			goto retry;
1990c6fd2807SJeff Garzik 		}
1991c6fd2807SJeff Garzik 	}
1992c6fd2807SJeff Garzik 
1993c6fd2807SJeff Garzik 	*p_class = class;
1994c6fd2807SJeff Garzik 
1995c6fd2807SJeff Garzik 	return 0;
1996c6fd2807SJeff Garzik 
1997c6fd2807SJeff Garzik  err_out:
1998c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1999c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2000c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2001c6fd2807SJeff Garzik 	return rc;
2002c6fd2807SJeff Garzik }
2003c6fd2807SJeff Garzik 
2004c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
2005c6fd2807SJeff Garzik {
20069af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20079af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2008c6fd2807SJeff Garzik }
2009c6fd2807SJeff Garzik 
2010c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
2011c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
2012c6fd2807SJeff Garzik {
20139af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
2014c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2015c6fd2807SJeff Garzik 
2016c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
2017c6fd2807SJeff Garzik 		desc[0] = '\0';
2018c6fd2807SJeff Garzik 		return;
2019c6fd2807SJeff Garzik 	}
202075683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
20216919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
20226919a0a6SAlan Cox 		return;
20236919a0a6SAlan Cox 	}
2024c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
2025cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2026c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
2027c6fd2807SJeff Garzik 	}
2028c6fd2807SJeff Garzik 
2029c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
2030c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2031c6fd2807SJeff Garzik 	else
2032c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2033c6fd2807SJeff Garzik }
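/*
 * For example, on an NCQ-capable host whose scsi_host->can_queue is 31,
 * a drive advertising a queue depth of 32 ends up with the description
 * "NCQ (depth 31/32)", while a drive carrying ATA_HORKAGE_NONCQ is
 * reported as "NCQ (not used)".
 */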
2034c6fd2807SJeff Garzik 
2035c6fd2807SJeff Garzik /**
2036c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2037c6fd2807SJeff Garzik  *	@dev: Target device to configure
2038c6fd2807SJeff Garzik  *
2039c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
2040c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
2041c6fd2807SJeff Garzik  *
2042c6fd2807SJeff Garzik  *	LOCKING:
2043c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
2044c6fd2807SJeff Garzik  *
2045c6fd2807SJeff Garzik  *	RETURNS:
2046c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
2047c6fd2807SJeff Garzik  */
2048efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
2049c6fd2807SJeff Garzik {
20509af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20519af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
20526746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2053c6fd2807SJeff Garzik 	const u16 *id = dev->id;
20547dc951aeSTejun Heo 	unsigned long xfer_mask;
2055b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
20563f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
20573f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
2058c6fd2807SJeff Garzik 	int rc;
2059c6fd2807SJeff Garzik 
2060c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
206144877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
206244877b4eSTejun Heo 			       __FUNCTION__);
2063c6fd2807SJeff Garzik 		return 0;
2064c6fd2807SJeff Garzik 	}
2065c6fd2807SJeff Garzik 
2066c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
206744877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2068c6fd2807SJeff Garzik 
206975683fe7STejun Heo 	/* set horkage */
207075683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
207175683fe7STejun Heo 
20726746544cSTejun Heo 	/* let ACPI work its magic */
20736746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
20746746544cSTejun Heo 	if (rc)
20756746544cSTejun Heo 		return rc;
207608573a86SKristen Carlson Accardi 
207705027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
207805027adcSTejun Heo 	rc = ata_hpa_resize(dev);
207905027adcSTejun Heo 	if (rc)
208005027adcSTejun Heo 		return rc;
208105027adcSTejun Heo 
2082c6fd2807SJeff Garzik 	/* print device capabilities */
2083c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2084c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2085c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2086c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2087c6fd2807SJeff Garzik 			       __FUNCTION__,
2088c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
2089c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
2090c6fd2807SJeff Garzik 
2091c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
2092c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2093c6fd2807SJeff Garzik 	dev->max_sectors = 0;
2094c6fd2807SJeff Garzik 	dev->cdb_len = 0;
2095c6fd2807SJeff Garzik 	dev->n_sectors = 0;
2096c6fd2807SJeff Garzik 	dev->cylinders = 0;
2097c6fd2807SJeff Garzik 	dev->heads = 0;
2098c6fd2807SJeff Garzik 	dev->sectors = 0;
2099c6fd2807SJeff Garzik 
2100c6fd2807SJeff Garzik 	/*
2101c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
2102c6fd2807SJeff Garzik 	 */
2103c6fd2807SJeff Garzik 
2104c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
2105c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
2106c6fd2807SJeff Garzik 
2107c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2108c6fd2807SJeff Garzik 		ata_dump_id(id);
2109c6fd2807SJeff Garzik 
2110ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2111ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2112ef143d57SAlbert Lee 			sizeof(fwrevbuf));
2113ef143d57SAlbert Lee 
2114ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2115ef143d57SAlbert Lee 			sizeof(modelbuf));
2116ef143d57SAlbert Lee 
2117c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
2118c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
2119b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
2120b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
212144877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
212244877b4eSTejun Heo 					       "supports DRM functions and may "
212344877b4eSTejun Heo 					       "not be fully accessible.\n");
2124b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
2125ae8d4ee7SAlan Cox 		} else {
2126b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2127ae8d4ee7SAlan Cox 			/* Warn the user if the device has TPM extensions */
2128ae8d4ee7SAlan Cox 			if (ata_id_has_tpm(id))
2129ae8d4ee7SAlan Cox 				ata_dev_printk(dev, KERN_WARNING,
2130ae8d4ee7SAlan Cox 					       "supports DRM functions and may "
2131ae8d4ee7SAlan Cox 					       "not be fully accessible.\n");
2132ae8d4ee7SAlan Cox 		}
2133b352e57dSAlan Cox 
2134c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
2135c6fd2807SJeff Garzik 
21363f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
21373f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
21383f64f565SEric D. Mudama 
2139c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
2140c6fd2807SJeff Garzik 			const char *lba_desc;
2141c6fd2807SJeff Garzik 			char ncq_desc[20];
2142c6fd2807SJeff Garzik 
2143c6fd2807SJeff Garzik 			lba_desc = "LBA";
2144c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
2145c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
2146c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
2147c6fd2807SJeff Garzik 				lba_desc = "LBA48";
21486fc49adbSTejun Heo 
21496fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
21506fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
21516fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2152c6fd2807SJeff Garzik 			}
2153c6fd2807SJeff Garzik 
2154c6fd2807SJeff Garzik 			/* config NCQ */
2155c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2156c6fd2807SJeff Garzik 
2157c6fd2807SJeff Garzik 			/* print device info to dmesg */
21583f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
21593f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21603f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
21613f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
21623f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
21633f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21643f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
2165c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
21663f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
21673f64f565SEric D. Mudama 			}
2168c6fd2807SJeff Garzik 		} else {
2169c6fd2807SJeff Garzik 			/* CHS */
2170c6fd2807SJeff Garzik 
2171c6fd2807SJeff Garzik 			/* Default translation */
2172c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
2173c6fd2807SJeff Garzik 			dev->heads	= id[3];
2174c6fd2807SJeff Garzik 			dev->sectors	= id[6];
2175c6fd2807SJeff Garzik 
2176c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
2177c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
2178c6fd2807SJeff Garzik 				dev->cylinders = id[54];
2179c6fd2807SJeff Garzik 				dev->heads     = id[55];
2180c6fd2807SJeff Garzik 				dev->sectors   = id[56];
2181c6fd2807SJeff Garzik 			}
2182c6fd2807SJeff Garzik 
2183c6fd2807SJeff Garzik 			/* print device info to dmesg */
21843f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2185c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
21863f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
21873f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
21883f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
21893f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21903f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
21913f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
21923f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
21933f64f565SEric D. Mudama 					dev->heads, dev->sectors);
21943f64f565SEric D. Mudama 			}
2195c6fd2807SJeff Garzik 		}
2196c6fd2807SJeff Garzik 
2197c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2198c6fd2807SJeff Garzik 	}
2199c6fd2807SJeff Garzik 
2200c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2201c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2202854c73a2STejun Heo 		const char *cdb_intr_string = "";
2203854c73a2STejun Heo 		const char *atapi_an_string = "";
22047d77b247STejun Heo 		u32 sntf;
2205c6fd2807SJeff Garzik 
2206c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2207c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2208c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2209c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2210c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2211c6fd2807SJeff Garzik 			rc = -EINVAL;
2212c6fd2807SJeff Garzik 			goto err_out_nosup;
2213c6fd2807SJeff Garzik 		}
2214c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2215c6fd2807SJeff Garzik 
22167d77b247STejun Heo 		/* Enable ATAPI AN if both the host and device have
22177d77b247STejun Heo 		 * the support.  If PMP is attached, SNTF is required
22187d77b247STejun Heo 		 * to enable ATAPI AN to discern between PHY status
22197d77b247STejun Heo 		 * changed notifications and ATAPI ANs.
22209f45cbd3SKristen Carlson Accardi 		 */
22217d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
22227d77b247STejun Heo 		    (!ap->nr_pmp_links ||
22237d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2224854c73a2STejun Heo 			unsigned int err_mask;
2225854c73a2STejun Heo 
22269f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2227218f3d30SJeff Garzik 			err_mask = ata_dev_set_feature(dev,
2228218f3d30SJeff Garzik 					SETFEATURES_SATA_ENABLE, SATA_AN);
2229854c73a2STejun Heo 			if (err_mask)
22309f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2231854c73a2STejun Heo 					"failed to enable ATAPI AN "
2232854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2233854c73a2STejun Heo 			else {
22349f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2235854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2236854c73a2STejun Heo 			}
22379f45cbd3SKristen Carlson Accardi 		}
22389f45cbd3SKristen Carlson Accardi 
2239c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2240c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2241c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2242c6fd2807SJeff Garzik 		}
2243c6fd2807SJeff Garzik 
2244c6fd2807SJeff Garzik 		/* print device info to dmesg */
2245c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2246ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2247854c73a2STejun Heo 				       "ATAPI: %s, %s, max %s%s%s\n",
2248ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2249c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2250854c73a2STejun Heo 				       cdb_intr_string, atapi_an_string);
2251c6fd2807SJeff Garzik 	}
2252c6fd2807SJeff Garzik 
2253914ed354STejun Heo 	/* determine max_sectors */
2254914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2255914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2256914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2257914ed354STejun Heo 
2258ca77329fSKristen Carlson Accardi 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2259ca77329fSKristen Carlson Accardi 		if (ata_id_has_hipm(dev->id))
2260ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_HIPM;
2261ca77329fSKristen Carlson Accardi 		if (ata_id_has_dipm(dev->id))
2262ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_DIPM;
2263ca77329fSKristen Carlson Accardi 	}
2264ca77329fSKristen Carlson Accardi 
226593590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
226693590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
226793590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
226893590859SAlan Cox 		   idiot */
226993590859SAlan Cox 		if (print_info) {
227093590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
227193590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
227293590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
227393590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
227493590859SAlan Cox 		}
227593590859SAlan Cox 	}
227693590859SAlan Cox 
2277c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2278c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2279c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2280c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2281c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2282c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2283c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2284c6fd2807SJeff Garzik 	}
2285c6fd2807SJeff Garzik 
2286f8d8e579STony Battersby 	if ((dev->class == ATA_DEV_ATAPI) &&
2287f442cd86SAlbert Lee 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2288f8d8e579STony Battersby 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2289f442cd86SAlbert Lee 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2290f442cd86SAlbert Lee 	}
2291f8d8e579STony Battersby 
229275683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
229303ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
229403ec52deSTejun Heo 					 dev->max_sectors);
229518d6e9d5SAlbert Lee 
2296ca77329fSKristen Carlson Accardi 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2297ca77329fSKristen Carlson Accardi 		dev->horkage |= ATA_HORKAGE_IPM;
2298ca77329fSKristen Carlson Accardi 
2299ca77329fSKristen Carlson Accardi 		/* reset link pm_policy for this port to no pm */
2300ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
2301ca77329fSKristen Carlson Accardi 	}
2302ca77329fSKristen Carlson Accardi 
2303c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2304cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2305c6fd2807SJeff Garzik 
2306c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2307c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2308c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2309c6fd2807SJeff Garzik 	return 0;
2310c6fd2807SJeff Garzik 
2311c6fd2807SJeff Garzik err_out_nosup:
2312c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2313c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2314c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2315c6fd2807SJeff Garzik 	return rc;
2316c6fd2807SJeff Garzik }
2317c6fd2807SJeff Garzik 
2318c6fd2807SJeff Garzik /**
23192e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2320be0d18dfSAlan Cox  *	@ap: port
2321be0d18dfSAlan Cox  *
23222e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2323be0d18dfSAlan Cox  *	detection.
2324be0d18dfSAlan Cox  */
2325be0d18dfSAlan Cox 
2326be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2327be0d18dfSAlan Cox {
2328be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2329be0d18dfSAlan Cox }
2330be0d18dfSAlan Cox 
2331be0d18dfSAlan Cox /**
23322e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2333be0d18dfSAlan Cox  *	@ap: port
2334be0d18dfSAlan Cox  *
23352e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2336be0d18dfSAlan Cox  *	detection.
2337be0d18dfSAlan Cox  */
2338be0d18dfSAlan Cox 
2339be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2340be0d18dfSAlan Cox {
2341be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2342be0d18dfSAlan Cox }
2343be0d18dfSAlan Cox 
2344be0d18dfSAlan Cox /**
2345be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2346be0d18dfSAlan Cox  *	@ap: port
2347be0d18dfSAlan Cox  *
2348be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2349be0d18dfSAlan Cox  */
2350be0d18dfSAlan Cox 
2351be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2352be0d18dfSAlan Cox {
2353be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2354be0d18dfSAlan Cox }
2355be0d18dfSAlan Cox 
2356be0d18dfSAlan Cox /**
2357c88f90c3STejun Heo  *	ata_cable_ignore	-	return ignored PATA cable.
2358c88f90c3STejun Heo  *	@ap: port
2359c88f90c3STejun Heo  *
2360c88f90c3STejun Heo  *	Helper method for drivers which don't use cable type to limit
2361c88f90c3STejun Heo  *	transfer mode.
2362c88f90c3STejun Heo  */
2363c88f90c3STejun Heo int ata_cable_ignore(struct ata_port *ap)
2364c88f90c3STejun Heo {
2365c88f90c3STejun Heo 	return ATA_CBL_PATA_IGN;
2366c88f90c3STejun Heo }
2367c88f90c3STejun Heo 
2368c88f90c3STejun Heo /**
2369be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2370be0d18dfSAlan Cox  *	@ap: port
2371be0d18dfSAlan Cox  *
2372be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2372be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables.
2374be0d18dfSAlan Cox 
2375be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2376be0d18dfSAlan Cox {
2377be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2378be0d18dfSAlan Cox }
2379be0d18dfSAlan Cox 
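/*
 * Example (editorial sketch, not part of libata-core): a PATA driver with
 * no cable detection logic of its own typically plugs one of the helpers
 * above into its port operations and lets ata_bus_probe()/EH store the
 * result in ap->cbl once PDIAG- has been released.  "pata_foo_port_ops"
 * is a hypothetical name; a real driver fills in the remaining operations.
 */
static struct ata_port_operations pata_foo_port_ops = {
	/* ... taskfile, data transfer and reset operations ... */
	.cable_detect	= ata_cable_40wire,	/* board wired with 40 wire cables only */
};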
2380be0d18dfSAlan Cox /**
2381c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2382c6fd2807SJeff Garzik  *	@ap: Bus to probe
2383c6fd2807SJeff Garzik  *
2384c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2385c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2386c6fd2807SJeff Garzik  *	the bus.
2387c6fd2807SJeff Garzik  *
2388c6fd2807SJeff Garzik  *	LOCKING:
2389c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2390c6fd2807SJeff Garzik  *
2391c6fd2807SJeff Garzik  *	RETURNS:
2392c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2393c6fd2807SJeff Garzik  */
2394c6fd2807SJeff Garzik 
2395c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2396c6fd2807SJeff Garzik {
2397c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2398c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2399f58229f8STejun Heo 	int rc;
2400c6fd2807SJeff Garzik 	struct ata_device *dev;
2401c6fd2807SJeff Garzik 
2402c6fd2807SJeff Garzik 	ata_port_probe(ap);
2403c6fd2807SJeff Garzik 
2404f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2405f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2406c6fd2807SJeff Garzik 
2407c6fd2807SJeff Garzik  retry:
2408cdeab114STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2409cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2410cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2411cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2412cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2413cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2414cdeab114STejun Heo 		 * bus as we may be talking too fast.
2415cdeab114STejun Heo 		 */
2416cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2417cdeab114STejun Heo 
2418cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2419cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2420cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2421cdeab114STejun Heo 		 * configuring devices.
2422cdeab114STejun Heo 		 */
2423cdeab114STejun Heo 		if (ap->ops->set_piomode)
2424cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2425cdeab114STejun Heo 	}
2426cdeab114STejun Heo 
2427c6fd2807SJeff Garzik 	/* reset and determine device classes */
2428c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2429c6fd2807SJeff Garzik 
2430f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2431c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2432c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2433c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2434c6fd2807SJeff Garzik 		else
2435c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2436c6fd2807SJeff Garzik 
2437c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2438c6fd2807SJeff Garzik 	}
2439c6fd2807SJeff Garzik 
2440c6fd2807SJeff Garzik 	ata_port_probe(ap);
2441c6fd2807SJeff Garzik 
2442f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2443f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2444f31f0cc2SJeff Garzik 	   the slave device */
2445f31f0cc2SJeff Garzik 
2446f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2447f58229f8STejun Heo 		if (tries[dev->devno])
2448f58229f8STejun Heo 			dev->class = classes[dev->devno];
2449c6fd2807SJeff Garzik 
2450c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2451c6fd2807SJeff Garzik 			continue;
2452c6fd2807SJeff Garzik 
2453bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2454bff04647STejun Heo 				     dev->id);
2455c6fd2807SJeff Garzik 		if (rc)
2456c6fd2807SJeff Garzik 			goto fail;
2457f31f0cc2SJeff Garzik 	}
2458f31f0cc2SJeff Garzik 
2459be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2460be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2461be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2462be0d18dfSAlan Cox 
2463614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2464614fe29bSAlan Cox 	   reported cable types and sensed types */
2465614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2466614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2467614fe29bSAlan Cox 			continue;
2468614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2469614fe29bSAlan Cox 		   end of the link the bridge is on, which is a problem */
2470614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2471614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2472614fe29bSAlan Cox 	}
2473614fe29bSAlan Cox 
2474f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2475f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2476f31f0cc2SJeff Garzik 
2477f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2478f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2479f31f0cc2SJeff Garzik 			continue;
2480c6fd2807SJeff Garzik 
24819af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2482efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
24839af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2484c6fd2807SJeff Garzik 		if (rc)
2485c6fd2807SJeff Garzik 			goto fail;
2486c6fd2807SJeff Garzik 	}
2487c6fd2807SJeff Garzik 
2488c6fd2807SJeff Garzik 	/* configure transfer mode */
24890260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
24904ae72a1eSTejun Heo 	if (rc)
2491c6fd2807SJeff Garzik 		goto fail;
2492c6fd2807SJeff Garzik 
2493f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2494f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2495c6fd2807SJeff Garzik 			return 0;
2496c6fd2807SJeff Garzik 
2497c6fd2807SJeff Garzik 	/* no device present, disable port */
2498c6fd2807SJeff Garzik 	ata_port_disable(ap);
2499c6fd2807SJeff Garzik 	return -ENODEV;
2500c6fd2807SJeff Garzik 
2501c6fd2807SJeff Garzik  fail:
25024ae72a1eSTejun Heo 	tries[dev->devno]--;
25034ae72a1eSTejun Heo 
2504c6fd2807SJeff Garzik 	switch (rc) {
2505c6fd2807SJeff Garzik 	case -EINVAL:
25064ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2507c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2508c6fd2807SJeff Garzik 		break;
25094ae72a1eSTejun Heo 
25104ae72a1eSTejun Heo 	case -ENODEV:
25114ae72a1eSTejun Heo 		/* give it just one more chance */
25124ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2513c6fd2807SJeff Garzik 	case -EIO:
25144ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
25154ae72a1eSTejun Heo 			/* This is the last chance, better to slow
25164ae72a1eSTejun Heo 			 * down than lose it.
25174ae72a1eSTejun Heo 			 */
2518936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
25194ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
25204ae72a1eSTejun Heo 		}
2521c6fd2807SJeff Garzik 	}
2522c6fd2807SJeff Garzik 
25234ae72a1eSTejun Heo 	if (!tries[dev->devno])
2524c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2525c6fd2807SJeff Garzik 
2526c6fd2807SJeff Garzik 	goto retry;
2527c6fd2807SJeff Garzik }
2528c6fd2807SJeff Garzik 
2529c6fd2807SJeff Garzik /**
2530c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2531c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2532c6fd2807SJeff Garzik  *
2533c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2534c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2535c6fd2807SJeff Garzik  *
2536cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2537c6fd2807SJeff Garzik  *	serialization.
2538c6fd2807SJeff Garzik  */
2539c6fd2807SJeff Garzik 
2540c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2541c6fd2807SJeff Garzik {
2542c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2543c6fd2807SJeff Garzik }
2544c6fd2807SJeff Garzik 
2545c6fd2807SJeff Garzik /**
2546c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2547936fd732STejun Heo  *	@link: SATA link to print the status of
2548c6fd2807SJeff Garzik  *
2549c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2550c6fd2807SJeff Garzik  *
2551c6fd2807SJeff Garzik  *	LOCKING:
2552c6fd2807SJeff Garzik  *	None.
2553c6fd2807SJeff Garzik  */
2554936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2555c6fd2807SJeff Garzik {
2556c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2557c6fd2807SJeff Garzik 
2558936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2559c6fd2807SJeff Garzik 		return;
2560936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2561c6fd2807SJeff Garzik 
2562936fd732STejun Heo 	if (ata_link_online(link)) {
2563c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2564936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2565c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2566c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2567c6fd2807SJeff Garzik 	} else {
2568936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2569c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2570c6fd2807SJeff Garzik 				sstatus, scontrol);
2571c6fd2807SJeff Garzik 	}
2572c6fd2807SJeff Garzik }
2573c6fd2807SJeff Garzik 
2574c6fd2807SJeff Garzik /**
2575c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2576c6fd2807SJeff Garzik  *	@adev: device
2577c6fd2807SJeff Garzik  *
2578c6fd2807SJeff Garzik  *	Obtain the other device on the same cable; if none is
2579c6fd2807SJeff Garzik  *	present, NULL is returned.
2580c6fd2807SJeff Garzik  */
2581c6fd2807SJeff Garzik 
2582c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2583c6fd2807SJeff Garzik {
25849af5c9c9STejun Heo 	struct ata_link *link = adev->link;
25859af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2586c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2587c6fd2807SJeff Garzik 		return NULL;
2588c6fd2807SJeff Garzik 	return pair;
2589c6fd2807SJeff Garzik }
2590c6fd2807SJeff Garzik 
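/*
 * Example (editorial sketch): a hypothetical PATA ->set_piomode() routine
 * can use ata_dev_pair() to find the cable partner so that shared
 * per-channel timing registers are never programmed faster than the
 * slower of the two devices.  "pata_foo_set_piomode" is a made-up name
 * and the actual register programming is elided.
 */
static void pata_foo_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);
	u8 pio = adev->pio_mode;

	/* never clock the shared timing registers faster than the partner */
	if (pair && pair->pio_mode < pio)
		pio = pair->pio_mode;

	/* ... program the chipset PIO timing registers for "pio" ... */
}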
2591c6fd2807SJeff Garzik /**
2592c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2593c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2594c6fd2807SJeff Garzik  *
2595c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2596c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2597c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2598c6fd2807SJeff Garzik  *	on this port.
2599c6fd2807SJeff Garzik  *
2600cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2601c6fd2807SJeff Garzik  *	serialization.
2602c6fd2807SJeff Garzik  */
2603c6fd2807SJeff Garzik 
2604c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2605c6fd2807SJeff Garzik {
26069af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
26079af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2608c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2609c6fd2807SJeff Garzik }
2610c6fd2807SJeff Garzik 
2611c6fd2807SJeff Garzik /**
2612c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2613936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2614c6fd2807SJeff Garzik  *
2615936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2616c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2617c6fd2807SJeff Garzik  *	using sata_set_spd().
2618c6fd2807SJeff Garzik  *
2619c6fd2807SJeff Garzik  *	LOCKING:
2620c6fd2807SJeff Garzik  *	Inherited from caller.
2621c6fd2807SJeff Garzik  *
2622c6fd2807SJeff Garzik  *	RETURNS:
2623c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2624c6fd2807SJeff Garzik  */
2625936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2626c6fd2807SJeff Garzik {
2627c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2628c6fd2807SJeff Garzik 	int rc, highbit;
2629c6fd2807SJeff Garzik 
2630936fd732STejun Heo 	if (!sata_scr_valid(link))
2631008a7896STejun Heo 		return -EOPNOTSUPP;
2632008a7896STejun Heo 
2633008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2634936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2635008a7896STejun Heo 	 */
2636936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2637008a7896STejun Heo 	if (rc == 0)
2638008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2639008a7896STejun Heo 	else
2640936fd732STejun Heo 		spd = link->sata_spd;
2641c6fd2807SJeff Garzik 
2642936fd732STejun Heo 	mask = link->sata_spd_limit;
2643c6fd2807SJeff Garzik 	if (mask <= 1)
2644c6fd2807SJeff Garzik 		return -EINVAL;
2645008a7896STejun Heo 
2646008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2647c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2648c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2649c6fd2807SJeff Garzik 
2650008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2651008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2652008a7896STejun Heo 	 */
2653008a7896STejun Heo 	if (spd > 1)
2654008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2655008a7896STejun Heo 	else
2656008a7896STejun Heo 		mask &= 1;
2657008a7896STejun Heo 
2658008a7896STejun Heo 	/* were we already at the bottom? */
2659c6fd2807SJeff Garzik 	if (!mask)
2660c6fd2807SJeff Garzik 		return -EINVAL;
2661c6fd2807SJeff Garzik 
2662936fd732STejun Heo 	link->sata_spd_limit = mask;
2663c6fd2807SJeff Garzik 
2664936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2665c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2666c6fd2807SJeff Garzik 
2667c6fd2807SJeff Garzik 	return 0;
2668c6fd2807SJeff Garzik }
2669c6fd2807SJeff Garzik 
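/*
 * Worked example (editorial note): for a link running at 3.0 Gbps
 * (SStatus SPD field = 2) with sata_spd_limit = 0x3 (1.5 and 3.0 Gbps
 * allowed), fls(0x3) - 1 = 1 clears bit 1, and the "below current SPD"
 * mask (1 << (2 - 1)) - 1 = 0x1 leaves only bit 0.  The new limit is
 * therefore 1.5 Gbps; it takes effect once sata_set_spd() has programmed
 * SControl and the link has been reset.
 */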
2670936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2671c6fd2807SJeff Garzik {
26725270222fSTejun Heo 	struct ata_link *host_link = &link->ap->link;
26735270222fSTejun Heo 	u32 limit, target, spd;
2674c6fd2807SJeff Garzik 
26755270222fSTejun Heo 	limit = link->sata_spd_limit;
26765270222fSTejun Heo 
26775270222fSTejun Heo 	/* Don't configure downstream link faster than upstream link.
26785270222fSTejun Heo 	 * It doesn't speed up anything and some PMPs choke on such
26795270222fSTejun Heo 	 * configuration.
26805270222fSTejun Heo 	 */
26815270222fSTejun Heo 	if (!ata_is_host_link(link) && host_link->sata_spd)
26825270222fSTejun Heo 		limit &= (1 << host_link->sata_spd) - 1;
26835270222fSTejun Heo 
26845270222fSTejun Heo 	if (limit == UINT_MAX)
26855270222fSTejun Heo 		target = 0;
2686c6fd2807SJeff Garzik 	else
26875270222fSTejun Heo 		target = fls(limit);
2688c6fd2807SJeff Garzik 
2689c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
26905270222fSTejun Heo 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2691c6fd2807SJeff Garzik 
26925270222fSTejun Heo 	return spd != target;
2693c6fd2807SJeff Garzik }
2694c6fd2807SJeff Garzik 
2695c6fd2807SJeff Garzik /**
2696c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2697936fd732STejun Heo  *	@link: Link in question
2698c6fd2807SJeff Garzik  *
2699c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2700936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2701c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2702c6fd2807SJeff Garzik  *	configuration.
2703c6fd2807SJeff Garzik  *
2704c6fd2807SJeff Garzik  *	LOCKING:
2705c6fd2807SJeff Garzik  *	Inherited from caller.
2706c6fd2807SJeff Garzik  *
2707c6fd2807SJeff Garzik  *	RETURNS:
2708c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2709c6fd2807SJeff Garzik  */
2710936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2711c6fd2807SJeff Garzik {
2712c6fd2807SJeff Garzik 	u32 scontrol;
2713c6fd2807SJeff Garzik 
2714936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2715db64bcf3STejun Heo 		return 1;
2716c6fd2807SJeff Garzik 
2717936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2718c6fd2807SJeff Garzik }
2719c6fd2807SJeff Garzik 
2720c6fd2807SJeff Garzik /**
2721c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2722936fd732STejun Heo  *	@link: Link to set SATA spd for
2723c6fd2807SJeff Garzik  *
2724936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2725c6fd2807SJeff Garzik  *
2726c6fd2807SJeff Garzik  *	LOCKING:
2727c6fd2807SJeff Garzik  *	Inherited from caller.
2728c6fd2807SJeff Garzik  *
2729c6fd2807SJeff Garzik  *	RETURNS:
2730c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2731c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2732c6fd2807SJeff Garzik  */
2733936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2734c6fd2807SJeff Garzik {
2735c6fd2807SJeff Garzik 	u32 scontrol;
2736c6fd2807SJeff Garzik 	int rc;
2737c6fd2807SJeff Garzik 
2738936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2739c6fd2807SJeff Garzik 		return rc;
2740c6fd2807SJeff Garzik 
2741936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2742c6fd2807SJeff Garzik 		return 0;
2743c6fd2807SJeff Garzik 
2744936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2745c6fd2807SJeff Garzik 		return rc;
2746c6fd2807SJeff Garzik 
2747c6fd2807SJeff Garzik 	return 1;
2748c6fd2807SJeff Garzik }
2749c6fd2807SJeff Garzik 
2750c6fd2807SJeff Garzik /*
2751c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2752c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2753c6fd2807SJeff Garzik  */
2754c6fd2807SJeff Garzik /*
2755b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2756c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2757b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2758b352e57dSAlan Cox  *
2759b352e57dSAlan Cox  * For PIO 5/6 and MWDMA 3/4 see the CFA specification 3.0.
2760c6fd2807SJeff Garzik  */
2761c6fd2807SJeff Garzik 
2762c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
276370cd071eSTejun Heo /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
276470cd071eSTejun Heo 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
276570cd071eSTejun Heo 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
276670cd071eSTejun Heo 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
276770cd071eSTejun Heo 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
276870cd071eSTejun Heo 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
276970cd071eSTejun Heo 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
277070cd071eSTejun Heo 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2771c6fd2807SJeff Garzik 
277270cd071eSTejun Heo 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
277370cd071eSTejun Heo 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
277470cd071eSTejun Heo 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2775c6fd2807SJeff Garzik 
277670cd071eSTejun Heo 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
277770cd071eSTejun Heo 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
277870cd071eSTejun Heo 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2779b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
278070cd071eSTejun Heo 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2781c6fd2807SJeff Garzik 
2782c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
278370cd071eSTejun Heo 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
278470cd071eSTejun Heo 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
278570cd071eSTejun Heo 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
278670cd071eSTejun Heo 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
278770cd071eSTejun Heo 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
278870cd071eSTejun Heo 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
278970cd071eSTejun Heo 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2790c6fd2807SJeff Garzik 
2791c6fd2807SJeff Garzik 	{ 0xFF }
2792c6fd2807SJeff Garzik };
2793c6fd2807SJeff Garzik 
2794c6fd2807SJeff Garzik #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2795c6fd2807SJeff Garzik #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2796c6fd2807SJeff Garzik 
2797c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2798c6fd2807SJeff Garzik {
2799c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2800c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2801c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2802c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2803c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2804c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2805c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2806c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2807c6fd2807SJeff Garzik }
2808c6fd2807SJeff Garzik 
2809c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2810c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2811c6fd2807SJeff Garzik {
2812c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2813c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2814c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2815c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2816c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2817c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2818c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2819c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2820c6fd2807SJeff Garzik }
2821c6fd2807SJeff Garzik 
28226357357cSTejun Heo const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2823c6fd2807SJeff Garzik {
282470cd071eSTejun Heo 	const struct ata_timing *t = ata_timing;
2825c6fd2807SJeff Garzik 
282670cd071eSTejun Heo 	while (xfer_mode > t->mode)
282770cd071eSTejun Heo 		t++;
282870cd071eSTejun Heo 
282970cd071eSTejun Heo 	if (xfer_mode == t->mode)
2830c6fd2807SJeff Garzik 		return t;
283170cd071eSTejun Heo 	return NULL;
2832c6fd2807SJeff Garzik }
2833c6fd2807SJeff Garzik 
2834c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2835c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2836c6fd2807SJeff Garzik {
2837c6fd2807SJeff Garzik 	const struct ata_timing *s;
2838c6fd2807SJeff Garzik 	struct ata_timing p;
2839c6fd2807SJeff Garzik 
2840c6fd2807SJeff Garzik 	/*
2841c6fd2807SJeff Garzik 	 * Find the mode.
2842c6fd2807SJeff Garzik 	 */
2843c6fd2807SJeff Garzik 
2844c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2845c6fd2807SJeff Garzik 		return -EINVAL;
2846c6fd2807SJeff Garzik 
2847c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2848c6fd2807SJeff Garzik 
2849c6fd2807SJeff Garzik 	/*
2850c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2851c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2852c6fd2807SJeff Garzik 	 */
2853c6fd2807SJeff Garzik 
2854c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2855c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2856c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2857c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2858c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2859c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2860c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2861c6fd2807SJeff Garzik 		}
2862c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2863c6fd2807SJeff Garzik 	}
2864c6fd2807SJeff Garzik 
2865c6fd2807SJeff Garzik 	/*
2866c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2867c6fd2807SJeff Garzik 	 */
2868c6fd2807SJeff Garzik 
2869c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2870c6fd2807SJeff Garzik 
2871c6fd2807SJeff Garzik 	/*
2872c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2873c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2874c6fd2807SJeff Garzik 	 * DMA cycle timing is no faster than the fastest PIO timing.
2875c6fd2807SJeff Garzik 	 */
2876c6fd2807SJeff Garzik 
2877fd3367afSAlan 	if (speed > XFER_PIO_6) {
2878c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2879c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2880c6fd2807SJeff Garzik 	}
2881c6fd2807SJeff Garzik 
2882c6fd2807SJeff Garzik 	/*
2883c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2884c6fd2807SJeff Garzik 	 */
2885c6fd2807SJeff Garzik 
2886c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2887c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2888c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2889c6fd2807SJeff Garzik 	}
2890c6fd2807SJeff Garzik 
2891c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2892c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2893c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2894c6fd2807SJeff Garzik 	}
28954f701d1eSAlan Cox 
28964f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
28974f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
28984f701d1eSAlan Cox 	   if so, we must correct this */
28994f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
29004f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2901c6fd2807SJeff Garzik 
2902c6fd2807SJeff Garzik 	return 0;
2903c6fd2807SJeff Garzik }
2904c6fd2807SJeff Garzik 
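/*
 * Example (editorial sketch): a typical PATA driver derives T and UT from
 * its bus clock and lets ata_timing_compute() fill a struct ata_timing in
 * clock counts.  The multiplication by 1000 in ata_timing_quantize()
 * implies T/UT are expected in picoseconds (the timing table is in ns);
 * "foo_set_timings", the 33333 kHz clock and the UT divisor are
 * assumptions made for this example only.
 */
static void foo_set_timings(struct ata_port *ap, struct ata_device *adev)
{
	const int clock_khz = 33333;		/* 33 MHz bus clock */
	const int T = 1000000000 / clock_khz;	/* ~30000 ps per clock */
	const int UT = T / 2;			/* faster reference clock for UDMA (assumed) */
	struct ata_timing t;

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT) < 0)
		return;

	/* e.g. PIO4: cyc8b = 120 ns -> ENOUGH(120000, 30000) = 4 clocks */
	/* ... write t.setup, t.act8b, t.rec8b etc. to the chipset ... */
}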
2905c6fd2807SJeff Garzik /**
2906a0f79b92STejun Heo  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2907a0f79b92STejun Heo  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2908a0f79b92STejun Heo  *	@cycle: cycle duration in ns
2909a0f79b92STejun Heo  *
2910a0f79b92STejun Heo  *	Return matching xfer mode for @cycle.  The returned mode is of
2911a0f79b92STejun Heo  *	the transfer type specified by @xfer_shift.  If @cycle is too
2912a0f79b92STejun Heo  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
2913a0f79b92STejun Heo  *	than the fastest known mode, the fastest mode is returned.
2914a0f79b92STejun Heo  *
2915a0f79b92STejun Heo  *	LOCKING:
2916a0f79b92STejun Heo  *	None.
2917a0f79b92STejun Heo  *
2918a0f79b92STejun Heo  *	RETURNS:
2919a0f79b92STejun Heo  *	Matching xfer_mode, 0xff if no match found.
2920a0f79b92STejun Heo  */
2921a0f79b92STejun Heo u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
2922a0f79b92STejun Heo {
2923a0f79b92STejun Heo 	u8 base_mode = 0xff, last_mode = 0xff;
2924a0f79b92STejun Heo 	const struct ata_xfer_ent *ent;
2925a0f79b92STejun Heo 	const struct ata_timing *t;
2926a0f79b92STejun Heo 
2927a0f79b92STejun Heo 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
2928a0f79b92STejun Heo 		if (ent->shift == xfer_shift)
2929a0f79b92STejun Heo 			base_mode = ent->base;
2930a0f79b92STejun Heo 
2931a0f79b92STejun Heo 	for (t = ata_timing_find_mode(base_mode);
2932a0f79b92STejun Heo 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
2933a0f79b92STejun Heo 		unsigned short this_cycle;
2934a0f79b92STejun Heo 
2935a0f79b92STejun Heo 		switch (xfer_shift) {
2936a0f79b92STejun Heo 		case ATA_SHIFT_PIO:
2937a0f79b92STejun Heo 		case ATA_SHIFT_MWDMA:
2938a0f79b92STejun Heo 			this_cycle = t->cycle;
2939a0f79b92STejun Heo 			break;
2940a0f79b92STejun Heo 		case ATA_SHIFT_UDMA:
2941a0f79b92STejun Heo 			this_cycle = t->udma;
2942a0f79b92STejun Heo 			break;
2943a0f79b92STejun Heo 		default:
2944a0f79b92STejun Heo 			return 0xff;
2945a0f79b92STejun Heo 		}
2946a0f79b92STejun Heo 
2947a0f79b92STejun Heo 		if (cycle > this_cycle)
2948a0f79b92STejun Heo 			break;
2949a0f79b92STejun Heo 
2950a0f79b92STejun Heo 		last_mode = t->mode;
2951a0f79b92STejun Heo 	}
2952a0f79b92STejun Heo 
2953a0f79b92STejun Heo 	return last_mode;
2954a0f79b92STejun Heo }
2955a0f79b92STejun Heo 
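/*
 * Worked example (editorial note): ata_timing_cycle2mode(ATA_SHIFT_MWDMA,
 * 150) starts at XFER_MW_DMA_0 and keeps going while the mode's cycle
 * time is at least the requested 150 ns: MWDMA0 (480 ns) and MWDMA1
 * (150 ns) qualify, MWDMA2 (120 ns) is too fast, so XFER_MW_DMA_1 is
 * returned.
 */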
2956a0f79b92STejun Heo /**
2957c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2958c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2959458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2960c6fd2807SJeff Garzik  *
2961c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2962c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2963c6fd2807SJeff Garzik  *	will apply the limit.
2964c6fd2807SJeff Garzik  *
2965c6fd2807SJeff Garzik  *	LOCKING:
2966c6fd2807SJeff Garzik  *	Inherited from caller.
2967c6fd2807SJeff Garzik  *
2968c6fd2807SJeff Garzik  *	RETURNS:
2969c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2970c6fd2807SJeff Garzik  */
2971458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2972c6fd2807SJeff Garzik {
2973458337dbSTejun Heo 	char buf[32];
29747dc951aeSTejun Heo 	unsigned long orig_mask, xfer_mask;
29757dc951aeSTejun Heo 	unsigned long pio_mask, mwdma_mask, udma_mask;
2976458337dbSTejun Heo 	int quiet, highbit;
2977c6fd2807SJeff Garzik 
2978458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2979458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2980458337dbSTejun Heo 
2981458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2982458337dbSTejun Heo 						  dev->mwdma_mask,
2983c6fd2807SJeff Garzik 						  dev->udma_mask);
2984458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2985c6fd2807SJeff Garzik 
2986458337dbSTejun Heo 	switch (sel) {
2987458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2988458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2989458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2990458337dbSTejun Heo 		break;
2991458337dbSTejun Heo 
2992458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2993458337dbSTejun Heo 		if (udma_mask) {
2994458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2995458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2996458337dbSTejun Heo 			if (!udma_mask)
2997458337dbSTejun Heo 				return -ENOENT;
2998458337dbSTejun Heo 		} else if (mwdma_mask) {
2999458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
3000458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
3001458337dbSTejun Heo 			if (!mwdma_mask)
3002458337dbSTejun Heo 				return -ENOENT;
3003458337dbSTejun Heo 		}
3004458337dbSTejun Heo 		break;
3005458337dbSTejun Heo 
3006458337dbSTejun Heo 	case ATA_DNXFER_40C:
3007458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
3008458337dbSTejun Heo 		break;
3009458337dbSTejun Heo 
3010458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
3011458337dbSTejun Heo 		pio_mask &= 1;
3012458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
3013458337dbSTejun Heo 		mwdma_mask = 0;
3014458337dbSTejun Heo 		udma_mask = 0;
3015458337dbSTejun Heo 		break;
3016458337dbSTejun Heo 
3017458337dbSTejun Heo 	default:
3018458337dbSTejun Heo 		BUG();
3019458337dbSTejun Heo 	}
3020458337dbSTejun Heo 
3021458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3022458337dbSTejun Heo 
3023458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3024458337dbSTejun Heo 		return -ENOENT;
3025458337dbSTejun Heo 
3026458337dbSTejun Heo 	if (!quiet) {
3027458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3028458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
3029458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
3030458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3031458337dbSTejun Heo 		else
3032458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
3033458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
3034458337dbSTejun Heo 
3035458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
3036458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
3037458337dbSTejun Heo 	}
3038c6fd2807SJeff Garzik 
3039c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3040c6fd2807SJeff Garzik 			    &dev->udma_mask);
3041c6fd2807SJeff Garzik 
3042c6fd2807SJeff Garzik 	return 0;
3043c6fd2807SJeff Garzik }
3044c6fd2807SJeff Garzik 
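/*
 * Worked example (editorial note): with pio_mask = 0x1f (PIO0-4),
 * ATA_DNXFER_PIO clears the top bit on each call: 0x1f -> 0x0f -> 0x07
 * and so on.  Once only PIO0 is left, a further call would remove the
 * last PIO mode, so -ENOENT is returned instead (see the ata_bus_probe()
 * fail path above for a typical caller).
 */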
3045c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
3046c6fd2807SJeff Garzik {
30479af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3048c6fd2807SJeff Garzik 	unsigned int err_mask;
3049c6fd2807SJeff Garzik 	int rc;
3050c6fd2807SJeff Garzik 
3051c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
3052c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3053c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
3054c6fd2807SJeff Garzik 
3055c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
30562dcb407eSJeff Garzik 
305711750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
305811750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
305911750a40SAlan 		err_mask &= ~AC_ERR_DEV;
30602dcb407eSJeff Garzik 
30610bc2a79aSAlan Cox 	/* Some very old devices and some bad newer ones fail any kind of
30620bc2a79aSAlan Cox 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
30630bc2a79aSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
30640bc2a79aSAlan Cox 			dev->pio_mode <= XFER_PIO_2)
30650bc2a79aSAlan Cox 		err_mask &= ~AC_ERR_DEV;
30662dcb407eSJeff Garzik 
30673acaf94bSAlan Cox 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
30683acaf94bSAlan Cox 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
30693acaf94bSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
30703acaf94bSAlan Cox 	    dev->dma_mode == XFER_MW_DMA_0 &&
30713acaf94bSAlan Cox 	    (dev->id[63] >> 8) & 1)
30723acaf94bSAlan Cox 		err_mask &= ~AC_ERR_DEV;
30733acaf94bSAlan Cox 
3074c6fd2807SJeff Garzik 	if (err_mask) {
3075c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3076c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
3077c6fd2807SJeff Garzik 		return -EIO;
3078c6fd2807SJeff Garzik 	}
3079c6fd2807SJeff Garzik 
3080baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3081422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3082baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3083c6fd2807SJeff Garzik 	if (rc)
3084c6fd2807SJeff Garzik 		return rc;
3085c6fd2807SJeff Garzik 
3086c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3087c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
3088c6fd2807SJeff Garzik 
3089c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3090c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
3091c6fd2807SJeff Garzik 	return 0;
3092c6fd2807SJeff Garzik }
3093c6fd2807SJeff Garzik 
3094c6fd2807SJeff Garzik /**
309504351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
30960260731fSTejun Heo  *	@link: link on which timings will be programmed
3097c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
3098c6fd2807SJeff Garzik  *
309904351821SAlan  *	Standard implementation of the function used to tune and set
310004351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
310104351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
3102c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
3103c6fd2807SJeff Garzik  *
3104c6fd2807SJeff Garzik  *	LOCKING:
3105c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3106c6fd2807SJeff Garzik  *
3107c6fd2807SJeff Garzik  *	RETURNS:
3108c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3109c6fd2807SJeff Garzik  */
311004351821SAlan 
31110260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3112c6fd2807SJeff Garzik {
31130260731fSTejun Heo 	struct ata_port *ap = link->ap;
3114c6fd2807SJeff Garzik 	struct ata_device *dev;
3115f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
3116c6fd2807SJeff Garzik 
3117c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
3118f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
31197dc951aeSTejun Heo 		unsigned long pio_mask, dma_mask;
3120b3a70601SAlan Cox 		unsigned int mode_mask;
3121c6fd2807SJeff Garzik 
3122c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3123c6fd2807SJeff Garzik 			continue;
3124c6fd2807SJeff Garzik 
3125b3a70601SAlan Cox 		mode_mask = ATA_DMA_MASK_ATA;
3126b3a70601SAlan Cox 		if (dev->class == ATA_DEV_ATAPI)
3127b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_ATAPI;
3128b3a70601SAlan Cox 		else if (ata_id_is_cfa(dev->id))
3129b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_CFA;
3130b3a70601SAlan Cox 
3131c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
3132c6fd2807SJeff Garzik 
3133c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3134c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3135b3a70601SAlan Cox 
3136b3a70601SAlan Cox 		if (libata_dma_mask & mode_mask)
3137b3a70601SAlan Cox 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3138b3a70601SAlan Cox 		else
3139b3a70601SAlan Cox 			dma_mask = 0;
3140b3a70601SAlan Cox 
3141c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3142c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3143c6fd2807SJeff Garzik 
3144c6fd2807SJeff Garzik 		found = 1;
314570cd071eSTejun Heo 		if (dev->dma_mode != 0xff)
3146c6fd2807SJeff Garzik 			used_dma = 1;
3147c6fd2807SJeff Garzik 	}
3148c6fd2807SJeff Garzik 	if (!found)
3149c6fd2807SJeff Garzik 		goto out;
3150c6fd2807SJeff Garzik 
3151c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
3152f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3153c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3154c6fd2807SJeff Garzik 			continue;
3155c6fd2807SJeff Garzik 
315670cd071eSTejun Heo 		if (dev->pio_mode == 0xff) {
3157c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3158c6fd2807SJeff Garzik 			rc = -EINVAL;
3159c6fd2807SJeff Garzik 			goto out;
3160c6fd2807SJeff Garzik 		}
3161c6fd2807SJeff Garzik 
3162c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
3163c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
3164c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
3165c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
3166c6fd2807SJeff Garzik 	}
3167c6fd2807SJeff Garzik 
3168c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
3169f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
317070cd071eSTejun Heo 		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3171c6fd2807SJeff Garzik 			continue;
3172c6fd2807SJeff Garzik 
3173c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
3174c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3175c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
3176c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
3177c6fd2807SJeff Garzik 	}
3178c6fd2807SJeff Garzik 
3179c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
3180f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
318118d90debSAlan 		/* don't update suspended devices' xfer mode */
31829666f400STejun Heo 		if (!ata_dev_enabled(dev))
3183c6fd2807SJeff Garzik 			continue;
3184c6fd2807SJeff Garzik 
3185c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
3186c6fd2807SJeff Garzik 		if (rc)
3187c6fd2807SJeff Garzik 			goto out;
3188c6fd2807SJeff Garzik 	}
3189c6fd2807SJeff Garzik 
3190c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
3191c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
3192c6fd2807SJeff Garzik 	 */
3193cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3194032af1ceSAlan 		ap->host->simplex_claimed = ap;
3195c6fd2807SJeff Garzik 
3196c6fd2807SJeff Garzik  out:
3197c6fd2807SJeff Garzik 	if (rc)
3198c6fd2807SJeff Garzik 		*r_failed_dev = dev;
3199c6fd2807SJeff Garzik 	return rc;
3200c6fd2807SJeff Garzik }
3201c6fd2807SJeff Garzik 
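/*
 * Example (editorial sketch): a driver with an extra constraint can wrap
 * the standard implementation rather than duplicate it.  The "no DMA on
 * device 1" quirk and the name "pata_foo_set_mode" are invented for the
 * illustration.
 */
static int pata_foo_set_mode(struct ata_link *link,
			     struct ata_device **r_failed_dev)
{
	struct ata_device *dev;

	/* hypothetical quirk: this controller cannot do DMA to device 1 */
	ata_link_for_each_dev(dev, link)
		if (dev->devno == 1)
			dev->mwdma_mask = dev->udma_mask = 0;

	return ata_do_set_mode(link, r_failed_dev);
}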
3202c6fd2807SJeff Garzik /**
3203c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
3204c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
3205c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
3206c6fd2807SJeff Garzik  *
3207c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
3208c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3209c6fd2807SJeff Garzik  *	other threads.
3210c6fd2807SJeff Garzik  *
3211c6fd2807SJeff Garzik  *	LOCKING:
3212cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3213c6fd2807SJeff Garzik  */
3214c6fd2807SJeff Garzik 
3215c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3216c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3217c6fd2807SJeff Garzik {
3218c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3219c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3220c6fd2807SJeff Garzik }
3221c6fd2807SJeff Garzik 
3222c6fd2807SJeff Garzik /**
3223c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3224c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3225c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3226c6fd2807SJeff Garzik  *	@tmout: overall timeout
3227c6fd2807SJeff Garzik  *
3228c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3229c6fd2807SJeff Garzik  *	or a timeout occurs.
3230c6fd2807SJeff Garzik  *
3231d1adc1bbSTejun Heo  *	LOCKING:
3232d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3233d1adc1bbSTejun Heo  *
3234d1adc1bbSTejun Heo  *	RETURNS:
3235d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3236c6fd2807SJeff Garzik  */
3237d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3238c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3239c6fd2807SJeff Garzik {
3240c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3241c6fd2807SJeff Garzik 	u8 status;
3242c6fd2807SJeff Garzik 
3243c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3244c6fd2807SJeff Garzik 	timer_start = jiffies;
3245c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3246d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3247d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3248c6fd2807SJeff Garzik 		msleep(50);
3249c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3250c6fd2807SJeff Garzik 	}
3251c6fd2807SJeff Garzik 
3252d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3253c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
325435aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
325535aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3256c6fd2807SJeff Garzik 
3257c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3258d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3259d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3260c6fd2807SJeff Garzik 		msleep(50);
3261c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3262c6fd2807SJeff Garzik 	}
3263c6fd2807SJeff Garzik 
3264d1adc1bbSTejun Heo 	if (status == 0xff)
3265d1adc1bbSTejun Heo 		return -ENODEV;
3266d1adc1bbSTejun Heo 
3267c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3268c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
326935aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
327035aa7a43SJeff Garzik 				tmout / HZ, status);
3271d1adc1bbSTejun Heo 		return -EBUSY;
3272c6fd2807SJeff Garzik 	}
3273c6fd2807SJeff Garzik 
3274c6fd2807SJeff Garzik 	return 0;
3275c6fd2807SJeff Garzik }
3276c6fd2807SJeff Garzik 
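/*
 * Example call (editorial sketch): callers typically pass a short
 * "impatience" timeout after which a warning is printed and a longer
 * overall timeout after which the wait fails.  The helper name is
 * hypothetical, and the ATA_TMOUT_BOOT_QUICK/ATA_TMOUT_BOOT constants
 * are assumed to be the jiffies-based values from <linux/libata.h> of
 * this kernel series.
 */
static int foo_wait_not_busy(struct ata_port *ap)
{
	/* warn after ATA_TMOUT_BOOT_QUICK, give up after ATA_TMOUT_BOOT */
	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}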
3277d4b2bab4STejun Heo /**
327888ff6eafSTejun Heo  *	ata_wait_after_reset - wait before checking status after reset
327988ff6eafSTejun Heo  *	@ap: port containing status register to be polled
328088ff6eafSTejun Heo  *	@deadline: deadline jiffies for the operation
328188ff6eafSTejun Heo  *
328288ff6eafSTejun Heo  *	After reset, we need to pause a while before reading status.
328388ff6eafSTejun Heo  *	Also, certain combinations of controller and device report 0xff
328488ff6eafSTejun Heo  *	for some duration (e.g. until the SATA PHY is up and running),
328588ff6eafSTejun Heo  *	which is interpreted as an empty port in the ATA world.  This
328688ff6eafSTejun Heo  *	function also waits for such devices to get out of 0xff
328788ff6eafSTejun Heo  *	status.
328888ff6eafSTejun Heo  *
328988ff6eafSTejun Heo  *	LOCKING:
329088ff6eafSTejun Heo  *	Kernel thread context (may sleep).
329188ff6eafSTejun Heo  */
329288ff6eafSTejun Heo void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
329388ff6eafSTejun Heo {
329488ff6eafSTejun Heo 	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
329588ff6eafSTejun Heo 
329688ff6eafSTejun Heo 	if (time_before(until, deadline))
329788ff6eafSTejun Heo 		deadline = until;
329888ff6eafSTejun Heo 
329988ff6eafSTejun Heo 	/* Spec mandates ">= 2ms" before checking status.  We wait
330088ff6eafSTejun Heo 	 * 150ms, because that was the magic delay used for ATAPI
330188ff6eafSTejun Heo 	 * devices in Hale Landis's ATADRVR, for the period of time
330288ff6eafSTejun Heo 	 * between when the ATA command register is written, and then
330388ff6eafSTejun Heo 	 * status is checked.  Because waiting for "a while" before
330488ff6eafSTejun Heo 	 * checking status is fine, post SRST, we perform this magic
330588ff6eafSTejun Heo 	 * delay here as well.
330688ff6eafSTejun Heo 	 *
330788ff6eafSTejun Heo 	 * Old drivers/ide uses the 2mS rule and then waits for ready.
330888ff6eafSTejun Heo 	 * Old drivers/ide uses the 2ms rule and then waits for ready.
330988ff6eafSTejun Heo 	msleep(150);
331088ff6eafSTejun Heo 
331188ff6eafSTejun Heo 	/* Wait for 0xff to clear.  Some SATA devices take a long time
331288ff6eafSTejun Heo 	 * to clear 0xff after reset.  For example, HHD424020F7SV00
331388ff6eafSTejun Heo 	 * iVDR needs >= 800ms.  Quantum GoVault needs even more
331488ff6eafSTejun Heo 	 * than that.
33151974e201STejun Heo 	 *
33161974e201STejun Heo 	 * Note that some PATA controllers (pata_ali) explode if
33171974e201STejun Heo 	 * status register is read more than once when there's no
33181974e201STejun Heo 	 * device attached.
331988ff6eafSTejun Heo 	 */
33201974e201STejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
332188ff6eafSTejun Heo 		while (1) {
332288ff6eafSTejun Heo 			u8 status = ata_chk_status(ap);
332388ff6eafSTejun Heo 
332488ff6eafSTejun Heo 			if (status != 0xff || time_after(jiffies, deadline))
332588ff6eafSTejun Heo 				return;
332688ff6eafSTejun Heo 
332788ff6eafSTejun Heo 			msleep(50);
332888ff6eafSTejun Heo 		}
332988ff6eafSTejun Heo 	}
33301974e201STejun Heo }
333188ff6eafSTejun Heo 
333288ff6eafSTejun Heo /**
3333d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3334d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3335d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3336d4b2bab4STejun Heo  *
3337d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3338d4b2bab4STejun Heo  *	occurs.
3339d4b2bab4STejun Heo  *
3340d4b2bab4STejun Heo  *	LOCKING:
3341d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3342d4b2bab4STejun Heo  *
3343d4b2bab4STejun Heo  *	RETURNS:
3344d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3345d4b2bab4STejun Heo  */
3346d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3347d4b2bab4STejun Heo {
3348d4b2bab4STejun Heo 	unsigned long start = jiffies;
3349d4b2bab4STejun Heo 	int warned = 0;
3350d4b2bab4STejun Heo 
3351d4b2bab4STejun Heo 	while (1) {
3352d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3353d4b2bab4STejun Heo 		unsigned long now = jiffies;
3354d4b2bab4STejun Heo 
3355d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3356d4b2bab4STejun Heo 			return 0;
3357936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3358d4b2bab4STejun Heo 			return -ENODEV;
3359d4b2bab4STejun Heo 		if (time_after(now, deadline))
3360d4b2bab4STejun Heo 			return -EBUSY;
3361d4b2bab4STejun Heo 
3362d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3363d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3364d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3365d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3366d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3367d4b2bab4STejun Heo 			warned = 1;
3368d4b2bab4STejun Heo 		}
3369d4b2bab4STejun Heo 
3370d4b2bab4STejun Heo 		msleep(50);
3371d4b2bab4STejun Heo 	}
3372d4b2bab4STejun Heo }
3373d4b2bab4STejun Heo 
3374d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3375d4b2bab4STejun Heo 			      unsigned long deadline)
3376c6fd2807SJeff Garzik {
3377c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3378c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3379c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
33809b89391cSTejun Heo 	int rc, ret = 0;
3381c6fd2807SJeff Garzik 
3382c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3383c6fd2807SJeff Garzik 	 * BSY bit to clear
3384c6fd2807SJeff Garzik 	 */
3385d4b2bab4STejun Heo 	if (dev0) {
3386d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
33879b89391cSTejun Heo 		if (rc) {
33889b89391cSTejun Heo 			if (rc != -ENODEV)
3389d4b2bab4STejun Heo 				return rc;
33909b89391cSTejun Heo 			ret = rc;
33919b89391cSTejun Heo 		}
3392d4b2bab4STejun Heo 	}
3393c6fd2807SJeff Garzik 
3394e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3395e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3396c6fd2807SJeff Garzik 	 */
3397e141d999STejun Heo 	if (dev1) {
3398e141d999STejun Heo 		int i;
3399c6fd2807SJeff Garzik 
3400c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3401e141d999STejun Heo 
3402e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3403e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3404e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3405e141d999STejun Heo 		 */
3406e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3407e141d999STejun Heo 			u8 nsect, lbal;
3408e141d999STejun Heo 
34090d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
34100d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3411c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3412c6fd2807SJeff Garzik 				break;
3413c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3414c6fd2807SJeff Garzik 		}
3415e141d999STejun Heo 
3416d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
34179b89391cSTejun Heo 		if (rc) {
34189b89391cSTejun Heo 			if (rc != -ENODEV)
3419d4b2bab4STejun Heo 				return rc;
34209b89391cSTejun Heo 			ret = rc;
34219b89391cSTejun Heo 		}
3422d4b2bab4STejun Heo 	}
3423c6fd2807SJeff Garzik 
3424c6fd2807SJeff Garzik 	/* is all this really necessary? */
3425c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3426c6fd2807SJeff Garzik 	if (dev1)
3427c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3428c6fd2807SJeff Garzik 	if (dev0)
3429c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3430d4b2bab4STejun Heo 
34319b89391cSTejun Heo 	return ret;
3432c6fd2807SJeff Garzik }
3433c6fd2807SJeff Garzik 
3434d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3435d4b2bab4STejun Heo 			     unsigned long deadline)
3436c6fd2807SJeff Garzik {
3437c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3438c6fd2807SJeff Garzik 
343944877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3440c6fd2807SJeff Garzik 
3441c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
34420d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3443c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
34440d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3445c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
34460d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3447c6fd2807SJeff Garzik 
344888ff6eafSTejun Heo 	/* wait a while before checking status */
344988ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
3450c6fd2807SJeff Garzik 
3451c6fd2807SJeff Garzik 	/* Before we perform post-reset processing, we want to see if
3452c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3453c6fd2807SJeff Garzik 	 * pulldown resistor.
3454c6fd2807SJeff Garzik 	 */
3455150981b0SAlan Cox 	if (ata_chk_status(ap) == 0xFF)
34569b89391cSTejun Heo 		return -ENODEV;
3457c6fd2807SJeff Garzik 
3458d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3459c6fd2807SJeff Garzik }
3460c6fd2807SJeff Garzik 
3461c6fd2807SJeff Garzik /**
3462c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3463c6fd2807SJeff Garzik  *	@ap: port to reset
3464c6fd2807SJeff Garzik  *
3465c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3466c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3467c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3468c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3469c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3470c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3471c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3472c6fd2807SJeff Garzik  *
3473c6fd2807SJeff Garzik  *	LOCKING:
3474c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3475cca3974eSJeff Garzik  *	Obtains host lock.
3476c6fd2807SJeff Garzik  *
3477c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3478c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3479c6fd2807SJeff Garzik  */
3480c6fd2807SJeff Garzik 
3481c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3482c6fd2807SJeff Garzik {
34839af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3484c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3485c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3486c6fd2807SJeff Garzik 	u8 err;
3487c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
34889b89391cSTejun Heo 	int rc;
3489c6fd2807SJeff Garzik 
349044877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3491c6fd2807SJeff Garzik 
3492c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3493c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3494c6fd2807SJeff Garzik 		dev0 = 1;
3495c6fd2807SJeff Garzik 	else {
3496c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3497c6fd2807SJeff Garzik 		if (slave_possible)
3498c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3499c6fd2807SJeff Garzik 	}
3500c6fd2807SJeff Garzik 
3501c6fd2807SJeff Garzik 	if (dev0)
3502c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3503c6fd2807SJeff Garzik 	if (dev1)
3504c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3505c6fd2807SJeff Garzik 
3506c6fd2807SJeff Garzik 	/* select device 0 again */
3507c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3508c6fd2807SJeff Garzik 
3509c6fd2807SJeff Garzik 	/* issue bus reset */
35109b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
35119b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
35129b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3513c6fd2807SJeff Garzik 			goto err_out;
35149b89391cSTejun Heo 	}
3515c6fd2807SJeff Garzik 
3516c6fd2807SJeff Garzik 	/*
3517c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3518c6fd2807SJeff Garzik 	 */
35193f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3520c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
35213f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3522c6fd2807SJeff Garzik 
3523c6fd2807SJeff Garzik 	/* is double-select really necessary? */
35249af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3525c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
35269af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3527c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3528c6fd2807SJeff Garzik 
3529c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
35309af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
35319af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3532c6fd2807SJeff Garzik 		goto err_out;
3533c6fd2807SJeff Garzik 
3534c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3535c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
35360d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3537c6fd2807SJeff Garzik 	}
3538c6fd2807SJeff Garzik 
3539c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3540c6fd2807SJeff Garzik 	return;
3541c6fd2807SJeff Garzik 
3542c6fd2807SJeff Garzik err_out:
3543c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3544ac8869d5SJeff Garzik 	ata_port_disable(ap);
3545c6fd2807SJeff Garzik 
3546c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3547c6fd2807SJeff Garzik }
3548c6fd2807SJeff Garzik 
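/*
 * Editor's note (assumption, not from this hunk): ata_bus_reset() above
 * is the old probe-time reset path; drivers that have not migrated to
 * the new EH reset hooks typically point their ->phy_reset method at it
 * in their ata_port_operations:
 *
 *	.phy_reset	= ata_bus_reset,
 *
 * New-EH drivers use the ata_std_*reset()/sata_std_hardreset() callbacks
 * further below instead.
 */
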
3549c6fd2807SJeff Garzik /**
3550936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3551936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3552c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3553d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3554c6fd2807SJeff Garzik  *
3555936fd732STejun Heo  *	Make sure SStatus of @link reaches stable state, determined by
3556c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3557c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3558d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3559d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3560c6fd2807SJeff Garzik  *	until timeout and then returns 0 if DET is stable at 1.
3561c6fd2807SJeff Garzik  *
3562d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3563d4b2bab4STejun Heo  *	two is used.
3564d4b2bab4STejun Heo  *
3565c6fd2807SJeff Garzik  *	LOCKING:
3566c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3567c6fd2807SJeff Garzik  *
3568c6fd2807SJeff Garzik  *	RETURNS:
3569c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3570c6fd2807SJeff Garzik  */
3571936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3572d4b2bab4STejun Heo 		       unsigned long deadline)
3573c6fd2807SJeff Garzik {
3574c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3575d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3576d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3577c6fd2807SJeff Garzik 	u32 last, cur;
3578c6fd2807SJeff Garzik 	int rc;
3579c6fd2807SJeff Garzik 
3580d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3581d4b2bab4STejun Heo 	if (time_before(t, deadline))
3582d4b2bab4STejun Heo 		deadline = t;
3583d4b2bab4STejun Heo 
3584936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3585c6fd2807SJeff Garzik 		return rc;
3586c6fd2807SJeff Garzik 	cur &= 0xf;
3587c6fd2807SJeff Garzik 
3588c6fd2807SJeff Garzik 	last = cur;
3589c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3590c6fd2807SJeff Garzik 
3591c6fd2807SJeff Garzik 	while (1) {
3592c6fd2807SJeff Garzik 		msleep(interval_msec);
3593936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3594c6fd2807SJeff Garzik 			return rc;
3595c6fd2807SJeff Garzik 		cur &= 0xf;
3596c6fd2807SJeff Garzik 
3597c6fd2807SJeff Garzik 		/* DET stable? */
3598c6fd2807SJeff Garzik 		if (cur == last) {
3599d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3600c6fd2807SJeff Garzik 				continue;
3601c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3602c6fd2807SJeff Garzik 				return 0;
3603c6fd2807SJeff Garzik 			continue;
3604c6fd2807SJeff Garzik 		}
3605c6fd2807SJeff Garzik 
3606c6fd2807SJeff Garzik 		/* unstable, start over */
3607c6fd2807SJeff Garzik 		last = cur;
3608c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3609c6fd2807SJeff Garzik 
3610f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3611f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3612f1545154STejun Heo 		 */
3613d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3614f1545154STejun Heo 			return -EPIPE;
3615c6fd2807SJeff Garzik 	}
3616c6fd2807SJeff Garzik }
3617c6fd2807SJeff Garzik 
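/*
 * Editor's illustrative sketch (an assumption, not part of this hunk):
 * @params is a plain { interval, duration, timeout } triplet in msec,
 * conventionally one of the sata_deb_timing_* tables defined earlier in
 * this file (roughly { 5, 100, 2000 } for the normal case), so a caller
 * looks like:
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_normal,
 *				jiffies + 30 * HZ);
 */
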
3618c6fd2807SJeff Garzik /**
3619936fd732STejun Heo  *	sata_link_resume - resume SATA link
3620936fd732STejun Heo  *	@link: ATA link to resume SATA
3621c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3622d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3623c6fd2807SJeff Garzik  *
3624936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3625c6fd2807SJeff Garzik  *
3626c6fd2807SJeff Garzik  *	LOCKING:
3627c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3628c6fd2807SJeff Garzik  *
3629c6fd2807SJeff Garzik  *	RETURNS:
3630c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3631c6fd2807SJeff Garzik  */
3632936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3633d4b2bab4STejun Heo 		     unsigned long deadline)
3634c6fd2807SJeff Garzik {
3635c6fd2807SJeff Garzik 	u32 scontrol;
3636c6fd2807SJeff Garzik 	int rc;
3637c6fd2807SJeff Garzik 
3638936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3639c6fd2807SJeff Garzik 		return rc;
3640c6fd2807SJeff Garzik 
3641c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3642c6fd2807SJeff Garzik 
3643936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3644c6fd2807SJeff Garzik 		return rc;
3645c6fd2807SJeff Garzik 
3646c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3647c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3648c6fd2807SJeff Garzik 	 */
3649c6fd2807SJeff Garzik 	msleep(200);
3650c6fd2807SJeff Garzik 
3651936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3652c6fd2807SJeff Garzik }
3653c6fd2807SJeff Garzik 
3654c6fd2807SJeff Garzik /**
3655c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3656cc0680a5STejun Heo  *	@link: ATA link to be reset
3657d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3658c6fd2807SJeff Garzik  *
3659cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  Failure from
3660b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give up
3661b8cffc6aSTejun Heo  *	that port, so prereset should be best-effort.  It does its
3662b8cffc6aSTejun Heo  *	best to prepare for the reset sequence but if things go wrong, it
3663b8cffc6aSTejun Heo  *	should just whine, not fail.
3664c6fd2807SJeff Garzik  *
3665c6fd2807SJeff Garzik  *	LOCKING:
3666c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3667c6fd2807SJeff Garzik  *
3668c6fd2807SJeff Garzik  *	RETURNS:
3669c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3670c6fd2807SJeff Garzik  */
3671cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3672c6fd2807SJeff Garzik {
3673cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3674936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3675c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3676c6fd2807SJeff Garzik 	int rc;
3677c6fd2807SJeff Garzik 
367831daabdaSTejun Heo 	/* handle link resume */
3679c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
36800c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3681c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3682c6fd2807SJeff Garzik 
3683633273a3STejun Heo 	/* Some PMPs don't work with only SRST, force hardreset if PMP
3684633273a3STejun Heo 	 * is supported.
3685633273a3STejun Heo 	 */
3686633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP)
3687633273a3STejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
3688633273a3STejun Heo 
3689c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3690c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3691c6fd2807SJeff Garzik 		return 0;
3692c6fd2807SJeff Garzik 
3693936fd732STejun Heo 	/* if SATA, resume link */
3694a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3695936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3696b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3697b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3698cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3699c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3700c6fd2807SJeff Garzik 	}
3701c6fd2807SJeff Garzik 
3702c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3703c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3704c6fd2807SJeff Garzik 	 */
37050c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3706b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
37076dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3708cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3709b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3710b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3711b8cffc6aSTejun Heo 		}
3712b8cffc6aSTejun Heo 	}
3713c6fd2807SJeff Garzik 
3714c6fd2807SJeff Garzik 	return 0;
3715c6fd2807SJeff Garzik }
3716c6fd2807SJeff Garzik 
3717c6fd2807SJeff Garzik /**
3718c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3719cc0680a5STejun Heo  *	@link: ATA link to reset
3720c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3721d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3722c6fd2807SJeff Garzik  *
3723c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3724c6fd2807SJeff Garzik  *
3725c6fd2807SJeff Garzik  *	LOCKING:
3726c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3727c6fd2807SJeff Garzik  *
3728c6fd2807SJeff Garzik  *	RETURNS:
3729c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3730c6fd2807SJeff Garzik  */
3731cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3732d4b2bab4STejun Heo 		      unsigned long deadline)
3733c6fd2807SJeff Garzik {
3734cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3735c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3736d4b2bab4STejun Heo 	unsigned int devmask = 0;
3737d4b2bab4STejun Heo 	int rc;
3738c6fd2807SJeff Garzik 	u8 err;
3739c6fd2807SJeff Garzik 
3740c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3741c6fd2807SJeff Garzik 
3742936fd732STejun Heo 	if (ata_link_offline(link)) {
3743c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3744c6fd2807SJeff Garzik 		goto out;
3745c6fd2807SJeff Garzik 	}
3746c6fd2807SJeff Garzik 
3747c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3748c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3749c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3750c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3751c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3752c6fd2807SJeff Garzik 
3753c6fd2807SJeff Garzik 	/* select device 0 again */
3754c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3755c6fd2807SJeff Garzik 
3756c6fd2807SJeff Garzik 	/* issue bus reset */
3757c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3758d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
37599b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
3760936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3761cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3762d4b2bab4STejun Heo 		return rc;
3763c6fd2807SJeff Garzik 	}
3764c6fd2807SJeff Garzik 
3765c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
37663f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
37673f19859eSTejun Heo 					  devmask & (1 << 0), &err);
3768c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
37693f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
37703f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3771c6fd2807SJeff Garzik 
3772c6fd2807SJeff Garzik  out:
3773c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3774c6fd2807SJeff Garzik 	return 0;
3775c6fd2807SJeff Garzik }
3776c6fd2807SJeff Garzik 
3777c6fd2807SJeff Garzik /**
3778cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
3779cc0680a5STejun Heo  *	@link: link to reset
3780b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3781d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3782c6fd2807SJeff Garzik  *
3783cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
3784c6fd2807SJeff Garzik  *
3785c6fd2807SJeff Garzik  *	LOCKING:
3786c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3787c6fd2807SJeff Garzik  *
3788c6fd2807SJeff Garzik  *	RETURNS:
3789c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3790c6fd2807SJeff Garzik  */
3791cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3792d4b2bab4STejun Heo 			unsigned long deadline)
3793c6fd2807SJeff Garzik {
3794c6fd2807SJeff Garzik 	u32 scontrol;
3795c6fd2807SJeff Garzik 	int rc;
3796c6fd2807SJeff Garzik 
3797c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3798c6fd2807SJeff Garzik 
3799936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
3800c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3801c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3802c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3803c6fd2807SJeff Garzik 		 * and Sil3124.
3804c6fd2807SJeff Garzik 		 */
3805936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3806b6103f6dSTejun Heo 			goto out;
3807c6fd2807SJeff Garzik 
3808cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3809c6fd2807SJeff Garzik 
3810936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3811b6103f6dSTejun Heo 			goto out;
3812c6fd2807SJeff Garzik 
3813936fd732STejun Heo 		sata_set_spd(link);
3814c6fd2807SJeff Garzik 	}
3815c6fd2807SJeff Garzik 
3816c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3817936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3818b6103f6dSTejun Heo 		goto out;
3819c6fd2807SJeff Garzik 
3820c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3821c6fd2807SJeff Garzik 
3822936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3823b6103f6dSTejun Heo 		goto out;
3824c6fd2807SJeff Garzik 
3825c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3826c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3827c6fd2807SJeff Garzik 	 */
3828c6fd2807SJeff Garzik 	msleep(1);
3829c6fd2807SJeff Garzik 
3830936fd732STejun Heo 	/* bring link back */
3831936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
3832b6103f6dSTejun Heo  out:
3833b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3834b6103f6dSTejun Heo 	return rc;
3835b6103f6dSTejun Heo }
3836b6103f6dSTejun Heo 
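/*
 * Editor's worked note on the SControl values used above (SATA SControl
 * layout: DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8):
 *
 *	(scontrol & 0x0f0) | 0x301	keep SPD; DET=1 (issue COMRESET),
 *					IPM=3 (no partial/slumber)
 *	(scontrol & 0x0f0) | 0x304	keep SPD; DET=4 (phy offline) while
 *					the speed limit is reconfigured
 *
 * The 0x300 written by sata_link_resume() is the same IPM=3 setting with
 * DET=0 (no action), which brings the phy back online.
 */
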
3837b6103f6dSTejun Heo /**
3838b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3839cc0680a5STejun Heo  *	@link: link to reset
3840b6103f6dSTejun Heo  *	@class: resulting class of attached device
3841d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3842b6103f6dSTejun Heo  *
3843b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3844b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3845b6103f6dSTejun Heo  *
3846b6103f6dSTejun Heo  *	LOCKING:
3847b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3848b6103f6dSTejun Heo  *
3849b6103f6dSTejun Heo  *	RETURNS:
3850b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3851b6103f6dSTejun Heo  */
3852cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3853d4b2bab4STejun Heo 		       unsigned long deadline)
3854b6103f6dSTejun Heo {
3855cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3856936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3857b6103f6dSTejun Heo 	int rc;
3858b6103f6dSTejun Heo 
3859b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3860b6103f6dSTejun Heo 
3861b6103f6dSTejun Heo 	/* do hardreset */
3862cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
3863b6103f6dSTejun Heo 	if (rc) {
3864cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3865b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3866b6103f6dSTejun Heo 		return rc;
3867b6103f6dSTejun Heo 	}
3868c6fd2807SJeff Garzik 
3869c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3870936fd732STejun Heo 	if (ata_link_offline(link)) {
3871c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3872c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3873c6fd2807SJeff Garzik 		return 0;
3874c6fd2807SJeff Garzik 	}
3875c6fd2807SJeff Garzik 
387688ff6eafSTejun Heo 	/* wait a while before checking status */
387788ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
387834fee227STejun Heo 
3879633273a3STejun Heo 	/* If PMP is supported, we have to do follow-up SRST.  Note
3880633273a3STejun Heo 	 * that some PMPs don't send D2H Reg FIS after hardreset at
3881633273a3STejun Heo 	 * all if the first port is empty.  Wait for it just for a
3882633273a3STejun Heo 	 * second and request follow-up SRST.
3883633273a3STejun Heo 	 */
3884633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP) {
3885633273a3STejun Heo 		ata_wait_ready(ap, jiffies + HZ);
3886633273a3STejun Heo 		return -EAGAIN;
3887633273a3STejun Heo 	}
3888633273a3STejun Heo 
3889d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
38909b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
38919b89391cSTejun Heo 	if (rc) {
3892cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3893d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3894d4b2bab4STejun Heo 		return rc;
3895c6fd2807SJeff Garzik 	}
3896c6fd2807SJeff Garzik 
3897c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3898c6fd2807SJeff Garzik 
38993f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
3900c6fd2807SJeff Garzik 
3901c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3902c6fd2807SJeff Garzik 	return 0;
3903c6fd2807SJeff Garzik }
3904c6fd2807SJeff Garzik 
3905c6fd2807SJeff Garzik /**
3906c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3907cc0680a5STejun Heo  *	@link: the target ata_link
3908c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3909c6fd2807SJeff Garzik  *
3910c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3911c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3912c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3913c6fd2807SJeff Garzik  *
3914c6fd2807SJeff Garzik  *	LOCKING:
3915c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3916c6fd2807SJeff Garzik  */
3917cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3918c6fd2807SJeff Garzik {
3919cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3920c6fd2807SJeff Garzik 	u32 serror;
3921c6fd2807SJeff Garzik 
3922c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3923c6fd2807SJeff Garzik 
3924c6fd2807SJeff Garzik 	/* print link status */
3925936fd732STejun Heo 	sata_print_link_status(link);
3926c6fd2807SJeff Garzik 
3927c6fd2807SJeff Garzik 	/* clear SError */
3928936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3929936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
3930f7fe7ad4STejun Heo 	link->eh_info.serror = 0;
3931c6fd2807SJeff Garzik 
3932c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3933c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3934c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3935c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3936c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3937c6fd2807SJeff Garzik 
3938c6fd2807SJeff Garzik 	/* bail out if no device is present */
3939c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3940c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3941c6fd2807SJeff Garzik 		return;
3942c6fd2807SJeff Garzik 	}
3943c6fd2807SJeff Garzik 
3944c6fd2807SJeff Garzik 	/* set up device control */
39450d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
39460d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3947c6fd2807SJeff Garzik 
3948c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3949c6fd2807SJeff Garzik }
3950c6fd2807SJeff Garzik 
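/*
 * Editor's illustrative sketch (assumption, not from this hunk): the
 * ata_std_prereset/ata_std_softreset/sata_std_hardreset/ata_std_postreset
 * callbacks above are normally wired together by a LLDD's error handler,
 * e.g. for a BMDMA-style driver (foo_error_handler is hypothetical):
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
 *				   sata_std_hardreset, ata_std_postreset);
 *	}
 */
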
3951c6fd2807SJeff Garzik /**
3952c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3953c6fd2807SJeff Garzik  *	@dev: device to compare against
3954c6fd2807SJeff Garzik  *	@new_class: class of the new device
3955c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3956c6fd2807SJeff Garzik  *
3957c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3958c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3959c6fd2807SJeff Garzik  *	@new_id.
3960c6fd2807SJeff Garzik  *
3961c6fd2807SJeff Garzik  *	LOCKING:
3962c6fd2807SJeff Garzik  *	None.
3963c6fd2807SJeff Garzik  *
3964c6fd2807SJeff Garzik  *	RETURNS:
3965c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3966c6fd2807SJeff Garzik  */
3967c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3968c6fd2807SJeff Garzik 			       const u16 *new_id)
3969c6fd2807SJeff Garzik {
3970c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3971a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3972a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3973c6fd2807SJeff Garzik 
3974c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3975c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3976c6fd2807SJeff Garzik 			       dev->class, new_class);
3977c6fd2807SJeff Garzik 		return 0;
3978c6fd2807SJeff Garzik 	}
3979c6fd2807SJeff Garzik 
3980a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3981a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3982a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3983a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3984c6fd2807SJeff Garzik 
3985c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3986c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3987c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3988c6fd2807SJeff Garzik 		return 0;
3989c6fd2807SJeff Garzik 	}
3990c6fd2807SJeff Garzik 
3991c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3992c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3993c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3994c6fd2807SJeff Garzik 		return 0;
3995c6fd2807SJeff Garzik 	}
3996c6fd2807SJeff Garzik 
3997c6fd2807SJeff Garzik 	return 1;
3998c6fd2807SJeff Garzik }
3999c6fd2807SJeff Garzik 
4000c6fd2807SJeff Garzik /**
4001fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
40023fae450cSHenrik Kretzschmar  *	@dev: target ATA device
4003bff04647STejun Heo  *	@readid_flags: read ID flags
4004c6fd2807SJeff Garzik  *
4005c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
4006c6fd2807SJeff Garzik  *	the port.
4007c6fd2807SJeff Garzik  *
4008c6fd2807SJeff Garzik  *	LOCKING:
4009c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4010c6fd2807SJeff Garzik  *
4011c6fd2807SJeff Garzik  *	RETURNS:
4012c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
4013c6fd2807SJeff Garzik  */
4014fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4015c6fd2807SJeff Garzik {
4016c6fd2807SJeff Garzik 	unsigned int class = dev->class;
40179af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
4018c6fd2807SJeff Garzik 	int rc;
4019c6fd2807SJeff Garzik 
4020c6fd2807SJeff Garzik 	/* read ID data */
4021bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4022c6fd2807SJeff Garzik 	if (rc)
4023fe30911bSTejun Heo 		return rc;
4024c6fd2807SJeff Garzik 
4025c6fd2807SJeff Garzik 	/* is the device still there? */
4026fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
4027fe30911bSTejun Heo 		return -ENODEV;
4028c6fd2807SJeff Garzik 
4029c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4030fe30911bSTejun Heo 	return 0;
4031fe30911bSTejun Heo }
4032fe30911bSTejun Heo 
4033fe30911bSTejun Heo /**
4034fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
4035fe30911bSTejun Heo  *	@dev: device to revalidate
4036422c9daaSTejun Heo  *	@new_class: new class code
4037fe30911bSTejun Heo  *	@readid_flags: read ID flags
4038fe30911bSTejun Heo  *
4039fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4040fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
4041fe30911bSTejun Heo  *
4042fe30911bSTejun Heo  *	LOCKING:
4043fe30911bSTejun Heo  *	Kernel thread context (may sleep)
4044fe30911bSTejun Heo  *
4045fe30911bSTejun Heo  *	RETURNS:
4046fe30911bSTejun Heo  *	0 on success, negative errno otherwise
4047fe30911bSTejun Heo  */
4048422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4049422c9daaSTejun Heo 		       unsigned int readid_flags)
4050fe30911bSTejun Heo {
40516ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
4052fe30911bSTejun Heo 	int rc;
4053fe30911bSTejun Heo 
4054fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
4055fe30911bSTejun Heo 		return -ENODEV;
4056fe30911bSTejun Heo 
4057422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4058422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
4059422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4060422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4061422c9daaSTejun Heo 			       dev->class, new_class);
4062422c9daaSTejun Heo 		rc = -ENODEV;
4063422c9daaSTejun Heo 		goto fail;
4064422c9daaSTejun Heo 	}
4065422c9daaSTejun Heo 
4066fe30911bSTejun Heo 	/* re-read ID */
4067fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
4068fe30911bSTejun Heo 	if (rc)
4069fe30911bSTejun Heo 		goto fail;
4070c6fd2807SJeff Garzik 
4071c6fd2807SJeff Garzik 	/* configure device according to the new ID */
4072efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
40736ddcd3b0STejun Heo 	if (rc)
40746ddcd3b0STejun Heo 		goto fail;
40756ddcd3b0STejun Heo 
40766ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
4077b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4078b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
40796ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
40806ddcd3b0STejun Heo 			       "%llu != %llu\n",
40816ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
40826ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
40838270bec4STejun Heo 
40848270bec4STejun Heo 		/* restore original n_sectors */
40858270bec4STejun Heo 		dev->n_sectors = n_sectors;
40868270bec4STejun Heo 
40876ddcd3b0STejun Heo 		rc = -ENODEV;
40886ddcd3b0STejun Heo 		goto fail;
40896ddcd3b0STejun Heo 	}
40906ddcd3b0STejun Heo 
4091c6fd2807SJeff Garzik 	return 0;
4092c6fd2807SJeff Garzik 
4093c6fd2807SJeff Garzik  fail:
4094c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4095c6fd2807SJeff Garzik 	return rc;
4096c6fd2807SJeff Garzik }
4097c6fd2807SJeff Garzik 
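/*
 * Editor's illustrative sketch of ata_dev_revalidate() usage (assumption,
 * not from this hunk): EH revalidates after a successful reset, passing
 * the freshly classified class and the post-reset readid flag; "ehc"
 * here stands for the caller's ata_eh_context and is only illustrative:
 *
 *	rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
 *				ATA_READID_POSTRESET);
 */
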
40986919a0a6SAlan Cox struct ata_blacklist_entry {
40996919a0a6SAlan Cox 	const char *model_num;
41006919a0a6SAlan Cox 	const char *model_rev;
41016919a0a6SAlan Cox 	unsigned long horkage;
41026919a0a6SAlan Cox };
41036919a0a6SAlan Cox 
41046919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
41056919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
41066919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
41076919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
41086919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
41096919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
41106919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
41116919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
41126919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
41136919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
41146919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
41156919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
41166919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
41176919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
41186919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
41196919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
41206919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
41216919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
41226919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
41236919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
41246919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
41256919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
41266919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
41276919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
41286919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
41296919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
41306919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
41316919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
41326919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
41336919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
41346919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
413539f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
41363af9a77aSTejun Heo 	/* Odd clown on sil3726/4726 PMPs */
41373af9a77aSTejun Heo 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
41383af9a77aSTejun Heo 						ATA_HORKAGE_SKIP_PM },
41396919a0a6SAlan Cox 
414018d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
414140a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
414218d6e9d5SAlbert Lee 
41436919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
41446919a0a6SAlan Cox 
41456919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
41466919a0a6SAlan Cox 	/* NCQ is slow */
41476919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4148459ad688STejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
414909125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
415009125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
41517acfaf30SPaul Rolland 	/* NCQ is broken */
4152539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
41530e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
41540b0a43e0SDavid Milburn 	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
41550b0a43e0SDavid Milburn 	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
4156da6f0ec2SPaolo Ornati 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4157e41bd3e8STejun Heo 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4158539cc7c7SJeff Garzik 
415936e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
416036e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
416136e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
416236e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
416336e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
41646919a0a6SAlan Cox 
416516c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
416616c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
416716c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
416816c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
416916c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
41706919a0a6SAlan Cox 
417193328e11SAlan Cox 	/* Devices which report 1 sector over size HPA */
417293328e11SAlan Cox 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
417393328e11SAlan Cox 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
417493328e11SAlan Cox 
41756bbfd53dSAlan Cox 	/* Devices which get the IVB wrong */
41766bbfd53dSAlan Cox 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
41776bbfd53dSAlan Cox 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4178e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4179e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4180e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
41816bbfd53dSAlan Cox 
41826919a0a6SAlan Cox 	/* End Marker */
41836919a0a6SAlan Cox 	{ }
4184c6fd2807SJeff Garzik };
4185c6fd2807SJeff Garzik 
4186741b7763SAdrian Bunk static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4187539cc7c7SJeff Garzik {
4188539cc7c7SJeff Garzik 	const char *p;
4189539cc7c7SJeff Garzik 	int len;
4190539cc7c7SJeff Garzik 
4191539cc7c7SJeff Garzik 	/*
4192539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
4193539cc7c7SJeff Garzik 	 */
4194539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
4195539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
4196539cc7c7SJeff Garzik 		len = p - patt;
4197317b50b8SAndrew Paprocki 	else {
4198539cc7c7SJeff Garzik 		len = strlen(name);
4199317b50b8SAndrew Paprocki 		if (!len) {
4200317b50b8SAndrew Paprocki 			if (!*patt)
4201317b50b8SAndrew Paprocki 				return 0;
4202317b50b8SAndrew Paprocki 			return -1;
4203317b50b8SAndrew Paprocki 		}
4204317b50b8SAndrew Paprocki 	}
4205539cc7c7SJeff Garzik 
4206539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
4207539cc7c7SJeff Garzik }
4208539cc7c7SJeff Garzik 
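/*
 * Editor's worked example (not part of the original source): only a
 * trailing "*" is treated as a wildcard by strn_pattern_cmp(), so for
 * the blacklist entry { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ } above
 * and an arbitrary model string,
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 6B200M0", '*')
 *
 * compares just the first 7 characters ("Maxtor ") and returns 0, i.e.
 * a match; the firmware revision pattern is then checked the same way.
 */
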
420975683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4210c6fd2807SJeff Garzik {
42118bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
42128bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
42136919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4214c6fd2807SJeff Garzik 
42158bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
42168bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4217c6fd2807SJeff Garzik 
42186919a0a6SAlan Cox 	while (ad->model_num) {
4219539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
42206919a0a6SAlan Cox 			if (ad->model_rev == NULL)
42216919a0a6SAlan Cox 				return ad->horkage;
4222539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
42236919a0a6SAlan Cox 				return ad->horkage;
4224c6fd2807SJeff Garzik 		}
42256919a0a6SAlan Cox 		ad++;
4226c6fd2807SJeff Garzik 	}
4227c6fd2807SJeff Garzik 	return 0;
4228c6fd2807SJeff Garzik }
4229c6fd2807SJeff Garzik 
42306919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
42316919a0a6SAlan Cox {
42326919a0a6SAlan Cox 	/* We don't support polling DMA.
42336919a0a6SAlan Cox 	 * DMA-blacklist ATAPI devices with CDB-intr (and use PIO instead)
42346919a0a6SAlan Cox 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
42356919a0a6SAlan Cox 	 */
42369af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
42376919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
42386919a0a6SAlan Cox 		return 1;
423975683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
42406919a0a6SAlan Cox }
42416919a0a6SAlan Cox 
4242c6fd2807SJeff Garzik /**
42436bbfd53dSAlan Cox  *	ata_is_40wire		-	check drive side detection
42446bbfd53dSAlan Cox  *	@dev: device
42456bbfd53dSAlan Cox  *
42466bbfd53dSAlan Cox  *	Perform drive side detection decoding, allowing for device vendors
42476bbfd53dSAlan Cox  *	who can't follow the documentation.
42486bbfd53dSAlan Cox  */
42496bbfd53dSAlan Cox 
42506bbfd53dSAlan Cox static int ata_is_40wire(struct ata_device *dev)
42516bbfd53dSAlan Cox {
42526bbfd53dSAlan Cox 	if (dev->horkage & ATA_HORKAGE_IVB)
42536bbfd53dSAlan Cox 		return ata_drive_40wire_relaxed(dev->id);
42546bbfd53dSAlan Cox 	return ata_drive_40wire(dev->id);
42556bbfd53dSAlan Cox }
42566bbfd53dSAlan Cox 
42576bbfd53dSAlan Cox /**
4258c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
4259c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
4260c6fd2807SJeff Garzik  *
4261c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
4262c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
4263c6fd2807SJeff Garzik  *	known limits including host controller limits, device
4264c6fd2807SJeff Garzik  *	blacklist, etc...
4265c6fd2807SJeff Garzik  *
4266c6fd2807SJeff Garzik  *	LOCKING:
4267c6fd2807SJeff Garzik  *	None.
4268c6fd2807SJeff Garzik  */
4269c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
4270c6fd2807SJeff Garzik {
42719af5c9c9STejun Heo 	struct ata_link *link = dev->link;
42729af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
4273cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
4274c6fd2807SJeff Garzik 	unsigned long xfer_mask;
4275c6fd2807SJeff Garzik 
4276c6fd2807SJeff Garzik 	/* controller modes available */
4277c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4278c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
4279c6fd2807SJeff Garzik 
42808343f889SRobert Hancock 	/* drive modes available */
4281c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4282c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
4283c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
4284c6fd2807SJeff Garzik 
4285b352e57dSAlan Cox 	/*
4286b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4287b352e57dSAlan Cox 	 *	cable
4288b352e57dSAlan Cox 	 */
4289b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
4290b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
4291b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4292b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
4293b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4294b352e57dSAlan Cox 	}
4295b352e57dSAlan Cox 
4296c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
4297c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4298c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4299c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4300c6fd2807SJeff Garzik 	}
4301c6fd2807SJeff Garzik 
430214d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
430314d66ab7SPetr Vandrovec 	    host->simplex_claimed && host->simplex_claimed != ap) {
4304c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4305c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4306c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4307c6fd2807SJeff Garzik 	}
4308c6fd2807SJeff Garzik 
4309e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4310e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4311e424675fSJeff Garzik 
4312c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4313a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4314c6fd2807SJeff Garzik 
43158343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
43168343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
43178343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
43188343f889SRobert Hancock 	 * solely limited by the cable.
43198343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
43208343f889SRobert Hancock 	 * drive side as well. Cases where we know a 40wire cable
43218343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
43228343f889SRobert Hancock 	 */
43238343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
43248343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
43258343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
43266bbfd53dSAlan Cox 		    (ata_is_40wire(dev) &&
43278343f889SRobert Hancock 		    (ap->cbl == ATA_CBL_PATA_UNK ||
43288343f889SRobert Hancock 		     ap->cbl == ATA_CBL_PATA80))) {
43298343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
43308343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
43318343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
43328343f889SRobert Hancock 		}
43338343f889SRobert Hancock 
4334c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4335c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4336c6fd2807SJeff Garzik }
4337c6fd2807SJeff Garzik 
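/*
 * Editor's worked note on the cable rule above: in the packed xfer mask
 * the UDMA modes occupy bits ATA_SHIFT_UDMA + 0..7, so
 * (0xF8 << ATA_SHIFT_UDMA) selects UDMA3 and up (UDMA/44, /66, /100,
 * /133).  Clearing those bits when only a 40-wire cable can be assumed
 * caps the device at UDMA/33, which is what the warning above reports.
 */
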
4338c6fd2807SJeff Garzik /**
4339c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4340c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4341c6fd2807SJeff Garzik  *
4342c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4343c6fd2807SJeff Garzik  *	on port @ap.
4344c6fd2807SJeff Garzik  *
4345c6fd2807SJeff Garzik  *	LOCKING:
4346c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4347c6fd2807SJeff Garzik  *
4348c6fd2807SJeff Garzik  *	RETURNS:
4349c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4350c6fd2807SJeff Garzik  */
4351c6fd2807SJeff Garzik 
4352c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4353c6fd2807SJeff Garzik {
4354c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4355c6fd2807SJeff Garzik 	unsigned int err_mask;
4356c6fd2807SJeff Garzik 
4357c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4358c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4359c6fd2807SJeff Garzik 
4360464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4361464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4362464cf177STejun Heo 	 */
4363c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4364c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4365c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4366464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4367c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4368b9f8ab2dSAlan Cox 	/* If we are using IORDY we must send the mode setting command */
4369b9f8ab2dSAlan Cox 	if (ata_pio_need_iordy(dev))
4370c6fd2807SJeff Garzik 		tf.nsect = dev->xfer_mode;
4371b9f8ab2dSAlan Cox 	/* If the device has IORDY and the controller does not - turn it off */
4372b9f8ab2dSAlan Cox  	else if (ata_id_has_iordy(dev->id))
4373b9f8ab2dSAlan Cox 		tf.nsect = 0x01;
4374b9f8ab2dSAlan Cox 	else /* In the ancient relic department - skip all of this */
4375b9f8ab2dSAlan Cox 		return 0;
4376c6fd2807SJeff Garzik 
43772b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4378c6fd2807SJeff Garzik 
4379c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4380c6fd2807SJeff Garzik 	return err_mask;
4381c6fd2807SJeff Garzik }
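
/*
 * Editor's worked example (constants are assumptions from <linux/ata.h>,
 * not defined in this hunk): dev->xfer_mode holds the ATA transfer mode
 * encoding that SET FEATURES subcommand 0x03 (SETFEATURES_XFER) expects
 * in the sector count register, e.g. XFER_PIO_4 = 0x0C, XFER_MW_DMA_2 =
 * 0x22, XFER_UDMA_5 = 0x45; the taskfile built above thus ends up as
 * feature = 0x03, nsect = 0x45 for a UDMA/100 drive.
 */
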
4382c6fd2807SJeff Garzik /**
4383218f3d30SJeff Garzik  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
43849f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
43859f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
4386218f3d30SJeff Garzik  *	@feature: The feature to set, passed in the sector count field
43879f45cbd3SKristen Carlson Accardi  *
43889f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4389218f3d30SJeff Garzik  *	on port @ap with the sector count set to @feature.
43909f45cbd3SKristen Carlson Accardi  *
43919f45cbd3SKristen Carlson Accardi  *	LOCKING:
43929f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
43939f45cbd3SKristen Carlson Accardi  *
43949f45cbd3SKristen Carlson Accardi  *	RETURNS:
43959f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
43969f45cbd3SKristen Carlson Accardi  */
4397218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4398218f3d30SJeff Garzik 					u8 feature)
43999f45cbd3SKristen Carlson Accardi {
44009f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
44019f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
44029f45cbd3SKristen Carlson Accardi 
44039f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
44049f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
44059f45cbd3SKristen Carlson Accardi 
44069f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
44079f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
44089f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
44099f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
44109f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
4411218f3d30SJeff Garzik 	tf.nsect = feature;
44129f45cbd3SKristen Carlson Accardi 
44132b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
44149f45cbd3SKristen Carlson Accardi 
44159f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
44169f45cbd3SKristen Carlson Accardi 	return err_mask;
44179f45cbd3SKristen Carlson Accardi }
44189f45cbd3SKristen Carlson Accardi 
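/*
 * Editor's illustrative sketch (constant names are assumptions from
 * <linux/ata.h>, not defined in this hunk): enabling SATA Asynchronous
 * Notification, for example, passes the enable subcommand in @enable and
 * the feature number in @feature:
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 */
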
44199f45cbd3SKristen Carlson Accardi /**
4420c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4421c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4422c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4423c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4424c6fd2807SJeff Garzik  *
4425c6fd2807SJeff Garzik  *	LOCKING:
4426c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4427c6fd2807SJeff Garzik  *
4428c6fd2807SJeff Garzik  *	RETURNS:
4429c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4430c6fd2807SJeff Garzik  */
4431c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4432c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4433c6fd2807SJeff Garzik {
4434c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4435c6fd2807SJeff Garzik 	unsigned int err_mask;
4436c6fd2807SJeff Garzik 
4437c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4438c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4439c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4440c6fd2807SJeff Garzik 
4441c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4442c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
4443c6fd2807SJeff Garzik 
4444c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4445c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4446c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4447c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4448c6fd2807SJeff Garzik 	tf.nsect = sectors;
4449c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4450c6fd2807SJeff Garzik 
44512b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
445218b2466cSAlan Cox 	/* A clean abort indicates an original or just-out-of-spec drive;
445318b2466cSAlan Cox 	   continue, since we issue the setup based on the drive's
445418b2466cSAlan Cox 	   reported working geometry */
445518b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
445618b2466cSAlan Cox 		err_mask = 0;
4457c6fd2807SJeff Garzik 
4458c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4459c6fd2807SJeff Garzik 	return err_mask;
4460c6fd2807SJeff Garzik }
4461c6fd2807SJeff Garzik 
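/*
 * Editor's note (assumption, not shown in this hunk): the caller derives
 * @heads and @sectors from the default CHS geometry in the IDENTIFY
 * data, i.e. something like
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);
 *
 * where word 3 is the default number of heads and word 6 the default
 * number of sectors per track.
 */
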
4462c6fd2807SJeff Garzik /**
4463c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4464c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4465c6fd2807SJeff Garzik  *
4466c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4467c6fd2807SJeff Garzik  *
4468c6fd2807SJeff Garzik  *	LOCKING:
4469cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4470c6fd2807SJeff Garzik  */
447170e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4472c6fd2807SJeff Garzik {
4473c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4474c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4475c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4476c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4477c6fd2807SJeff Garzik 
4478c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4479c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4480c6fd2807SJeff Garzik 
4481c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4482c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4483c6fd2807SJeff Garzik 
4484c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4485c6fd2807SJeff Garzik 
4486c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
4487c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4488c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4489c6fd2807SJeff Garzik 	 */
4490c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4491c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4492c6fd2807SJeff Garzik 
4493c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4494c6fd2807SJeff Garzik 		if (qc->n_elem)
4495c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4496c6fd2807SJeff Garzik 		/* restore last sg */
449787260216SJens Axboe 		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4498c6fd2807SJeff Garzik 		if (pad_buf) {
4499c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
450045711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4501c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4502c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4503c6fd2807SJeff Garzik 		}
4504c6fd2807SJeff Garzik 	} else {
4505c6fd2807SJeff Garzik 		if (qc->n_elem)
4506c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4507c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4508c6fd2807SJeff Garzik 				dir);
4509c6fd2807SJeff Garzik 		/* restore sg */
4510c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4511c6fd2807SJeff Garzik 		if (pad_buf)
4512c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4513c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4514c6fd2807SJeff Garzik 	}
4515c6fd2807SJeff Garzik 
4516c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4517c6fd2807SJeff Garzik 	qc->__sg = NULL;
4518c6fd2807SJeff Garzik }
4519c6fd2807SJeff Garzik 
4520c6fd2807SJeff Garzik /**
4521c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4522c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4523c6fd2807SJeff Garzik  *
4524c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4525c6fd2807SJeff Garzik  *	associated with the current disk command.
4526c6fd2807SJeff Garzik  *
4527c6fd2807SJeff Garzik  *	LOCKING:
4528cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4529c6fd2807SJeff Garzik  *
4530c6fd2807SJeff Garzik  */
4531c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4532c6fd2807SJeff Garzik {
4533c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4534c6fd2807SJeff Garzik 	struct scatterlist *sg;
4535c6fd2807SJeff Garzik 	unsigned int idx;
4536c6fd2807SJeff Garzik 
4537c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4538c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4539c6fd2807SJeff Garzik 
4540c6fd2807SJeff Garzik 	idx = 0;
4541c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4542c6fd2807SJeff Garzik 		u32 addr, offset;
4543c6fd2807SJeff Garzik 		u32 sg_len, len;
4544c6fd2807SJeff Garzik 
4545c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4546c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4547c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4548c6fd2807SJeff Garzik 		 */
4549c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4550c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4551c6fd2807SJeff Garzik 
4552c6fd2807SJeff Garzik 		while (sg_len) {
4553c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4554c6fd2807SJeff Garzik 			len = sg_len;
4555c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4556c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4557c6fd2807SJeff Garzik 
4558c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4559c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4560c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4561c6fd2807SJeff Garzik 
4562c6fd2807SJeff Garzik 			idx++;
4563c6fd2807SJeff Garzik 			sg_len -= len;
4564c6fd2807SJeff Garzik 			addr += len;
4565c6fd2807SJeff Garzik 		}
4566c6fd2807SJeff Garzik 	}
4567c6fd2807SJeff Garzik 
4568c6fd2807SJeff Garzik 	if (idx)
4569c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4570c6fd2807SJeff Garzik }
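
/*
 * Worked example (illustrative only): a 10 KiB segment at DMA address
 * 0x1f000 starts 0xf000 bytes into a 64K region, so the loop above emits
 * two PRD entries: 0x1000 bytes at 0x1f000, then the remaining 0x1800
 * bytes at 0x20000, so that no single entry crosses a 64K boundary.
 */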
4571b9a4197eSTejun Heo 
4572c6fd2807SJeff Garzik /**
4573d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4574d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4575d26fc955SAlan Cox  *
4576d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4577d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4578d26fc955SAlan Cox  *	so that we avoid writing any length 64K records for
4579d26fc955SAlan Cox  *	so that we avoid writing any 64K-length records for
4580d26fc955SAlan Cox  *
4581d26fc955SAlan Cox  *	LOCKING:
4582d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4583d26fc955SAlan Cox  *
4584d26fc955SAlan Cox  */
4585d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4586d26fc955SAlan Cox {
4587d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4588d26fc955SAlan Cox 	struct scatterlist *sg;
4589d26fc955SAlan Cox 	unsigned int idx;
4590d26fc955SAlan Cox 
4591d26fc955SAlan Cox 	WARN_ON(qc->__sg == NULL);
4592d26fc955SAlan Cox 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4593d26fc955SAlan Cox 
4594d26fc955SAlan Cox 	idx = 0;
4595d26fc955SAlan Cox 	ata_for_each_sg(sg, qc) {
4596d26fc955SAlan Cox 		u32 addr, offset;
4597d26fc955SAlan Cox 		u32 sg_len, len, blen;
4598d26fc955SAlan Cox 
4599d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4600d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4601d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4602d26fc955SAlan Cox 		 */
4603d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4604d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4605d26fc955SAlan Cox 
4606d26fc955SAlan Cox 		while (sg_len) {
4607d26fc955SAlan Cox 			offset = addr & 0xffff;
4608d26fc955SAlan Cox 			len = sg_len;
4609d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4610d26fc955SAlan Cox 				len = 0x10000 - offset;
4611d26fc955SAlan Cox 
4612d26fc955SAlan Cox 			blen = len & 0xffff;
4613d26fc955SAlan Cox 			ap->prd[idx].addr = cpu_to_le32(addr);
4614d26fc955SAlan Cox 			if (blen == 0) {
4615d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4616d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4617d26fc955SAlan Cox 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4618d26fc955SAlan Cox 				blen = 0x8000;
4619d26fc955SAlan Cox 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4620d26fc955SAlan Cox 			}
4621d26fc955SAlan Cox 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4622d26fc955SAlan Cox 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4623d26fc955SAlan Cox 
4624d26fc955SAlan Cox 			idx++;
4625d26fc955SAlan Cox 			sg_len -= len;
4626d26fc955SAlan Cox 			addr += len;
4627d26fc955SAlan Cox 		}
4628d26fc955SAlan Cox 	}
4629d26fc955SAlan Cox 
4630d26fc955SAlan Cox 	if (idx)
4631d26fc955SAlan Cox 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4632d26fc955SAlan Cox }
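
/*
 * Worked example (illustrative only): a fully aligned 64 KiB segment
 * yields blen == 0 after the 0xffff mask.  Instead of writing the 0x0000
 * length that the spec defines as 64K, the loop above emits two 32 KiB
 * (0x8000) entries, so chipsets such as the CS5530 never see a zero
 * length field.
 */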
4633d26fc955SAlan Cox 
4634d26fc955SAlan Cox /**
4635c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4636c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4637c6fd2807SJeff Garzik  *
4638c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4639c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4640c6fd2807SJeff Garzik  *	supplied PACKET command.
4641c6fd2807SJeff Garzik  *
4642c6fd2807SJeff Garzik  *	LOCKING:
4643cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4644c6fd2807SJeff Garzik  *
4645c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4646c6fd2807SJeff Garzik  *               nonzero otherwise
4647c6fd2807SJeff Garzik  */
4648c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4649c6fd2807SJeff Garzik {
4650c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4651c6fd2807SJeff Garzik 
4652b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4653b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4654b9a4197eSTejun Heo 	 */
4655b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
46566f23a31dSAlbert Lee 		return 1;
46576f23a31dSAlbert Lee 
4658c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4659b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4660c6fd2807SJeff Garzik 
4661b9a4197eSTejun Heo 	return 0;
4662c6fd2807SJeff Garzik }
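
/*
 * Minimal sketch of the LLD side of this filter (hypothetical code, not
 * part of libata): a controller that cannot DMA a particular PACKET
 * command vetoes it from its ->check_atapi_dma hook by returning
 * non-zero, which makes libata fall back to PIO for that command:
 *
 *	static int foo_check_atapi_dma(struct ata_queued_cmd *qc)
 *	{
 *		if (qc->cdb[0] == GPCMD_READ_CD_MSF)
 *			return 1;
 *		return 0;
 *	}
 *
 * foo_check_atapi_dma() is an invented name used only for illustration.
 */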
4663b9a4197eSTejun Heo 
4664c6fd2807SJeff Garzik /**
4665140b5e59STejun Heo  *	atapi_qc_may_overflow - Check whether data transfer may overflow
4666140b5e59STejun Heo  *	@qc: ATA command in question
4667140b5e59STejun Heo  *
4668140b5e59STejun Heo  *	ATAPI commands which transfer variable-length data to the host
4669140b5e59STejun Heo  *	might overflow due to application error or hardware bug.  This
4670140b5e59STejun Heo  *	function checks whether overflow should be drained and ignored
4671140b5e59STejun Heo  *	for @qc.
4672140b5e59STejun Heo  *
4673140b5e59STejun Heo  *	LOCKING:
4674140b5e59STejun Heo  *	None.
4675140b5e59STejun Heo  *
4676140b5e59STejun Heo  *	RETURNS:
4677140b5e59STejun Heo  *	1 if @qc may overflow; otherwise, 0.
4678140b5e59STejun Heo  */
4679140b5e59STejun Heo static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4680140b5e59STejun Heo {
46810dc36888STejun Heo 	if (qc->tf.protocol != ATAPI_PROT_PIO &&
46820dc36888STejun Heo 	    qc->tf.protocol != ATAPI_PROT_DMA)
4683140b5e59STejun Heo 		return 0;
4684140b5e59STejun Heo 
4685140b5e59STejun Heo 	if (qc->tf.flags & ATA_TFLAG_WRITE)
4686140b5e59STejun Heo 		return 0;
4687140b5e59STejun Heo 
4688140b5e59STejun Heo 	switch (qc->cdb[0]) {
4689140b5e59STejun Heo 	case READ_10:
4690140b5e59STejun Heo 	case READ_12:
4691140b5e59STejun Heo 	case WRITE_10:
4692140b5e59STejun Heo 	case WRITE_12:
4693140b5e59STejun Heo 	case GPCMD_READ_CD:
4694140b5e59STejun Heo 	case GPCMD_READ_CD_MSF:
4695140b5e59STejun Heo 		return 0;
4696140b5e59STejun Heo 	}
4697140b5e59STejun Heo 
4698140b5e59STejun Heo 	return 1;
4699140b5e59STejun Heo }
4700140b5e59STejun Heo 
4701140b5e59STejun Heo /**
470231cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
470331cc23b3STejun Heo  *	@qc: ATA command in question
470431cc23b3STejun Heo  *
470531cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
470631cc23b3STejun Heo  *	not.  As the upper layer only knows the queue depth, we are
470731cc23b3STejun Heo  *	responsible for maintaining exclusion.  This function checks
470831cc23b3STejun Heo  *	whether a new command @qc can be issued.
470931cc23b3STejun Heo  *
471031cc23b3STejun Heo  *	LOCKING:
471131cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
471231cc23b3STejun Heo  *
471331cc23b3STejun Heo  *	RETURNS:
471431cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
471531cc23b3STejun Heo  */
471631cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
471731cc23b3STejun Heo {
471831cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
471931cc23b3STejun Heo 
472031cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
472131cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
472231cc23b3STejun Heo 			return 0;
472331cc23b3STejun Heo 	} else {
472431cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
472531cc23b3STejun Heo 			return 0;
472631cc23b3STejun Heo 	}
472731cc23b3STejun Heo 
472831cc23b3STejun Heo 	return ATA_DEFER_LINK;
472931cc23b3STejun Heo }
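
/*
 * Usage sketch (hypothetical driver structure, not from this file): an
 * NCQ-capable LLD that relies on the exclusion policy above simply wires
 * the helper into its port operations, e.g.:
 *
 *	static const struct ata_port_operations foo_ops = {
 *		...
 *		.qc_defer	= ata_std_qc_defer,
 *		...
 *	};
 *
 * foo_ops is an invented name; drivers needing stricter rules provide
 * their own qc_defer hook instead.
 */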
473031cc23b3STejun Heo 
473131cc23b3STejun Heo /**
4732c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4733c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4734c6fd2807SJeff Garzik  *
4735c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4736c6fd2807SJeff Garzik  *
4737c6fd2807SJeff Garzik  *	LOCKING:
4738cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4739c6fd2807SJeff Garzik  */
4740c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4741c6fd2807SJeff Garzik {
4742c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4743c6fd2807SJeff Garzik 		return;
4744c6fd2807SJeff Garzik 
4745c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4746c6fd2807SJeff Garzik }
4747c6fd2807SJeff Garzik 
4748d26fc955SAlan Cox /**
4749d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4750d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4751d26fc955SAlan Cox  *
4752d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4753d26fc955SAlan Cox  *
4754d26fc955SAlan Cox  *	LOCKING:
4755d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4756d26fc955SAlan Cox  */
4757d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4758d26fc955SAlan Cox {
4759d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4760d26fc955SAlan Cox 		return;
4761d26fc955SAlan Cox 
4762d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4763d26fc955SAlan Cox }
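
/*
 * Usage sketch (illustrative only): controllers whose PRD engine cannot
 * digest a 0x0000 (64K) length field select the "dumb" variant in their
 * port operations instead of ata_qc_prep:
 *
 *	.qc_prep	= ata_dumb_qc_prep,
 *
 * so every PRD entry they are handed has been split as described for
 * ata_fill_sg_dumb() above.
 */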
4764d26fc955SAlan Cox 
4765c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4766c6fd2807SJeff Garzik 
4767c6fd2807SJeff Garzik /**
4768c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4769c6fd2807SJeff Garzik  *	@qc: Command to be associated
4770c6fd2807SJeff Garzik  *	@buf: Memory buffer
4771c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4772c6fd2807SJeff Garzik  *
4773c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4774c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4775c6fd2807SJeff Garzik  *
4776c6fd2807SJeff Garzik  *	LOCKING:
4777cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4778c6fd2807SJeff Garzik  */
4779c6fd2807SJeff Garzik 
4780c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4781c6fd2807SJeff Garzik {
4782c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4783c6fd2807SJeff Garzik 
4784c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4785c6fd2807SJeff Garzik 	qc->n_elem = 1;
4786c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4787c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4788c6fd2807SJeff Garzik 	qc->nbytes = buflen;
478987260216SJens Axboe 	qc->cursg = qc->__sg;
4790c6fd2807SJeff Garzik 
479161c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4792c6fd2807SJeff Garzik }
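
/*
 * Usage sketch (illustrative only, not a call site from this file): a
 * caller issuing a command over one contiguous kernel buffer would
 * typically do something like
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *
 * (or DMA_TO_DEVICE for a write) before issuing the qc, after which the
 * issue path maps the buffer via ata_sg_setup_one().
 */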
4793c6fd2807SJeff Garzik 
4794c6fd2807SJeff Garzik /**
4795c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4796c6fd2807SJeff Garzik  *	@qc: Command to be associated
4797c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4798c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4799c6fd2807SJeff Garzik  *
4800c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4801c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4802c6fd2807SJeff Garzik  *	elements.
4803c6fd2807SJeff Garzik  *
4804c6fd2807SJeff Garzik  *	LOCKING:
4805cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4806c6fd2807SJeff Garzik  */
4807c6fd2807SJeff Garzik 
4808c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4809c6fd2807SJeff Garzik 		 unsigned int n_elem)
4810c6fd2807SJeff Garzik {
4811c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4812c6fd2807SJeff Garzik 	qc->__sg = sg;
4813c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4814c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
481587260216SJens Axboe 	qc->cursg = qc->__sg;
4816c6fd2807SJeff Garzik }
4817c6fd2807SJeff Garzik 
4818c6fd2807SJeff Garzik /**
4819c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4820c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4821c6fd2807SJeff Garzik  *
4822c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4823c6fd2807SJeff Garzik  *
4824c6fd2807SJeff Garzik  *	LOCKING:
4825cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4826c6fd2807SJeff Garzik  *
4827c6fd2807SJeff Garzik  *	RETURNS:
4828c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4829c6fd2807SJeff Garzik  */
4830c6fd2807SJeff Garzik 
4831c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4832c6fd2807SJeff Garzik {
4833c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4834c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4835c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4836c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4837c6fd2807SJeff Garzik 	int trim_sg = 0;
4838c6fd2807SJeff Garzik 
4839c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4840c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4841c6fd2807SJeff Garzik 	if (qc->pad_len) {
4842c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4843c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4844c6fd2807SJeff Garzik 
4845c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4846c6fd2807SJeff Garzik 
4847c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4848c6fd2807SJeff Garzik 
4849c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4850c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4851c6fd2807SJeff Garzik 			       qc->pad_len);
4852c6fd2807SJeff Garzik 
4853c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4854c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4855c6fd2807SJeff Garzik 		/* trim sg */
4856c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4857c6fd2807SJeff Garzik 		if (sg->length == 0)
4858c6fd2807SJeff Garzik 			trim_sg = 1;
4859c6fd2807SJeff Garzik 
4860c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4861c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4862c6fd2807SJeff Garzik 	}
4863c6fd2807SJeff Garzik 
4864c6fd2807SJeff Garzik 	if (trim_sg) {
4865c6fd2807SJeff Garzik 		qc->n_elem--;
4866c6fd2807SJeff Garzik 		goto skip_map;
4867c6fd2807SJeff Garzik 	}
4868c6fd2807SJeff Garzik 
4869c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4870c6fd2807SJeff Garzik 				     sg->length, dir);
4871c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4872c6fd2807SJeff Garzik 		/* restore sg */
4873c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4874c6fd2807SJeff Garzik 		return -1;
4875c6fd2807SJeff Garzik 	}
4876c6fd2807SJeff Garzik 
4877c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4878c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4879c6fd2807SJeff Garzik 
4880c6fd2807SJeff Garzik skip_map:
4881c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4882c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4883c6fd2807SJeff Garzik 
4884c6fd2807SJeff Garzik 	return 0;
4885c6fd2807SJeff Garzik }
4886c6fd2807SJeff Garzik 
4887c6fd2807SJeff Garzik /**
4888c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4889c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4890c6fd2807SJeff Garzik  *
4891c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4892c6fd2807SJeff Garzik  *
4893c6fd2807SJeff Garzik  *	LOCKING:
4894cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4895c6fd2807SJeff Garzik  *
4896c6fd2807SJeff Garzik  *	RETURNS:
4897c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4898c6fd2807SJeff Garzik  *
4899c6fd2807SJeff Garzik  */
4900c6fd2807SJeff Garzik 
4901c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4902c6fd2807SJeff Garzik {
4903c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4904c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
490587260216SJens Axboe 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4906c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4907c6fd2807SJeff Garzik 
490844877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4909c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4910c6fd2807SJeff Garzik 
4911c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4912c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4913c6fd2807SJeff Garzik 	if (qc->pad_len) {
4914c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4915c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4916c6fd2807SJeff Garzik 		unsigned int offset;
4917c6fd2807SJeff Garzik 
4918c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4919c6fd2807SJeff Garzik 
4920c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4921c6fd2807SJeff Garzik 
4922c6fd2807SJeff Garzik 		/*
4923c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4924c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4925c6fd2807SJeff Garzik 		 */
4926c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4927acd054a5SAnton Blanchard 		sg_init_table(psg, 1);
4928642f1490SJens Axboe 		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4929642f1490SJens Axboe 				qc->pad_len, offset_in_page(offset));
4930c6fd2807SJeff Garzik 
4931c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
493245711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4933c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4934c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4935c6fd2807SJeff Garzik 		}
4936c6fd2807SJeff Garzik 
4937c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4938c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4939c6fd2807SJeff Garzik 		/* trim last sg */
4940c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4941c6fd2807SJeff Garzik 		if (lsg->length == 0)
4942c6fd2807SJeff Garzik 			trim_sg = 1;
4943c6fd2807SJeff Garzik 
4944c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4945c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4946c6fd2807SJeff Garzik 	}
4947c6fd2807SJeff Garzik 
4948c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4949c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4950c6fd2807SJeff Garzik 		pre_n_elem--;
4951c6fd2807SJeff Garzik 
4952c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4953c6fd2807SJeff Garzik 		n_elem = 0;
4954c6fd2807SJeff Garzik 		goto skip_map;
4955c6fd2807SJeff Garzik 	}
4956c6fd2807SJeff Garzik 
4957c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4958c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4959c6fd2807SJeff Garzik 	if (n_elem < 1) {
4960c6fd2807SJeff Garzik 		/* restore last sg */
4961c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4962c6fd2807SJeff Garzik 		return -1;
4963c6fd2807SJeff Garzik 	}
4964c6fd2807SJeff Garzik 
4965c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4966c6fd2807SJeff Garzik 
4967c6fd2807SJeff Garzik skip_map:
4968c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4969c6fd2807SJeff Garzik 
4970c6fd2807SJeff Garzik 	return 0;
4971c6fd2807SJeff Garzik }
4972c6fd2807SJeff Garzik 
4973c6fd2807SJeff Garzik /**
4974c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4975c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4976c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4977c6fd2807SJeff Garzik  *
4978c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4979c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4980c6fd2807SJeff Garzik  *	vice-versa.
4981c6fd2807SJeff Garzik  *
4982c6fd2807SJeff Garzik  *	LOCKING:
4983c6fd2807SJeff Garzik  *	Inherited from caller.
4984c6fd2807SJeff Garzik  */
4985c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4986c6fd2807SJeff Garzik {
4987c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4988c6fd2807SJeff Garzik 	unsigned int i;
4989c6fd2807SJeff Garzik 
4990c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4991c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4992c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4993c6fd2807SJeff Garzik }
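
/*
 * Example (illustrative only): IDENTIFY data is returned as 256
 * little-endian 16-bit words, so the read path typically fixes it up
 * for big-endian hosts with
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * which compiles to a no-op on little-endian builds.
 */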
4994c6fd2807SJeff Garzik 
4995c6fd2807SJeff Garzik /**
49960d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
499755dba312STejun Heo  *	@dev: device to target
4998c6fd2807SJeff Garzik  *	@buf: data buffer
4999c6fd2807SJeff Garzik  *	@buflen: buffer length
5000c6fd2807SJeff Garzik  *	@rw: transfer direction (READ or WRITE)
5001c6fd2807SJeff Garzik  *
5002c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
5003c6fd2807SJeff Garzik  *
5004c6fd2807SJeff Garzik  *	LOCKING:
5005c6fd2807SJeff Garzik  *	Inherited from caller.
500655dba312STejun Heo  *
500755dba312STejun Heo  *	RETURNS:
500855dba312STejun Heo  *	Bytes consumed.
5009c6fd2807SJeff Garzik  */
501055dba312STejun Heo unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
501155dba312STejun Heo 			   unsigned int buflen, int rw)
5012c6fd2807SJeff Garzik {
501355dba312STejun Heo 	struct ata_port *ap = dev->link->ap;
501455dba312STejun Heo 	void __iomem *data_addr = ap->ioaddr.data_addr;
5015c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
5016c6fd2807SJeff Garzik 
5017c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
501855dba312STejun Heo 	if (rw == READ)
501955dba312STejun Heo 		ioread16_rep(data_addr, buf, words);
5020c6fd2807SJeff Garzik 	else
502155dba312STejun Heo 		iowrite16_rep(data_addr, buf, words);
5022c6fd2807SJeff Garzik 
5023c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
5024c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
5025c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
5026c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
5027c6fd2807SJeff Garzik 
502855dba312STejun Heo 		if (rw == READ) {
502955dba312STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(data_addr));
5030c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
503155dba312STejun Heo 		} else {
503255dba312STejun Heo 			memcpy(align_buf, trailing_buf, 1);
503355dba312STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
5034c6fd2807SJeff Garzik 		}
503555dba312STejun Heo 		words++;
5036c6fd2807SJeff Garzik 	}
503755dba312STejun Heo 
503855dba312STejun Heo 	return words << 1;
5039c6fd2807SJeff Garzik }
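
/*
 * Worked example (illustrative only): a 7-byte transfer moves three
 * 16-bit words through ioread16_rep()/iowrite16_rep(), handles the odd
 * trailing byte with a single ioread16()/iowrite16(), and returns 8,
 * since the consumed byte count is rounded up to a whole word.
 */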
5040c6fd2807SJeff Garzik 
5041c6fd2807SJeff Garzik /**
50420d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
504355dba312STejun Heo  *	@dev: device to target
5044c6fd2807SJeff Garzik  *	@buf: data buffer
5045c6fd2807SJeff Garzik  *	@buflen: buffer length
5046c6fd2807SJeff Garzik  *	@rw: transfer direction (READ or WRITE)
5047c6fd2807SJeff Garzik  *
5048c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
5049c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
5050c6fd2807SJeff Garzik  *
5051c6fd2807SJeff Garzik  *	LOCKING:
5052c6fd2807SJeff Garzik  *	Inherited from caller.
505355dba312STejun Heo  *
505455dba312STejun Heo  *	RETURNS:
505555dba312STejun Heo  *	Bytes consumed.
5056c6fd2807SJeff Garzik  */
505755dba312STejun Heo unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
505855dba312STejun Heo 				 unsigned int buflen, int rw)
5059c6fd2807SJeff Garzik {
5060c6fd2807SJeff Garzik 	unsigned long flags;
506155dba312STejun Heo 	unsigned int consumed;
506255dba312STejun Heo 
5063c6fd2807SJeff Garzik 	local_irq_save(flags);
506455dba312STejun Heo 	consumed = ata_data_xfer(dev, buf, buflen, rw);
5065c6fd2807SJeff Garzik 	local_irq_restore(flags);
506655dba312STejun Heo 
506755dba312STejun Heo 	return consumed;
5068c6fd2807SJeff Garzik }
5069c6fd2807SJeff Garzik 
5070c6fd2807SJeff Garzik 
5071c6fd2807SJeff Garzik /**
50725a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
5073c6fd2807SJeff Garzik  *	@qc: Command on going
5074c6fd2807SJeff Garzik  *
50755a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
5076c6fd2807SJeff Garzik  *
5077c6fd2807SJeff Garzik  *	LOCKING:
5078c6fd2807SJeff Garzik  *	Inherited from caller.
5079c6fd2807SJeff Garzik  */
5080c6fd2807SJeff Garzik 
5081c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
5082c6fd2807SJeff Garzik {
5083c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5084c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5085c6fd2807SJeff Garzik 	struct page *page;
5086c6fd2807SJeff Garzik 	unsigned int offset;
5087c6fd2807SJeff Garzik 	unsigned char *buf;
5088c6fd2807SJeff Garzik 
50895a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
5090c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5091c6fd2807SJeff Garzik 
509245711f1aSJens Axboe 	page = sg_page(qc->cursg);
509387260216SJens Axboe 	offset = qc->cursg->offset + qc->cursg_ofs;
5094c6fd2807SJeff Garzik 
5095c6fd2807SJeff Garzik 	/* get the current page and offset */
5096c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5097c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5098c6fd2807SJeff Garzik 
5099c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5100c6fd2807SJeff Garzik 
5101c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5102c6fd2807SJeff Garzik 		unsigned long flags;
5103c6fd2807SJeff Garzik 
5104c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
5105c6fd2807SJeff Garzik 		local_irq_save(flags);
5106c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5107c6fd2807SJeff Garzik 
5108c6fd2807SJeff Garzik 		/* do the actual data transfer */
51095a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5110c6fd2807SJeff Garzik 
5111c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5112c6fd2807SJeff Garzik 		local_irq_restore(flags);
5113c6fd2807SJeff Garzik 	} else {
5114c6fd2807SJeff Garzik 		buf = page_address(page);
51155a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5116c6fd2807SJeff Garzik 	}
5117c6fd2807SJeff Garzik 
51185a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
51195a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
5120c6fd2807SJeff Garzik 
512187260216SJens Axboe 	if (qc->cursg_ofs == qc->cursg->length) {
512287260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5123c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5124c6fd2807SJeff Garzik 	}
5125c6fd2807SJeff Garzik }
5126c6fd2807SJeff Garzik 
5127c6fd2807SJeff Garzik /**
51285a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
5129c6fd2807SJeff Garzik  *	@qc: Command on going
5130c6fd2807SJeff Garzik  *
51315a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
5132c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
5133c6fd2807SJeff Garzik  *
5134c6fd2807SJeff Garzik  *	LOCKING:
5135c6fd2807SJeff Garzik  *	Inherited from caller.
5136c6fd2807SJeff Garzik  */
5137c6fd2807SJeff Garzik 
5138c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
5139c6fd2807SJeff Garzik {
5140c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
5141c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
5142c6fd2807SJeff Garzik 		unsigned int nsect;
5143c6fd2807SJeff Garzik 
5144c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
5145c6fd2807SJeff Garzik 
51465a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5147726f0785STejun Heo 			    qc->dev->multi_count);
5148c6fd2807SJeff Garzik 		while (nsect--)
5149c6fd2807SJeff Garzik 			ata_pio_sector(qc);
5150c6fd2807SJeff Garzik 	} else
5151c6fd2807SJeff Garzik 		ata_pio_sector(qc);
51524cc980b3SAlbert Lee 
51534cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
5154c6fd2807SJeff Garzik }
5155c6fd2807SJeff Garzik 
5156c6fd2807SJeff Garzik /**
5157c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
5158c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
5159c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
5160c6fd2807SJeff Garzik  *
5161c6fd2807SJeff Garzik  *	When the device has indicated its readiness to accept
5162c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
5163c6fd2807SJeff Garzik  *
5164c6fd2807SJeff Garzik  *	LOCKING:
5165c6fd2807SJeff Garzik  *	Inherited from caller.
5166c6fd2807SJeff Garzik  */
5167c6fd2807SJeff Garzik 
5168c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5169c6fd2807SJeff Garzik {
5170c6fd2807SJeff Garzik 	/* send SCSI cdb */
5171c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
5172c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
5173c6fd2807SJeff Garzik 
5174c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5175c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
5176c6fd2807SJeff Garzik 
5177c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
51780dc36888STejun Heo 	case ATAPI_PROT_PIO:
5179c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
5180c6fd2807SJeff Garzik 		break;
51810dc36888STejun Heo 	case ATAPI_PROT_NODATA:
5182c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5183c6fd2807SJeff Garzik 		break;
51840dc36888STejun Heo 	case ATAPI_PROT_DMA:
5185c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5186c6fd2807SJeff Garzik 		/* initiate bmdma */
5187c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
5188c6fd2807SJeff Garzik 		break;
5189c6fd2807SJeff Garzik 	}
5190c6fd2807SJeff Garzik }
5191c6fd2807SJeff Garzik 
5192c6fd2807SJeff Garzik /**
5193c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
5194c6fd2807SJeff Garzik  *	@qc: Command on going
5195c6fd2807SJeff Garzik  *	@bytes: number of bytes
5196c6fd2807SJeff Garzik  *
5197c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5198c6fd2807SJeff Garzik  *
5199c6fd2807SJeff Garzik  *	LOCKING:
5200c6fd2807SJeff Garzik  *	Inherited from caller.
5201c6fd2807SJeff Garzik  *
5202c6fd2807SJeff Garzik  */
5203140b5e59STejun Heo static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5204c6fd2807SJeff Garzik {
5205c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5206c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5207140b5e59STejun Heo 	struct ata_eh_info *ehi = &qc->dev->link->eh_info;
5208140b5e59STejun Heo 	struct scatterlist *sg;
5209c6fd2807SJeff Garzik 	struct page *page;
5210c6fd2807SJeff Garzik 	unsigned char *buf;
5211c6fd2807SJeff Garzik 	unsigned int offset, count;
5212c6fd2807SJeff Garzik 
5213c6fd2807SJeff Garzik next_sg:
5214140b5e59STejun Heo 	sg = qc->cursg;
5215140b5e59STejun Heo 	if (unlikely(!sg)) {
5216c6fd2807SJeff Garzik 		/*
5217c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
5218c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
5219c6fd2807SJeff Garzik 		 * and fulfill the length specified in the byte count register,
5220c6fd2807SJeff Garzik 		 *    - for the read case, discard trailing data from the device
5221c6fd2807SJeff Garzik 		 *    - for the write case, pad zero data to the device
5222c6fd2807SJeff Garzik 		 */
5223c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
5224c6fd2807SJeff Garzik 		unsigned int i;
5225c6fd2807SJeff Garzik 
5226140b5e59STejun Heo 		if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5227140b5e59STejun Heo 			ata_ehi_push_desc(ehi, "too much trailing data "
5228140b5e59STejun Heo 					  "buf=%u cur=%u bytes=%u",
5229140b5e59STejun Heo 					  qc->nbytes, qc->curbytes, bytes);
5230140b5e59STejun Heo 			return -1;
5231c6fd2807SJeff Garzik 		}
5232c6fd2807SJeff Garzik 
5233140b5e59STejun Heo 		/* overflow is expected for misc ATAPI commands */
5234140b5e59STejun Heo 		if (bytes && !atapi_qc_may_overflow(qc))
5235140b5e59STejun Heo 			ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5236140b5e59STejun Heo 				       "trailing data (cdb=%02x nbytes=%u)\n",
5237140b5e59STejun Heo 				       bytes, qc->cdb[0], qc->nbytes);
5238140b5e59STejun Heo 
5239140b5e59STejun Heo 		for (i = 0; i < (bytes + 1) / 2; i++)
5240140b5e59STejun Heo 			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5241140b5e59STejun Heo 
5242140b5e59STejun Heo 		qc->curbytes += bytes;
5243140b5e59STejun Heo 
5244140b5e59STejun Heo 		return 0;
5245140b5e59STejun Heo 	}
5246c6fd2807SJeff Garzik 
524745711f1aSJens Axboe 	page = sg_page(sg);
5248c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
5249c6fd2807SJeff Garzik 
5250c6fd2807SJeff Garzik 	/* get the current page and offset */
5251c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5252c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5253c6fd2807SJeff Garzik 
5254c6fd2807SJeff Garzik 	/* don't overrun current sg */
5255c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
5256c6fd2807SJeff Garzik 
5257c6fd2807SJeff Garzik 	/* don't cross page boundaries */
5258c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
5259c6fd2807SJeff Garzik 
5260c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5261c6fd2807SJeff Garzik 
5262c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5263c6fd2807SJeff Garzik 		unsigned long flags;
5264c6fd2807SJeff Garzik 
5265c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
5266c6fd2807SJeff Garzik 		local_irq_save(flags);
5267c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5268c6fd2807SJeff Garzik 
5269c6fd2807SJeff Garzik 		/* do the actual data transfer */
5270c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5271c6fd2807SJeff Garzik 
5272c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5273c6fd2807SJeff Garzik 		local_irq_restore(flags);
5274c6fd2807SJeff Garzik 	} else {
5275c6fd2807SJeff Garzik 		buf = page_address(page);
5276c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5277c6fd2807SJeff Garzik 	}
5278c6fd2807SJeff Garzik 
5279c6fd2807SJeff Garzik 	bytes -= count;
5280140b5e59STejun Heo 	if ((count & 1) && bytes)
5281140b5e59STejun Heo 		bytes--;
5282c6fd2807SJeff Garzik 	qc->curbytes += count;
5283c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
5284c6fd2807SJeff Garzik 
5285c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
528687260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5287c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5288c6fd2807SJeff Garzik 	}
5289c6fd2807SJeff Garzik 
5290c6fd2807SJeff Garzik 	if (bytes)
5291c6fd2807SJeff Garzik 		goto next_sg;
5292140b5e59STejun Heo 
5293140b5e59STejun Heo 	return 0;
5294c6fd2807SJeff Garzik }
5295c6fd2807SJeff Garzik 
5296c6fd2807SJeff Garzik /**
5297c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
5298c6fd2807SJeff Garzik  *	@qc: Command on going
5299c6fd2807SJeff Garzik  *
5300c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5301c6fd2807SJeff Garzik  *
5302c6fd2807SJeff Garzik  *	LOCKING:
5303c6fd2807SJeff Garzik  *	Inherited from caller.
5304c6fd2807SJeff Garzik  */
5305c6fd2807SJeff Garzik 
5306c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5307c6fd2807SJeff Garzik {
5308c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5309c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
5310c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
5311c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5312c6fd2807SJeff Garzik 
5313c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
5314c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
5315c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
5316c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
5317c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
5318c6fd2807SJeff Garzik 	 */
5319c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
5320c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
5321c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
5322c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
5323c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
5324c6fd2807SJeff Garzik 
5325c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
53260106372dSAlbert Lee 	if (unlikely(ireason & (1 << 0)))
5327c6fd2807SJeff Garzik 		goto err_out;
5328c6fd2807SJeff Garzik 
5329c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
5330c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
53310106372dSAlbert Lee 	if (unlikely(do_write != i_write))
53320106372dSAlbert Lee 		goto err_out;
53330106372dSAlbert Lee 
53340106372dSAlbert Lee 	if (unlikely(!bytes))
5335c6fd2807SJeff Garzik 		goto err_out;
5336c6fd2807SJeff Garzik 
533744877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5338c6fd2807SJeff Garzik 
5339140b5e59STejun Heo 	if (__atapi_pio_bytes(qc, bytes))
5340140b5e59STejun Heo 		goto err_out;
53414cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
5342c6fd2807SJeff Garzik 
5343c6fd2807SJeff Garzik 	return;
5344c6fd2807SJeff Garzik 
5345c6fd2807SJeff Garzik err_out:
5346c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5347c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
5348c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
5349c6fd2807SJeff Garzik }
5350c6fd2807SJeff Garzik 
5351c6fd2807SJeff Garzik /**
5352c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5353c6fd2807SJeff Garzik  *	@ap: the target ata_port
5354c6fd2807SJeff Garzik  *	@qc: qc on going
5355c6fd2807SJeff Garzik  *
5356c6fd2807SJeff Garzik  *	RETURNS:
5357c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
5358c6fd2807SJeff Garzik  */
5359c6fd2807SJeff Garzik 
5360c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5361c6fd2807SJeff Garzik {
5362c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
5363c6fd2807SJeff Garzik 		return 1;
5364c6fd2807SJeff Garzik 
5365c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
5366c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
5367c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
5368c6fd2807SJeff Garzik 		    return 1;
5369c6fd2807SJeff Garzik 
5370405e66b3STejun Heo 		if (ata_is_atapi(qc->tf.protocol) &&
5371c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5372c6fd2807SJeff Garzik 			return 1;
5373c6fd2807SJeff Garzik 	}
5374c6fd2807SJeff Garzik 
5375c6fd2807SJeff Garzik 	return 0;
5376c6fd2807SJeff Garzik }
5377c6fd2807SJeff Garzik 
5378c6fd2807SJeff Garzik /**
5379c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5380c6fd2807SJeff Garzik  *	@qc: Command to complete
5381c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5382c6fd2807SJeff Garzik  *
5383c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5384c6fd2807SJeff Garzik  *
5385c6fd2807SJeff Garzik  *	LOCKING:
5386cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5387c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5388c6fd2807SJeff Garzik  */
5389c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5390c6fd2807SJeff Garzik {
5391c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5392c6fd2807SJeff Garzik 	unsigned long flags;
5393c6fd2807SJeff Garzik 
5394c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5395c6fd2807SJeff Garzik 		if (in_wq) {
5396c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5397c6fd2807SJeff Garzik 
5398cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5399cca3974eSJeff Garzik 			 * released.
5400c6fd2807SJeff Garzik 			 */
5401c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5402c6fd2807SJeff Garzik 			if (qc) {
5403c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
540483625006SAkira Iguchi 					ap->ops->irq_on(ap);
5405c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5406c6fd2807SJeff Garzik 				} else
5407c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5408c6fd2807SJeff Garzik 			}
5409c6fd2807SJeff Garzik 
5410c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5411c6fd2807SJeff Garzik 		} else {
5412c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5413c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5414c6fd2807SJeff Garzik 			else
5415c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5416c6fd2807SJeff Garzik 		}
5417c6fd2807SJeff Garzik 	} else {
5418c6fd2807SJeff Garzik 		if (in_wq) {
5419c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
542083625006SAkira Iguchi 			ap->ops->irq_on(ap);
5421c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5422c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5423c6fd2807SJeff Garzik 		} else
5424c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5425c6fd2807SJeff Garzik 	}
5426c6fd2807SJeff Garzik }
5427c6fd2807SJeff Garzik 
5428c6fd2807SJeff Garzik /**
5429c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5430c6fd2807SJeff Garzik  *	@ap: the target ata_port
5431c6fd2807SJeff Garzik  *	@qc: qc on going
5432c6fd2807SJeff Garzik  *	@status: current device status
5433c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5434c6fd2807SJeff Garzik  *
5435c6fd2807SJeff Garzik  *	RETURNS:
5436c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5437c6fd2807SJeff Garzik  */
5438c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5439c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5440c6fd2807SJeff Garzik {
5441c6fd2807SJeff Garzik 	unsigned long flags = 0;
5442c6fd2807SJeff Garzik 	int poll_next;
5443c6fd2807SJeff Garzik 
5444c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5445c6fd2807SJeff Garzik 
5446c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5447c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5448c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5449c6fd2807SJeff Garzik 	 */
5450c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5451c6fd2807SJeff Garzik 
5452c6fd2807SJeff Garzik fsm_start:
5453c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
545444877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5455c6fd2807SJeff Garzik 
5456c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5457c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5458c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5459c6fd2807SJeff Garzik 
5460c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5461c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5462c6fd2807SJeff Garzik 		 * takes over after sending the data.
5463c6fd2807SJeff Garzik 		 */
5464c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5465c6fd2807SJeff Garzik 
5466c6fd2807SJeff Garzik 		/* check device status */
5467c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5468c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5469c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5470c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5471c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5472c6fd2807SJeff Garzik 			else
5473c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5474c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5475c6fd2807SJeff Garzik 
5476c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5477c6fd2807SJeff Garzik 			goto fsm_start;
5478c6fd2807SJeff Garzik 		}
5479c6fd2807SJeff Garzik 
5480c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5481c6fd2807SJeff Garzik 		 * when it finds something wrong.
5482c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5483c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5484c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5485c6fd2807SJeff Garzik 		 */
5486c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
54872d3b8eeaSAlbert Lee 			/* Some ATAPI tape drives forget to clear the ERR bit
54882d3b8eeaSAlbert Lee 			 * when doing the next command (mostly request sense).
54892d3b8eeaSAlbert Lee 			 * We ignore ERR here to workaround and proceed sending
54902d3b8eeaSAlbert Lee 			 * the CDB.
54912d3b8eeaSAlbert Lee 			 */
54922d3b8eeaSAlbert Lee 			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
54932d3b8eeaSAlbert Lee 				ata_port_printk(ap, KERN_WARNING,
54942d3b8eeaSAlbert Lee 						"DRQ=1 with device error, "
54952d3b8eeaSAlbert Lee 						"dev_stat 0x%X\n", status);
5496c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5497c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5498c6fd2807SJeff Garzik 				goto fsm_start;
5499c6fd2807SJeff Garzik 			}
55002d3b8eeaSAlbert Lee 		}
5501c6fd2807SJeff Garzik 
5502c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5503c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5504c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5505c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5506c6fd2807SJeff Garzik 		 */
5507c6fd2807SJeff Garzik 		if (in_wq)
5508c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5509c6fd2807SJeff Garzik 
5510c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5511c6fd2807SJeff Garzik 			/* PIO data out protocol.
5512c6fd2807SJeff Garzik 			 * send first data block.
5513c6fd2807SJeff Garzik 			 */
5514c6fd2807SJeff Garzik 
5515c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5516c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5517c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5518c6fd2807SJeff Garzik 			 */
5519c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5520c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5521c6fd2807SJeff Garzik 		} else
5522c6fd2807SJeff Garzik 			/* send CDB */
5523c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5524c6fd2807SJeff Garzik 
5525c6fd2807SJeff Garzik 		if (in_wq)
5526c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5527c6fd2807SJeff Garzik 
5528c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5529c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5530c6fd2807SJeff Garzik 		 */
5531c6fd2807SJeff Garzik 		break;
5532c6fd2807SJeff Garzik 
5533c6fd2807SJeff Garzik 	case HSM_ST:
5534c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
55350dc36888STejun Heo 		if (qc->tf.protocol == ATAPI_PROT_PIO) {
5536c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5537c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5538c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5539c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5540c6fd2807SJeff Garzik 				 */
5541c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5542c6fd2807SJeff Garzik 				goto fsm_start;
5543c6fd2807SJeff Garzik 			}
5544c6fd2807SJeff Garzik 
5545c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5546c6fd2807SJeff Garzik 			 * when it finds something wrong.
5547c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5548c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5549c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5550c6fd2807SJeff Garzik 			 */
5551c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
555244877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
555344877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
555444877b4eSTejun Heo 						status);
5555c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5556c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5557c6fd2807SJeff Garzik 				goto fsm_start;
5558c6fd2807SJeff Garzik 			}
5559c6fd2807SJeff Garzik 
5560c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5561c6fd2807SJeff Garzik 
5562c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5563c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5564c6fd2807SJeff Garzik 				goto fsm_start;
5565c6fd2807SJeff Garzik 
5566c6fd2807SJeff Garzik 		} else {
5567c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5568c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5569c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5570c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5571c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5572c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5573c6fd2807SJeff Garzik 				else
557455a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
557555a8e2c8STejun Heo 					 * Phantom devices also trigger this
557655a8e2c8STejun Heo 					 * condition, so mark the NODEV hint.
557755a8e2c8STejun Heo 					 */
557855a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
557955a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5580c6fd2807SJeff Garzik 
5581c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5582c6fd2807SJeff Garzik 				goto fsm_start;
5583c6fd2807SJeff Garzik 			}
5584c6fd2807SJeff Garzik 
5585c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5586c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
5587c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5588c6fd2807SJeff Garzik 			 * block of junk data before changing the
5589c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5590c6fd2807SJeff Garzik 			 *
5591c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5592c6fd2807SJeff Garzik 			 * sense since the data block has been
5593c6fd2807SJeff Garzik 			 * transferred to the device.
5594c6fd2807SJeff Garzik 			 */
5595c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5596c6fd2807SJeff Garzik 				/* data might be corrupted */
5597c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5598c6fd2807SJeff Garzik 
5599c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5600c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5601c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5602c6fd2807SJeff Garzik 				}
5603c6fd2807SJeff Garzik 
5604c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5605c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5606c6fd2807SJeff Garzik 
5607c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5608c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5609c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5610c6fd2807SJeff Garzik 				 */
5611c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5612c6fd2807SJeff Garzik 				goto fsm_start;
5613c6fd2807SJeff Garzik 			}
5614c6fd2807SJeff Garzik 
5615c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5616c6fd2807SJeff Garzik 
5617c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5618c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5619c6fd2807SJeff Garzik 				/* all data read */
5620c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5621c6fd2807SJeff Garzik 				goto fsm_start;
5622c6fd2807SJeff Garzik 			}
5623c6fd2807SJeff Garzik 		}
5624c6fd2807SJeff Garzik 
5625c6fd2807SJeff Garzik 		poll_next = 1;
5626c6fd2807SJeff Garzik 		break;
5627c6fd2807SJeff Garzik 
5628c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5629c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5630c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5631c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5632c6fd2807SJeff Garzik 			goto fsm_start;
5633c6fd2807SJeff Garzik 		}
5634c6fd2807SJeff Garzik 
5635c6fd2807SJeff Garzik 		/* no more data to transfer */
5636c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
563744877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5638c6fd2807SJeff Garzik 
5639c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5640c6fd2807SJeff Garzik 
5641c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5642c6fd2807SJeff Garzik 
5643c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5644c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5645c6fd2807SJeff Garzik 
5646c6fd2807SJeff Garzik 		poll_next = 0;
5647c6fd2807SJeff Garzik 		break;
5648c6fd2807SJeff Garzik 
5649c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5650c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5651c6fd2807SJeff Garzik 		 * know what's wrong and recover
5652c6fd2807SJeff Garzik 		 */
5653c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5654c6fd2807SJeff Garzik 
5655c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5656c6fd2807SJeff Garzik 
5657c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5658c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5659c6fd2807SJeff Garzik 
5660c6fd2807SJeff Garzik 		poll_next = 0;
5661c6fd2807SJeff Garzik 		break;
5662c6fd2807SJeff Garzik 	default:
5663c6fd2807SJeff Garzik 		poll_next = 0;
5664c6fd2807SJeff Garzik 		BUG();
5665c6fd2807SJeff Garzik 	}
5666c6fd2807SJeff Garzik 
5667c6fd2807SJeff Garzik 	return poll_next;
5668c6fd2807SJeff Garzik }
5669c6fd2807SJeff Garzik 
567065f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5671c6fd2807SJeff Garzik {
567265f27f38SDavid Howells 	struct ata_port *ap =
567365f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
567465f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5675c6fd2807SJeff Garzik 	u8 status;
5676c6fd2807SJeff Garzik 	int poll_next;
5677c6fd2807SJeff Garzik 
5678c6fd2807SJeff Garzik fsm_start:
5679c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5680c6fd2807SJeff Garzik 
5681c6fd2807SJeff Garzik 	/*
5682c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5683c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5684c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5685c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5686c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5687c6fd2807SJeff Garzik 	 */
5688c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5689c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5690c6fd2807SJeff Garzik 		msleep(2);
5691c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5692c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5693c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5694c6fd2807SJeff Garzik 			return;
5695c6fd2807SJeff Garzik 		}
5696c6fd2807SJeff Garzik 	}
5697c6fd2807SJeff Garzik 
5698c6fd2807SJeff Garzik 	/* move the HSM */
5699c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5700c6fd2807SJeff Garzik 
5701c6fd2807SJeff Garzik 	/* another command or interrupt handler
5702c6fd2807SJeff Garzik 	 * may be running at this point.
5703c6fd2807SJeff Garzik 	 */
5704c6fd2807SJeff Garzik 	if (poll_next)
5705c6fd2807SJeff Garzik 		goto fsm_start;
5706c6fd2807SJeff Garzik }
5707c6fd2807SJeff Garzik 
5708c6fd2807SJeff Garzik /**
5709c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command for queueing
5710c6fd2807SJeff Garzik  *	@ap: Port from which an available command structure is requested
5712c6fd2807SJeff Garzik  *
5713c6fd2807SJeff Garzik  *	LOCKING:
5714c6fd2807SJeff Garzik  *	None.
5715c6fd2807SJeff Garzik  */
5716c6fd2807SJeff Garzik 
5717c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5718c6fd2807SJeff Garzik {
5719c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5720c6fd2807SJeff Garzik 	unsigned int i;
5721c6fd2807SJeff Garzik 
5722c6fd2807SJeff Garzik 	/* no command while frozen */
5723c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5724c6fd2807SJeff Garzik 		return NULL;
5725c6fd2807SJeff Garzik 
5726c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5727c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5728c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5729c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5730c6fd2807SJeff Garzik 			break;
5731c6fd2807SJeff Garzik 		}
5732c6fd2807SJeff Garzik 
5733c6fd2807SJeff Garzik 	if (qc)
5734c6fd2807SJeff Garzik 		qc->tag = i;
5735c6fd2807SJeff Garzik 
5736c6fd2807SJeff Garzik 	return qc;
5737c6fd2807SJeff Garzik }
5738c6fd2807SJeff Garzik 
5739c6fd2807SJeff Garzik /**
5740c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5741c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5742c6fd2807SJeff Garzik  *
5743c6fd2807SJeff Garzik  *	LOCKING:
5744c6fd2807SJeff Garzik  *	None.
5745c6fd2807SJeff Garzik  */
5746c6fd2807SJeff Garzik 
5747c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5748c6fd2807SJeff Garzik {
57499af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5750c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5751c6fd2807SJeff Garzik 
5752c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5753c6fd2807SJeff Garzik 	if (qc) {
5754c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5755c6fd2807SJeff Garzik 		qc->ap = ap;
5756c6fd2807SJeff Garzik 		qc->dev = dev;
5757c6fd2807SJeff Garzik 
5758c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5759c6fd2807SJeff Garzik 	}
5760c6fd2807SJeff Garzik 
5761c6fd2807SJeff Garzik 	return qc;
5762c6fd2807SJeff Garzik }
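
/*
 * Illustrative sketch, not part of the original file: a caller such as the
 * SCSI translation layer typically obtains a qc with ata_qc_new_init(),
 * fills in the taskfile and completion callback, and then issues it with
 * the host lock held.  "my_complete_fn" below is a hypothetical callback.
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(dev);
 *
 *	if (qc) {
 *		qc->tf.command  = ATA_CMD_FLUSH;
 *		qc->tf.protocol = ATA_PROT_NODATA;
 *		qc->tf.flags   |= ATA_TFLAG_DEVICE;
 *		qc->complete_fn = my_complete_fn;
 *		ata_qc_issue(qc);
 *	}
 */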
5763c6fd2807SJeff Garzik 
5764c6fd2807SJeff Garzik /**
5765c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5766c6fd2807SJeff Garzik  *	@qc: Command to complete
5767c6fd2807SJeff Garzik  *
5768c6fd2807SJeff Garzik  *	Designed to free unused ata_queued_cmd object
5769c6fd2807SJeff Garzik  *	in case something prevents using it.
5770c6fd2807SJeff Garzik  *
5771c6fd2807SJeff Garzik  *	LOCKING:
5772cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5773c6fd2807SJeff Garzik  */
5774c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5775c6fd2807SJeff Garzik {
5776c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5777c6fd2807SJeff Garzik 	unsigned int tag;
5778c6fd2807SJeff Garzik 
5779c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5780c6fd2807SJeff Garzik 
5781c6fd2807SJeff Garzik 	qc->flags = 0;
5782c6fd2807SJeff Garzik 	tag = qc->tag;
5783c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5784c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5785c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5786c6fd2807SJeff Garzik 	}
5787c6fd2807SJeff Garzik }
5788c6fd2807SJeff Garzik 
5789c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5790c6fd2807SJeff Garzik {
5791c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
57929af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5793c6fd2807SJeff Garzik 
5794c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5795c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5796c6fd2807SJeff Garzik 
5797c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5798c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5799c6fd2807SJeff Garzik 
5800c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5801da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
58029af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5803da917d69STejun Heo 		if (!link->sactive)
5804da917d69STejun Heo 			ap->nr_active_links--;
5805da917d69STejun Heo 	} else {
58069af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5807da917d69STejun Heo 		ap->nr_active_links--;
5808da917d69STejun Heo 	}
5809da917d69STejun Heo 
5810da917d69STejun Heo 	/* clear exclusive status */
5811da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5812da917d69STejun Heo 		     ap->excl_link == link))
5813da917d69STejun Heo 		ap->excl_link = NULL;
5814c6fd2807SJeff Garzik 
5815c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5816c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5817c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5818c6fd2807SJeff Garzik 	 */
5819c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5820c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5821c6fd2807SJeff Garzik 
5822c6fd2807SJeff Garzik 	/* call completion callback */
5823c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5824c6fd2807SJeff Garzik }
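
/*
 * Illustrative example, not part of the original file: completing the qc
 * with tag 3 clears bit 3 in link->sactive (NCQ) or resets
 * link->active_tag (non-NCQ), and always clears bit 3 in ap->qc_active,
 * e.g. qc_active 0x0000000c -> 0x00000004, leaving only tag 2 in flight.
 */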
5825c6fd2807SJeff Garzik 
582639599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
582739599a53STejun Heo {
582839599a53STejun Heo 	struct ata_port *ap = qc->ap;
582939599a53STejun Heo 
583039599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
58314742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
583239599a53STejun Heo }
583339599a53STejun Heo 
583400115e0fSTejun Heo static void ata_verify_xfer(struct ata_queued_cmd *qc)
583500115e0fSTejun Heo {
583600115e0fSTejun Heo 	struct ata_device *dev = qc->dev;
583700115e0fSTejun Heo 
583800115e0fSTejun Heo 	if (ata_tag_internal(qc->tag))
583900115e0fSTejun Heo 		return;
584000115e0fSTejun Heo 
584100115e0fSTejun Heo 	if (ata_is_nodata(qc->tf.protocol))
584200115e0fSTejun Heo 		return;
584300115e0fSTejun Heo 
584400115e0fSTejun Heo 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
584500115e0fSTejun Heo 		return;
584600115e0fSTejun Heo 
584700115e0fSTejun Heo 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
584800115e0fSTejun Heo }
584900115e0fSTejun Heo 
5850c6fd2807SJeff Garzik /**
5851c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5852c6fd2807SJeff Garzik  *	@qc: Command to complete
5854c6fd2807SJeff Garzik  *
5855c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5856c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5857c6fd2807SJeff Garzik  *
5858c6fd2807SJeff Garzik  *	LOCKING:
5859cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5860c6fd2807SJeff Garzik  */
5861c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5862c6fd2807SJeff Garzik {
5863c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5864c6fd2807SJeff Garzik 
5865c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5866c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5867c6fd2807SJeff Garzik 	 *
5868c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5869c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5870c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5871c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5872c6fd2807SJeff Garzik 	 *
5873c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5874c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5875c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5876c6fd2807SJeff Garzik 	 * taken care of.
5877c6fd2807SJeff Garzik 	 */
5878c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
58794dbfa39bSTejun Heo 		struct ata_device *dev = qc->dev;
58804dbfa39bSTejun Heo 		struct ata_eh_info *ehi = &dev->link->eh_info;
58814dbfa39bSTejun Heo 
5882c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5883c6fd2807SJeff Garzik 
5884c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5885c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5886c6fd2807SJeff Garzik 
5887c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5888c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5889c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
589039599a53STejun Heo 				fill_result_tf(qc);
5891c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5892c6fd2807SJeff Garzik 				return;
5893c6fd2807SJeff Garzik 			}
5894c6fd2807SJeff Garzik 		}
5895c6fd2807SJeff Garzik 
5896c6fd2807SJeff Garzik 		/* read result TF if requested */
5897c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
589839599a53STejun Heo 			fill_result_tf(qc);
5899c6fd2807SJeff Garzik 
59004dbfa39bSTejun Heo 		/* Some commands need post-processing after successful
59014dbfa39bSTejun Heo 		 * completion.
59024dbfa39bSTejun Heo 		 */
59034dbfa39bSTejun Heo 		switch (qc->tf.command) {
59044dbfa39bSTejun Heo 		case ATA_CMD_SET_FEATURES:
59054dbfa39bSTejun Heo 			if (qc->tf.feature != SETFEATURES_WC_ON &&
59064dbfa39bSTejun Heo 			    qc->tf.feature != SETFEATURES_WC_OFF)
59074dbfa39bSTejun Heo 				break;
59084dbfa39bSTejun Heo 			/* fall through */
59094dbfa39bSTejun Heo 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
59104dbfa39bSTejun Heo 		case ATA_CMD_SET_MULTI: /* multi_count changed */
59114dbfa39bSTejun Heo 			/* revalidate device */
59124dbfa39bSTejun Heo 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
59134dbfa39bSTejun Heo 			ata_port_schedule_eh(ap);
59144dbfa39bSTejun Heo 			break;
5915054a5fbaSTejun Heo 
5916054a5fbaSTejun Heo 		case ATA_CMD_SLEEP:
5917054a5fbaSTejun Heo 			dev->flags |= ATA_DFLAG_SLEEPING;
5918054a5fbaSTejun Heo 			break;
59194dbfa39bSTejun Heo 		}
59204dbfa39bSTejun Heo 
592100115e0fSTejun Heo 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
592200115e0fSTejun Heo 			ata_verify_xfer(qc);
592300115e0fSTejun Heo 
5924c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5925c6fd2807SJeff Garzik 	} else {
5926c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5927c6fd2807SJeff Garzik 			return;
5928c6fd2807SJeff Garzik 
5929c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5930c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
593139599a53STejun Heo 			fill_result_tf(qc);
5932c6fd2807SJeff Garzik 
5933c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5934c6fd2807SJeff Garzik 	}
5935c6fd2807SJeff Garzik }
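
/*
 * Illustrative sketch, not part of the original file: a typical LLD
 * interrupt path completes a finished command roughly like this, with
 * the host lock held:
 *
 *	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 *
 *	if (qc) {
 *		qc->err_mask |= ac_err_mask(ata_chk_status(ap));
 *		ata_qc_complete(qc);
 *	}
 */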
5936c6fd2807SJeff Garzik 
5937c6fd2807SJeff Garzik /**
5938c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5939c6fd2807SJeff Garzik  *	@ap: port in question
5940c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5941c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5942c6fd2807SJeff Garzik  *
5943c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5944c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to complete
5945c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5946c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5947c6fd2807SJeff Garzik  *
5948c6fd2807SJeff Garzik  *	LOCKING:
5949cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5950c6fd2807SJeff Garzik  *
5951c6fd2807SJeff Garzik  *	RETURNS:
5952c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5953c6fd2807SJeff Garzik  */
5954c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5955c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5956c6fd2807SJeff Garzik {
5957c6fd2807SJeff Garzik 	int nr_done = 0;
5958c6fd2807SJeff Garzik 	u32 done_mask;
5959c6fd2807SJeff Garzik 	int i;
5960c6fd2807SJeff Garzik 
5961c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5962c6fd2807SJeff Garzik 
5963c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5964c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5965c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5966c6fd2807SJeff Garzik 		return -EINVAL;
5967c6fd2807SJeff Garzik 	}
5968c6fd2807SJeff Garzik 
5969c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5970c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5971c6fd2807SJeff Garzik 
5972c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5973c6fd2807SJeff Garzik 			continue;
5974c6fd2807SJeff Garzik 
5975c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5976c6fd2807SJeff Garzik 			if (finish_qc)
5977c6fd2807SJeff Garzik 				finish_qc(qc);
5978c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5979c6fd2807SJeff Garzik 			nr_done++;
5980c6fd2807SJeff Garzik 		}
5981c6fd2807SJeff Garzik 	}
5982c6fd2807SJeff Garzik 
5983c6fd2807SJeff Garzik 	return nr_done;
5984c6fd2807SJeff Garzik }
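
/*
 * Illustrative sketch, not part of the original file: an NCQ-capable LLD
 * usually reads the controller's view of still-active tags and passes it
 * in as the new qc_active mask; "my_read_active_tags" is a hypothetical
 * helper standing in for a controller register read.
 *
 *	u32 qc_active = my_read_active_tags(ap);
 *	int nr_done = ata_qc_complete_multiple(ap, qc_active, NULL);
 *
 * Every tag that is set in ap->qc_active but clear in qc_active gets
 * completed; a tag set in qc_active but not in ap->qc_active is treated
 * as an illegal transition and -EINVAL is returned.
 */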
5985c6fd2807SJeff Garzik 
5986c6fd2807SJeff Garzik /**
5987c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5988c6fd2807SJeff Garzik  *	@qc: command to issue to device
5989c6fd2807SJeff Garzik  *
5990c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5991c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5992c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5993c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5994c6fd2807SJeff Garzik  *
5995c6fd2807SJeff Garzik  *	LOCKING:
5996cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5997c6fd2807SJeff Garzik  */
5998c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5999c6fd2807SJeff Garzik {
6000c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
60019af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
6002405e66b3STejun Heo 	u8 prot = qc->tf.protocol;
6003c6fd2807SJeff Garzik 
6004c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
6005c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
6006c6fd2807SJeff Garzik 	 * request ATAPI sense.
6007c6fd2807SJeff Garzik 	 */
60089af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
6009c6fd2807SJeff Garzik 
60101973a023STejun Heo 	if (ata_is_ncq(prot)) {
60119af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
6012da917d69STejun Heo 
6013da917d69STejun Heo 		if (!link->sactive)
6014da917d69STejun Heo 			ap->nr_active_links++;
60159af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
6016c6fd2807SJeff Garzik 	} else {
60179af5c9c9STejun Heo 		WARN_ON(link->sactive);
6018da917d69STejun Heo 
6019da917d69STejun Heo 		ap->nr_active_links++;
60209af5c9c9STejun Heo 		link->active_tag = qc->tag;
6021c6fd2807SJeff Garzik 	}
6022c6fd2807SJeff Garzik 
6023c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
6024c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
6025c6fd2807SJeff Garzik 
6026405e66b3STejun Heo 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
6027405e66b3STejun Heo 				 (ap->flags & ATA_FLAG_PIO_DMA))) {
6028c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
6029c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
6030c6fd2807SJeff Garzik 				goto sg_err;
6031c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
6032c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
6033c6fd2807SJeff Garzik 				goto sg_err;
6034c6fd2807SJeff Garzik 		}
6035c6fd2807SJeff Garzik 	} else {
6036c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
6037c6fd2807SJeff Garzik 	}
6038c6fd2807SJeff Garzik 
6039054a5fbaSTejun Heo 	/* if device is sleeping, schedule softreset and abort the link */
6040054a5fbaSTejun Heo 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6041054a5fbaSTejun Heo 		link->eh_info.action |= ATA_EH_SOFTRESET;
6042054a5fbaSTejun Heo 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6043054a5fbaSTejun Heo 		ata_link_abort(link);
6044054a5fbaSTejun Heo 		return;
6045054a5fbaSTejun Heo 	}
6046054a5fbaSTejun Heo 
6047c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
6048c6fd2807SJeff Garzik 
6049c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
6050c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
6051c6fd2807SJeff Garzik 		goto err;
6052c6fd2807SJeff Garzik 	return;
6053c6fd2807SJeff Garzik 
6054c6fd2807SJeff Garzik sg_err:
6055c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
6056c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
6057c6fd2807SJeff Garzik err:
6058c6fd2807SJeff Garzik 	ata_qc_complete(qc);
6059c6fd2807SJeff Garzik }
6060c6fd2807SJeff Garzik 
6061c6fd2807SJeff Garzik /**
6062c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6063c6fd2807SJeff Garzik  *	@qc: command to issue to device
6064c6fd2807SJeff Garzik  *
6065c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
6066c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
6067c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
6068c6fd2807SJeff Garzik  *	is slightly different.
6069c6fd2807SJeff Garzik  *
6070c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
6071c6fd2807SJeff Garzik  *
6072c6fd2807SJeff Garzik  *	LOCKING:
6073cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6074c6fd2807SJeff Garzik  *
6075c6fd2807SJeff Garzik  *	RETURNS:
6076c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
6077c6fd2807SJeff Garzik  */
6078c6fd2807SJeff Garzik 
6079c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6080c6fd2807SJeff Garzik {
6081c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
6082c6fd2807SJeff Garzik 
6083c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
6084c6fd2807SJeff Garzik 	 * interrupt-driven PIO and ATAPI CDB interrupts.
6085c6fd2807SJeff Garzik 	 */
6086c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
6087c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
6088c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
6089e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
60900dc36888STejun Heo 		case ATAPI_PROT_PIO:
60910dc36888STejun Heo 		case ATAPI_PROT_NODATA:
6092c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
6093c6fd2807SJeff Garzik 			break;
60940dc36888STejun Heo 		case ATAPI_PROT_DMA:
6095c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6096c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
6097c6fd2807SJeff Garzik 				BUG();
6098c6fd2807SJeff Garzik 			break;
6099c6fd2807SJeff Garzik 		default:
6100c6fd2807SJeff Garzik 			break;
6101c6fd2807SJeff Garzik 		}
6102c6fd2807SJeff Garzik 	}
6103c6fd2807SJeff Garzik 
6104c6fd2807SJeff Garzik 	/* select the device */
6105c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
6106c6fd2807SJeff Garzik 
6107c6fd2807SJeff Garzik 	/* start the command */
6108c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
6109c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
6110c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6111c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6112c6fd2807SJeff Garzik 
6113c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6114c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6115c6fd2807SJeff Garzik 
6116c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6117c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6118c6fd2807SJeff Garzik 
6119c6fd2807SJeff Garzik 		break;
6120c6fd2807SJeff Garzik 
6121c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
6122c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6123c6fd2807SJeff Garzik 
6124c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6125c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6126c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
6127c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6128c6fd2807SJeff Garzik 		break;
6129c6fd2807SJeff Garzik 
6130c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
6131c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6132c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6133c6fd2807SJeff Garzik 
6134c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6135c6fd2807SJeff Garzik 
6136c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
6137c6fd2807SJeff Garzik 			/* PIO data out protocol */
6138c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
6139c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6140c6fd2807SJeff Garzik 
6141c6fd2807SJeff Garzik 			/* always send first data block using
6142c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
6143c6fd2807SJeff Garzik 			 */
6144c6fd2807SJeff Garzik 		} else {
6145c6fd2807SJeff Garzik 			/* PIO data in protocol */
6146c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
6147c6fd2807SJeff Garzik 
6148c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
6149c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
6150c6fd2807SJeff Garzik 
6151c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
6152c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
6153c6fd2807SJeff Garzik 			 */
6154c6fd2807SJeff Garzik 		}
6155c6fd2807SJeff Garzik 
6156c6fd2807SJeff Garzik 		break;
6157c6fd2807SJeff Garzik 
61580dc36888STejun Heo 	case ATAPI_PROT_PIO:
61590dc36888STejun Heo 	case ATAPI_PROT_NODATA:
6160c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6161c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6162c6fd2807SJeff Garzik 
6163c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6164c6fd2807SJeff Garzik 
6165c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6166c6fd2807SJeff Garzik 
6167c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6168c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6169c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
6170c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6171c6fd2807SJeff Garzik 		break;
6172c6fd2807SJeff Garzik 
61730dc36888STejun Heo 	case ATAPI_PROT_DMA:
6174c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6175c6fd2807SJeff Garzik 
6176c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6177c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6178c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6179c6fd2807SJeff Garzik 
6180c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6181c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6182c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6183c6fd2807SJeff Garzik 		break;
6184c6fd2807SJeff Garzik 
6185c6fd2807SJeff Garzik 	default:
6186c6fd2807SJeff Garzik 		WARN_ON(1);
6187c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
6188c6fd2807SJeff Garzik 	}
6189c6fd2807SJeff Garzik 
6190c6fd2807SJeff Garzik 	return 0;
6191c6fd2807SJeff Garzik }
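
/*
 * Illustrative sketch, not part of the original file: a simple BMDMA-style
 * LLD would typically plug this helper straight into its port operations
 * ("my_port_ops" is a hypothetical name):
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		...
 *	};
 */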
6192c6fd2807SJeff Garzik 
6193c6fd2807SJeff Garzik /**
6194c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
6195c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
6196c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
6197c6fd2807SJeff Garzik  *
6198c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
6199c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
6200c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
6201c6fd2807SJeff Garzik  *
6202c6fd2807SJeff Garzik  *	LOCKING:
6203cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6204c6fd2807SJeff Garzik  *
6205c6fd2807SJeff Garzik  *	RETURNS:
6206c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
6207c6fd2807SJeff Garzik  */
6208c6fd2807SJeff Garzik 
6209c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap,
6210c6fd2807SJeff Garzik 				  struct ata_queued_cmd *qc)
6211c6fd2807SJeff Garzik {
62129af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
6213c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
6214c6fd2807SJeff Garzik 
6215c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
621644877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6217c6fd2807SJeff Garzik 
6218c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
6219c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
6220c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
6221c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
6222c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
6223c6fd2807SJeff Garzik 		 */
6224c6fd2807SJeff Garzik 
6225c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6226405e66b3STejun Heo 		 * The flag is set only for ATAPI devices, so there is no
6227405e66b3STejun Heo 		 * need to check ata_is_atapi(qc->tf.protocol) again.
6228c6fd2807SJeff Garzik 		 */
6229c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6230c6fd2807SJeff Garzik 			goto idle_irq;
6231c6fd2807SJeff Garzik 		break;
6232c6fd2807SJeff Garzik 	case HSM_ST_LAST:
6233c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
62340dc36888STejun Heo 		    qc->tf.protocol == ATAPI_PROT_DMA) {
6235c6fd2807SJeff Garzik 			/* check status of DMA engine */
6236c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
623744877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
623844877b4eSTejun Heo 				ap->print_id, host_stat);
6239c6fd2807SJeff Garzik 
6240c6fd2807SJeff Garzik 			/* if it's not our irq... */
6241c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
6242c6fd2807SJeff Garzik 				goto idle_irq;
6243c6fd2807SJeff Garzik 
6244c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
6245c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
6246c6fd2807SJeff Garzik 
6247c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
6248c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
6249c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
6250c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
6251c6fd2807SJeff Garzik 			}
6252c6fd2807SJeff Garzik 		}
6253c6fd2807SJeff Garzik 		break;
6254c6fd2807SJeff Garzik 	case HSM_ST:
6255c6fd2807SJeff Garzik 		break;
6256c6fd2807SJeff Garzik 	default:
6257c6fd2807SJeff Garzik 		goto idle_irq;
6258c6fd2807SJeff Garzik 	}
6259c6fd2807SJeff Garzik 
6260c6fd2807SJeff Garzik 	/* check altstatus */
6261c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
6262c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
6263c6fd2807SJeff Garzik 		goto idle_irq;
6264c6fd2807SJeff Garzik 
6265c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
6266c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
6267c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
6268c6fd2807SJeff Garzik 		goto idle_irq;
6269c6fd2807SJeff Garzik 
6270c6fd2807SJeff Garzik 	/* ack bmdma irq events */
6271c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
6272c6fd2807SJeff Garzik 
6273c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
6274ea54763fSTejun Heo 
6275ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
62760dc36888STejun Heo 				       qc->tf.protocol == ATAPI_PROT_DMA))
6277ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6278ea54763fSTejun Heo 
6279c6fd2807SJeff Garzik 	return 1;	/* irq handled */
6280c6fd2807SJeff Garzik 
6281c6fd2807SJeff Garzik idle_irq:
6282c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
6283c6fd2807SJeff Garzik 
6284c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6285c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
62866d32d30fSJeff Garzik 		ata_chk_status(ap);
62876d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
6288c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6289c6fd2807SJeff Garzik 		return 1;
6290c6fd2807SJeff Garzik 	}
6291c6fd2807SJeff Garzik #endif
6292c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
6293c6fd2807SJeff Garzik }
6294c6fd2807SJeff Garzik 
6295c6fd2807SJeff Garzik /**
6296c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
6297c6fd2807SJeff Garzik  *	@irq: irq line (unused)
6298cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
6299c6fd2807SJeff Garzik  *
6300c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
6301c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
6302c6fd2807SJeff Garzik  *
6303c6fd2807SJeff Garzik  *	LOCKING:
6304cca3974eSJeff Garzik  *	Obtains host lock during operation.
6305c6fd2807SJeff Garzik  *
6306c6fd2807SJeff Garzik  *	RETURNS:
6307c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
6308c6fd2807SJeff Garzik  */
6309c6fd2807SJeff Garzik 
63107d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance)
6311c6fd2807SJeff Garzik {
6312cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
6313c6fd2807SJeff Garzik 	unsigned int i;
6314c6fd2807SJeff Garzik 	unsigned int handled = 0;
6315c6fd2807SJeff Garzik 	unsigned long flags;
6316c6fd2807SJeff Garzik 
6317c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6318cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
6319c6fd2807SJeff Garzik 
6320cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6321c6fd2807SJeff Garzik 		struct ata_port *ap;
6322c6fd2807SJeff Garzik 
6323cca3974eSJeff Garzik 		ap = host->ports[i];
6324c6fd2807SJeff Garzik 		if (ap &&
6325c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
6326c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
6327c6fd2807SJeff Garzik 
63289af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
6329c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6330c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
6331c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
6332c6fd2807SJeff Garzik 		}
6333c6fd2807SJeff Garzik 	}
6334c6fd2807SJeff Garzik 
6335cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
6336c6fd2807SJeff Garzik 
6337c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
6338c6fd2807SJeff Garzik }
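
/*
 * Illustrative sketch, not part of the original file: this handler is
 * normally wired up when the host is activated, roughly:
 *
 *	rc = devm_request_irq(host->dev, irq, ata_interrupt, IRQF_SHARED,
 *			      "my_driver", host);
 *
 * The dev_instance argument must be the struct ata_host pointer, since
 * the handler walks host->ports[] itself.  "my_driver" and the local irq
 * and rc variables are assumptions made for the example.
 */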
6339c6fd2807SJeff Garzik 
6340c6fd2807SJeff Garzik /**
6341c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
6342936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
6343c6fd2807SJeff Garzik  *
6344936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
6345c6fd2807SJeff Garzik  *
6346c6fd2807SJeff Garzik  *	LOCKING:
6347c6fd2807SJeff Garzik  *	None.
6348c6fd2807SJeff Garzik  *
6349c6fd2807SJeff Garzik  *	RETURNS:
6350c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
6351c6fd2807SJeff Garzik  */
6352936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
6353c6fd2807SJeff Garzik {
6354936fd732STejun Heo 	struct ata_port *ap = link->ap;
6355936fd732STejun Heo 
6356a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6357c6fd2807SJeff Garzik }
6358c6fd2807SJeff Garzik 
6359c6fd2807SJeff Garzik /**
6360c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
6361936fd732STejun Heo  *	@link: ATA link to read SCR for
6362c6fd2807SJeff Garzik  *	@reg: SCR to read
6363c6fd2807SJeff Garzik  *	@val: Place to store read value
6364c6fd2807SJeff Garzik  *
6365936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
6366633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6367633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6368c6fd2807SJeff Garzik  *
6369c6fd2807SJeff Garzik  *	LOCKING:
6370633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6371c6fd2807SJeff Garzik  *
6372c6fd2807SJeff Garzik  *	RETURNS:
6373c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6374c6fd2807SJeff Garzik  */
6375936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6376c6fd2807SJeff Garzik {
6377633273a3STejun Heo 	if (ata_is_host_link(link)) {
6378936fd732STejun Heo 		struct ata_port *ap = link->ap;
6379936fd732STejun Heo 
6380936fd732STejun Heo 		if (sata_scr_valid(link))
6381da3dbb17STejun Heo 			return ap->ops->scr_read(ap, reg, val);
6382c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6383c6fd2807SJeff Garzik 	}
6384c6fd2807SJeff Garzik 
6385633273a3STejun Heo 	return sata_pmp_scr_read(link, reg, val);
6386633273a3STejun Heo }
6387633273a3STejun Heo 
6388c6fd2807SJeff Garzik /**
6389c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
6390936fd732STejun Heo  *	@link: ATA link to write SCR for
6391c6fd2807SJeff Garzik  *	@reg: SCR to write
6392c6fd2807SJeff Garzik  *	@val: value to write
6393c6fd2807SJeff Garzik  *
6394936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
6395633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6396633273a3STejun Heo  *	the port is SATA and the port implements ->scr_write.
6397c6fd2807SJeff Garzik  *
6398c6fd2807SJeff Garzik  *	LOCKING:
6399633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6400c6fd2807SJeff Garzik  *
6401c6fd2807SJeff Garzik  *	RETURNS:
6402c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6403c6fd2807SJeff Garzik  */
6404936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
6405c6fd2807SJeff Garzik {
6406633273a3STejun Heo 	if (ata_is_host_link(link)) {
6407936fd732STejun Heo 		struct ata_port *ap = link->ap;
6408936fd732STejun Heo 
6409936fd732STejun Heo 		if (sata_scr_valid(link))
6410da3dbb17STejun Heo 			return ap->ops->scr_write(ap, reg, val);
6411c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6412c6fd2807SJeff Garzik 	}
6413c6fd2807SJeff Garzik 
6414633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6415633273a3STejun Heo }
6416633273a3STejun Heo 
6417c6fd2807SJeff Garzik /**
6418c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6419936fd732STejun Heo  *	@link: ATA link to write SCR for
6420c6fd2807SJeff Garzik  *	@reg: SCR to write
6421c6fd2807SJeff Garzik  *	@val: value to write
6422c6fd2807SJeff Garzik  *
6423c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
6424c6fd2807SJeff Garzik  *	function performs a flush after writing to the register.
6425c6fd2807SJeff Garzik  *
6426c6fd2807SJeff Garzik  *	LOCKING:
6427633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6428c6fd2807SJeff Garzik  *
6429c6fd2807SJeff Garzik  *	RETURNS:
6430c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6431c6fd2807SJeff Garzik  */
6432936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6433c6fd2807SJeff Garzik {
6434633273a3STejun Heo 	if (ata_is_host_link(link)) {
6435936fd732STejun Heo 		struct ata_port *ap = link->ap;
6436da3dbb17STejun Heo 		int rc;
6437da3dbb17STejun Heo 
6438936fd732STejun Heo 		if (sata_scr_valid(link)) {
6439da3dbb17STejun Heo 			rc = ap->ops->scr_write(ap, reg, val);
6440da3dbb17STejun Heo 			if (rc == 0)
6441da3dbb17STejun Heo 				rc = ap->ops->scr_read(ap, reg, &val);
6442da3dbb17STejun Heo 			return rc;
6443c6fd2807SJeff Garzik 		}
6444c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6445c6fd2807SJeff Garzik 	}
6446c6fd2807SJeff Garzik 
6447633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6448633273a3STejun Heo }
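
/*
 * Illustrative sketch, not part of the original file: limiting a link to
 * 3.0 Gbps by updating the SPD field (bits 7:4) of SControl through these
 * helpers might look roughly like this:
 *
 *	u32 scontrol;
 *
 *	if (sata_scr_read(link, SCR_CONTROL, &scontrol) == 0) {
 *		scontrol = (scontrol & ~0xf0) | (2 << 4);
 *		sata_scr_write(link, SCR_CONTROL, scontrol);
 *	}
 */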
6449633273a3STejun Heo 
6450c6fd2807SJeff Garzik /**
6451936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6452936fd732STejun Heo  *	@link: ATA link to test
6453c6fd2807SJeff Garzik  *
6454936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6455936fd732STejun Heo  *	0 if online status of @link cannot be obtained, so
6456936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6457c6fd2807SJeff Garzik  *
6458c6fd2807SJeff Garzik  *	LOCKING:
6459c6fd2807SJeff Garzik  *	None.
6460c6fd2807SJeff Garzik  *
6461c6fd2807SJeff Garzik  *	RETURNS:
6462c6fd2807SJeff Garzik  *	1 if the port online status is available and online.
6463c6fd2807SJeff Garzik  */
6464936fd732STejun Heo int ata_link_online(struct ata_link *link)
6465c6fd2807SJeff Garzik {
6466c6fd2807SJeff Garzik 	u32 sstatus;
6467c6fd2807SJeff Garzik 
6468936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6469936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6470c6fd2807SJeff Garzik 		return 1;
6471c6fd2807SJeff Garzik 	return 0;
6472c6fd2807SJeff Garzik }
6473c6fd2807SJeff Garzik 
6474c6fd2807SJeff Garzik /**
6475936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6476936fd732STejun Heo  *	@link: ATA link to test
6477c6fd2807SJeff Garzik  *
6478936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6479936fd732STejun Heo  *	returns 0 if offline status of @link cannot be obtained, so
6480936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6481c6fd2807SJeff Garzik  *
6482c6fd2807SJeff Garzik  *	LOCKING:
6483c6fd2807SJeff Garzik  *	None.
6484c6fd2807SJeff Garzik  *
6485c6fd2807SJeff Garzik  *	RETURNS:
6486c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
6487c6fd2807SJeff Garzik  */
6488936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6489c6fd2807SJeff Garzik {
6490c6fd2807SJeff Garzik 	u32 sstatus;
6491c6fd2807SJeff Garzik 
6492936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6493936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6494c6fd2807SJeff Garzik 		return 1;
6495c6fd2807SJeff Garzik 	return 0;
6496c6fd2807SJeff Garzik }
6497c6fd2807SJeff Garzik 
6498c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6499c6fd2807SJeff Garzik {
6500c6fd2807SJeff Garzik 	unsigned int err_mask;
6501c6fd2807SJeff Garzik 	u8 cmd;
6502c6fd2807SJeff Garzik 
6503c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6504c6fd2807SJeff Garzik 		return 0;
6505c6fd2807SJeff Garzik 
65066fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6507c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6508c6fd2807SJeff Garzik 	else
6509c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6510c6fd2807SJeff Garzik 
65114f34337bSAlan Cox 	/* This is wrong.  On a failed flush we get back the LBA of the lost
65124f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
65134f34337bSAlan Cox 	   a further flush command to continue the writeback until it
65144f34337bSAlan Cox 	   no longer errors. */
6515c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6516c6fd2807SJeff Garzik 	if (err_mask) {
6517c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6518c6fd2807SJeff Garzik 		return -EIO;
6519c6fd2807SJeff Garzik 	}
6520c6fd2807SJeff Garzik 
6521c6fd2807SJeff Garzik 	return 0;
6522c6fd2807SJeff Garzik }
6523c6fd2807SJeff Garzik 
65246ffa01d8STejun Heo #ifdef CONFIG_PM
6525cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6526cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6527cca3974eSJeff Garzik 			       int wait)
6528c6fd2807SJeff Garzik {
6529c6fd2807SJeff Garzik 	unsigned long flags;
6530c6fd2807SJeff Garzik 	int i, rc;
6531c6fd2807SJeff Garzik 
6532cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6533cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6534e3667ebfSTejun Heo 		struct ata_link *link;
6535c6fd2807SJeff Garzik 
6536c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6537c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6538c6fd2807SJeff Garzik 		 */
6539c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6540c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6541c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6542c6fd2807SJeff Garzik 		}
6543c6fd2807SJeff Garzik 
6544c6fd2807SJeff Garzik 		/* request PM ops to EH */
6545c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6546c6fd2807SJeff Garzik 
6547c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6548c6fd2807SJeff Garzik 		if (wait) {
6549c6fd2807SJeff Garzik 			rc = 0;
6550c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6551c6fd2807SJeff Garzik 		}
6552c6fd2807SJeff Garzik 
6553c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6554e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6555e3667ebfSTejun Heo 			link->eh_info.action |= action;
6556e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6557e3667ebfSTejun Heo 		}
6558c6fd2807SJeff Garzik 
6559c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6560c6fd2807SJeff Garzik 
6561c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6562c6fd2807SJeff Garzik 
6563c6fd2807SJeff Garzik 		/* wait and check result */
6564c6fd2807SJeff Garzik 		if (wait) {
6565c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6566c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6567c6fd2807SJeff Garzik 			if (rc)
6568c6fd2807SJeff Garzik 				return rc;
6569c6fd2807SJeff Garzik 		}
6570c6fd2807SJeff Garzik 	}
6571c6fd2807SJeff Garzik 
6572c6fd2807SJeff Garzik 	return 0;
6573c6fd2807SJeff Garzik }
6574c6fd2807SJeff Garzik 
6575c6fd2807SJeff Garzik /**
6576cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6577cca3974eSJeff Garzik  *	@host: host to suspend
6578c6fd2807SJeff Garzik  *	@mesg: PM message
6579c6fd2807SJeff Garzik  *
6580cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6581c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6582c6fd2807SJeff Garzik  *	to finish.
6583c6fd2807SJeff Garzik  *
6584c6fd2807SJeff Garzik  *	LOCKING:
6585c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6586c6fd2807SJeff Garzik  *
6587c6fd2807SJeff Garzik  *	RETURNS:
6588c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6589c6fd2807SJeff Garzik  */
6590cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6591c6fd2807SJeff Garzik {
65929666f400STejun Heo 	int rc;
6593c6fd2807SJeff Garzik 
6594ca77329fSKristen Carlson Accardi 	/*
6595ca77329fSKristen Carlson Accardi 	 * disable link pm on all ports before requesting
6596ca77329fSKristen Carlson Accardi 	 * any pm activity
6597ca77329fSKristen Carlson Accardi 	 */
6598ca77329fSKristen Carlson Accardi 	ata_lpm_enable(host);
6599ca77329fSKristen Carlson Accardi 
6600cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
66019666f400STejun Heo 	if (rc == 0)
6602cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
6603c6fd2807SJeff Garzik 	return rc;
6604c6fd2807SJeff Garzik }
6605c6fd2807SJeff Garzik 
6606c6fd2807SJeff Garzik /**
6607cca3974eSJeff Garzik  *	ata_host_resume - resume host
6608cca3974eSJeff Garzik  *	@host: host to resume
6609c6fd2807SJeff Garzik  *
6610cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6611c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6612c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
6613c6fd2807SJeff Garzik  *
6614c6fd2807SJeff Garzik  *	LOCKING:
6615c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6616c6fd2807SJeff Garzik  */
6617cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6618c6fd2807SJeff Garzik {
6619cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6620c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6621cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6622ca77329fSKristen Carlson Accardi 
6623ca77329fSKristen Carlson Accardi 	/* reenable link pm */
6624ca77329fSKristen Carlson Accardi 	ata_lpm_disable(host);
6625c6fd2807SJeff Garzik }
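
/*
 * Illustrative sketch, not part of the original file: a PCI LLD's suspend
 * hook typically wraps ata_host_suspend() and then lets the generic PCI
 * helper save state and power down ("my_pci_suspend" is a hypothetical
 * name):
 *
 *	static int my_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *		int rc;
 *
 *		rc = ata_host_suspend(host, mesg);
 *		if (rc)
 *			return rc;
 *
 *		ata_pci_device_do_suspend(pdev, mesg);
 *		return 0;
 *	}
 */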
66266ffa01d8STejun Heo #endif
6627c6fd2807SJeff Garzik 
6628c6fd2807SJeff Garzik /**
6629c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6630c6fd2807SJeff Garzik  *	@ap: Port to initialize
6631c6fd2807SJeff Garzik  *
6632c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6633c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
6634c6fd2807SJeff Garzik  *
6635c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6636c6fd2807SJeff Garzik  *
6637c6fd2807SJeff Garzik  *	LOCKING:
6638c6fd2807SJeff Garzik  *	Inherited from caller.
6639c6fd2807SJeff Garzik  */
6640c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6641c6fd2807SJeff Garzik {
6642c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6643c6fd2807SJeff Garzik 	int rc;
6644c6fd2807SJeff Garzik 
6645f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6646f0d36efdSTejun Heo 				      GFP_KERNEL);
6647c6fd2807SJeff Garzik 	if (!ap->prd)
6648c6fd2807SJeff Garzik 		return -ENOMEM;
6649c6fd2807SJeff Garzik 
6650c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
6651f0d36efdSTejun Heo 	if (rc)
6652c6fd2807SJeff Garzik 		return rc;
6653c6fd2807SJeff Garzik 
6654f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6655f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
6656c6fd2807SJeff Garzik 	return 0;
6657c6fd2807SJeff Garzik }
6658c6fd2807SJeff Garzik 
6659c6fd2807SJeff Garzik /**
6660c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6661c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6662c6fd2807SJeff Garzik  *
6663c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6664c6fd2807SJeff Garzik  *
6665c6fd2807SJeff Garzik  *	LOCKING:
6666c6fd2807SJeff Garzik  *	Inherited from caller.
6667c6fd2807SJeff Garzik  */
6668c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6669c6fd2807SJeff Garzik {
66709af5c9c9STejun Heo 	struct ata_link *link = dev->link;
66719af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6672c6fd2807SJeff Garzik 	unsigned long flags;
6673c6fd2807SJeff Garzik 
6674c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
66759af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
66769af5c9c9STejun Heo 	link->sata_spd = 0;
6677c6fd2807SJeff Garzik 
6678c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6679c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6680cca3974eSJeff Garzik 	 * host lock.
6681c6fd2807SJeff Garzik 	 */
6682c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6683c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
66843dcc323fSTejun Heo 	dev->horkage = 0;
6685c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6686c6fd2807SJeff Garzik 
6687c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6688c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6689c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6690c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6691c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6692c6fd2807SJeff Garzik }
6693c6fd2807SJeff Garzik 
6694c6fd2807SJeff Garzik /**
66954fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
66964fb37a25STejun Heo  *	@ap: ATA port link is attached to
66974fb37a25STejun Heo  *	@link: Link structure to initialize
66988989805dSTejun Heo  *	@pmp: Port multiplier port number
66994fb37a25STejun Heo  *
67004fb37a25STejun Heo  *	Initialize @link.
67014fb37a25STejun Heo  *
67024fb37a25STejun Heo  *	LOCKING:
67034fb37a25STejun Heo  *	Kernel thread context (may sleep)
67044fb37a25STejun Heo  */
6705fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
67064fb37a25STejun Heo {
67074fb37a25STejun Heo 	int i;
67084fb37a25STejun Heo 
67094fb37a25STejun Heo 	/* clear everything except for devices */
67104fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
67114fb37a25STejun Heo 
67124fb37a25STejun Heo 	link->ap = ap;
67138989805dSTejun Heo 	link->pmp = pmp;
67144fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
67154fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
67164fb37a25STejun Heo 
67174fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
67184fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
67194fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
67204fb37a25STejun Heo 
67214fb37a25STejun Heo 		dev->link = link;
67224fb37a25STejun Heo 		dev->devno = dev - link->device;
67234fb37a25STejun Heo 		ata_dev_init(dev);
67244fb37a25STejun Heo 	}
67254fb37a25STejun Heo }
67264fb37a25STejun Heo 
67274fb37a25STejun Heo /**
67284fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
67294fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
67304fb37a25STejun Heo  *
67314fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
67324fb37a25STejun Heo  *	configured value.
67334fb37a25STejun Heo  *
67344fb37a25STejun Heo  *	LOCKING:
67354fb37a25STejun Heo  *	Kernel thread context (may sleep).
67364fb37a25STejun Heo  *
67374fb37a25STejun Heo  *	RETURNS:
67384fb37a25STejun Heo  *	0 on success, -errno on failure.
67394fb37a25STejun Heo  */
6740fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
67414fb37a25STejun Heo {
67424fb37a25STejun Heo 	u32 scontrol, spd;
67434fb37a25STejun Heo 	int rc;
67444fb37a25STejun Heo 
67454fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
67464fb37a25STejun Heo 	if (rc)
67474fb37a25STejun Heo 		return rc;
67484fb37a25STejun Heo 
67494fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
67504fb37a25STejun Heo 	if (spd)
67514fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
67524fb37a25STejun Heo 
67534fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
67544fb37a25STejun Heo 
67554fb37a25STejun Heo 	return 0;
67564fb37a25STejun Heo }
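
/*
 * Illustrative example, not part of the original file: if the SPD field
 * of SControl (bits 7:4) reads 2, i.e. the link is configured to at most
 * 3.0 Gbps, then (1 << 2) - 1 == 0x3, so hw_sata_spd_limit keeps only
 * bits 0 (1.5 Gbps) and 1 (3.0 Gbps) set.  A value of 0 means no
 * restriction and leaves the limit untouched.
 */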
67574fb37a25STejun Heo 
67584fb37a25STejun Heo /**
6759f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6760f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6761c6fd2807SJeff Garzik  *
6762f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6763f3187195STejun Heo  *
6764f3187195STejun Heo  *	RETURNS:
6765f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6766c6fd2807SJeff Garzik  *
6767c6fd2807SJeff Garzik  *	LOCKING:
6768f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6769c6fd2807SJeff Garzik  */
6770f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6771c6fd2807SJeff Garzik {
6772f3187195STejun Heo 	struct ata_port *ap;
6773c6fd2807SJeff Garzik 
6774f3187195STejun Heo 	DPRINTK("ENTER\n");
6775f3187195STejun Heo 
6776f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6777f3187195STejun Heo 	if (!ap)
6778f3187195STejun Heo 		return NULL;
6779f3187195STejun Heo 
6780f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6781cca3974eSJeff Garzik 	ap->lock = &host->lock;
6782c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6783f3187195STejun Heo 	ap->print_id = -1;
6784c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6785cca3974eSJeff Garzik 	ap->host = host;
6786f3187195STejun Heo 	ap->dev = host->dev;
6787c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6788c6fd2807SJeff Garzik 
6789c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6790c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6791c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6792c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6793c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6794c6fd2807SJeff Garzik #else
6795c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6796c6fd2807SJeff Garzik #endif
6797c6fd2807SJeff Garzik 
679865f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
679965f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
680065f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6801c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6802c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
68035ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
68045ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
68055ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6806c6fd2807SJeff Garzik 
6807c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6808c6fd2807SJeff Garzik 
68098989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6810c6fd2807SJeff Garzik 
6811c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6812c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6813c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6814c6fd2807SJeff Garzik #endif
6815c6fd2807SJeff Garzik 	return ap;
6816c6fd2807SJeff Garzik }
6817c6fd2807SJeff Garzik 
6818f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6819f0d36efdSTejun Heo {
6820f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6821f0d36efdSTejun Heo 	int i;
6822f0d36efdSTejun Heo 
6823f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6824f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6825f0d36efdSTejun Heo 
6826ecef7253STejun Heo 		if (!ap)
6827ecef7253STejun Heo 			continue;
6828ecef7253STejun Heo 
68294911487aSTejun Heo 		if (ap->scsi_host)
68301aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
68311aa506e4STejun Heo 
6832633273a3STejun Heo 		kfree(ap->pmp_link);
68334911487aSTejun Heo 		kfree(ap);
68341aa506e4STejun Heo 		host->ports[i] = NULL;
68351aa506e4STejun Heo 	}
68361aa506e4STejun Heo 
68371aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6838f0d36efdSTejun Heo }
6839f0d36efdSTejun Heo 
6840c6fd2807SJeff Garzik /**
6841f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6842f3187195STejun Heo  *	@dev: generic device this host is associated with
6843f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6844f3187195STejun Heo  *
6845f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6846f3187195STejun Heo  *	this function to allocate a host, fully initializes it and then
6847f3187195STejun Heo  *	attaches it using ata_host_register().
6848f3187195STejun Heo  *
6849f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6850f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6851f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6852f3187195STejun Heo  *	ports will be automatically freed on registration.
6853f3187195STejun Heo  *
6854f3187195STejun Heo  *	RETURNS:
6855f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6856f3187195STejun Heo  *
6857f3187195STejun Heo  *	LOCKING:
6858f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6859f3187195STejun Heo  */
6860f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6861f3187195STejun Heo {
6862f3187195STejun Heo 	struct ata_host *host;
6863f3187195STejun Heo 	size_t sz;
6864f3187195STejun Heo 	int i;
6865f3187195STejun Heo 
6866f3187195STejun Heo 	DPRINTK("ENTER\n");
6867f3187195STejun Heo 
6868f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6869f3187195STejun Heo 		return NULL;
6870f3187195STejun Heo 
6871f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6872f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6874f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6875f3187195STejun Heo 	if (!host)
6876f3187195STejun Heo 		goto err_out;
6877f3187195STejun Heo 
6878f3187195STejun Heo 	devres_add(dev, host);
6879f3187195STejun Heo 	dev_set_drvdata(dev, host);
6880f3187195STejun Heo 
6881f3187195STejun Heo 	spin_lock_init(&host->lock);
6882f3187195STejun Heo 	host->dev = dev;
6883f3187195STejun Heo 	host->n_ports = max_ports;
6884f3187195STejun Heo 
6885f3187195STejun Heo 	/* allocate ports bound to this host */
6886f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6887f3187195STejun Heo 		struct ata_port *ap;
6888f3187195STejun Heo 
6889f3187195STejun Heo 		ap = ata_port_alloc(host);
6890f3187195STejun Heo 		if (!ap)
6891f3187195STejun Heo 			goto err_out;
6892f3187195STejun Heo 
6893f3187195STejun Heo 		ap->port_no = i;
6894f3187195STejun Heo 		host->ports[i] = ap;
6895f3187195STejun Heo 	}
6896f3187195STejun Heo 
6897f3187195STejun Heo 	devres_remove_group(dev, NULL);
6898f3187195STejun Heo 	return host;
6899f3187195STejun Heo 
6900f3187195STejun Heo  err_out:
6901f3187195STejun Heo 	devres_release_group(dev, NULL);
6902f3187195STejun Heo 	return NULL;
6903f3187195STejun Heo }
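
/*
 * Example (illustrative sketch, not part of this file): an LLD that only
 * learns the real port count after probing the hardware may over-allocate
 * with ata_host_alloc() and shrink host->n_ports before registration; the
 * unused ports are freed by ata_host_register().  "FOO_MAX_PORTS",
 * "foo_count_ports()", "foo_setup_port()" and "foo_sht" are hypothetical.
 *
 *	struct ata_host *host;
 *	int i, rc;
 *
 *	host = ata_host_alloc(dev, FOO_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *
 *	host->n_ports = foo_count_ports(host);
 *	for (i = 0; i < host->n_ports; i++)
 *		foo_setup_port(host->ports[i]);	(set ap->ops, masks, ioaddr)
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	return ata_host_register(host, &foo_sht);
 */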
6904f3187195STejun Heo 
6905f3187195STejun Heo /**
6906f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6907f5cda257STejun Heo  *	@dev: generic device this host is associated with
6908f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6909f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6910f5cda257STejun Heo  *
6911f5cda257STejun Heo  *	Allocate ATA host and initialize it with info from @ppi.  If NULL
6912f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports; the
6913f5cda257STejun Heo  *	last entry will then be used for the remaining ports.
6914f5cda257STejun Heo  *
6915f5cda257STejun Heo  *	RETURNS:
6916f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6917f5cda257STejun Heo  *
6918f5cda257STejun Heo  *	LOCKING:
6919f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6920f5cda257STejun Heo  */
6921f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6922f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6923f5cda257STejun Heo 				      int n_ports)
6924f5cda257STejun Heo {
6925f5cda257STejun Heo 	const struct ata_port_info *pi;
6926f5cda257STejun Heo 	struct ata_host *host;
6927f5cda257STejun Heo 	int i, j;
6928f5cda257STejun Heo 
6929f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6930f5cda257STejun Heo 	if (!host)
6931f5cda257STejun Heo 		return NULL;
6932f5cda257STejun Heo 
6933f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6934f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6935f5cda257STejun Heo 
6936f5cda257STejun Heo 		if (ppi[j])
6937f5cda257STejun Heo 			pi = ppi[j++];
6938f5cda257STejun Heo 
6939f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6940f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6941f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6942f5cda257STejun Heo 		ap->flags |= pi->flags;
69430c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6944f5cda257STejun Heo 		ap->ops = pi->port_ops;
6945f5cda257STejun Heo 
6946f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6947f5cda257STejun Heo 			host->ops = pi->port_ops;
6948f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6949f5cda257STejun Heo 			host->private_data = pi->private_data;
6950f5cda257STejun Heo 	}
6951f5cda257STejun Heo 
6952f5cda257STejun Heo 	return host;
6953f5cda257STejun Heo }
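
/*
 * Example (illustrative sketch): a two-port controller whose ports share
 * one port_info can pass a short, NULL-terminated @ppi; the last entry is
 * reused for the remaining ports.  "foo_port_info" and "foo_port_ops" are
 * hypothetical.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= 0x1f,		(pio0-4)
 *		.mwdma_mask	= 0x07,		(mwdma0-2)
 *		.udma_mask	= 0x3f,		(udma0-5)
 *		.port_ops	= &foo_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 */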
6954f5cda257STejun Heo 
695532ebbc0cSTejun Heo static void ata_host_stop(struct device *gendev, void *res)
695632ebbc0cSTejun Heo {
695732ebbc0cSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
695832ebbc0cSTejun Heo 	int i;
695932ebbc0cSTejun Heo 
696032ebbc0cSTejun Heo 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
696132ebbc0cSTejun Heo 
696232ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
696332ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
696432ebbc0cSTejun Heo 
696532ebbc0cSTejun Heo 		if (ap->ops->port_stop)
696632ebbc0cSTejun Heo 			ap->ops->port_stop(ap);
696732ebbc0cSTejun Heo 	}
696832ebbc0cSTejun Heo 
696932ebbc0cSTejun Heo 	if (host->ops->host_stop)
697032ebbc0cSTejun Heo 		host->ops->host_stop(host);
697132ebbc0cSTejun Heo }
697232ebbc0cSTejun Heo 
6973f5cda257STejun Heo /**
6974ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6975ecef7253STejun Heo  *	@host: ATA host to start ports for
6976ecef7253STejun Heo  *
6977ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6978ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6979ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6980f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6981f3187195STejun Heo  *	first non-dummy port ops.
6982ecef7253STejun Heo  *
6983ecef7253STejun Heo  *	LOCKING:
6984ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6985ecef7253STejun Heo  *
6986ecef7253STejun Heo  *	RETURNS:
6987ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6988ecef7253STejun Heo  */
6989ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6990ecef7253STejun Heo {
699132ebbc0cSTejun Heo 	int have_stop = 0;
699232ebbc0cSTejun Heo 	void *start_dr = NULL;
6993ecef7253STejun Heo 	int i, rc;
6994ecef7253STejun Heo 
6995ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6996ecef7253STejun Heo 		return 0;
6997ecef7253STejun Heo 
6998ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6999ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
7000ecef7253STejun Heo 
7001f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
7002f3187195STejun Heo 			host->ops = ap->ops;
7003f3187195STejun Heo 
700432ebbc0cSTejun Heo 		if (ap->ops->port_stop)
700532ebbc0cSTejun Heo 			have_stop = 1;
700632ebbc0cSTejun Heo 	}
700732ebbc0cSTejun Heo 
700832ebbc0cSTejun Heo 	if (host->ops->host_stop)
700932ebbc0cSTejun Heo 		have_stop = 1;
701032ebbc0cSTejun Heo 
701132ebbc0cSTejun Heo 	if (have_stop) {
701232ebbc0cSTejun Heo 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
701332ebbc0cSTejun Heo 		if (!start_dr)
701432ebbc0cSTejun Heo 			return -ENOMEM;
701532ebbc0cSTejun Heo 	}
701632ebbc0cSTejun Heo 
701732ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
701832ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
701932ebbc0cSTejun Heo 
7020ecef7253STejun Heo 		if (ap->ops->port_start) {
7021ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
7022ecef7253STejun Heo 			if (rc) {
70230f9fe9b7SAlan Cox 				if (rc != -ENODEV)
70240f757743SAndrew Morton 					dev_printk(KERN_ERR, host->dev,
70250f757743SAndrew Morton 						"failed to start port %d "
70260f757743SAndrew Morton 						"(errno=%d)\n", i, rc);
7027ecef7253STejun Heo 				goto err_out;
7028ecef7253STejun Heo 			}
7029ecef7253STejun Heo 		}
7030ecef7253STejun Heo 		ata_eh_freeze_port(ap);
7031ecef7253STejun Heo 	}
7032ecef7253STejun Heo 
703332ebbc0cSTejun Heo 	if (start_dr)
703432ebbc0cSTejun Heo 		devres_add(host->dev, start_dr);
7035ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
7036ecef7253STejun Heo 	return 0;
7037ecef7253STejun Heo 
7038ecef7253STejun Heo  err_out:
7039ecef7253STejun Heo 	while (--i >= 0) {
7040ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
7041ecef7253STejun Heo 
7042ecef7253STejun Heo 		if (ap->ops->port_stop)
7043ecef7253STejun Heo 			ap->ops->port_stop(ap);
7044ecef7253STejun Heo 	}
704532ebbc0cSTejun Heo 	devres_free(start_dr);
7046ecef7253STejun Heo 	return rc;
7047ecef7253STejun Heo }
7048ecef7253STejun Heo 
7049ecef7253STejun Heo /**
7050cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
7051cca3974eSJeff Garzik  *	@host:	host to initialize
7052cca3974eSJeff Garzik  *	@dev:	device host is attached to
7053cca3974eSJeff Garzik  *	@flags:	host flags
7054c6fd2807SJeff Garzik  *	@ops:	port_ops
7055c6fd2807SJeff Garzik  *
7056c6fd2807SJeff Garzik  *	LOCKING:
7057c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
7058c6fd2807SJeff Garzik  *
7059c6fd2807SJeff Garzik  */
7060f3187195STejun Heo /* KILLME - the only user left is ipr */
7061cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
7062cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
7063c6fd2807SJeff Garzik {
7064cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
7065cca3974eSJeff Garzik 	host->dev = dev;
7066cca3974eSJeff Garzik 	host->flags = flags;
7067cca3974eSJeff Garzik 	host->ops = ops;
7068c6fd2807SJeff Garzik }
7069c6fd2807SJeff Garzik 
7070c6fd2807SJeff Garzik /**
7071f3187195STejun Heo  *	ata_host_register - register initialized ATA host
7072f3187195STejun Heo  *	@host: ATA host to register
7073f3187195STejun Heo  *	@sht: template for SCSI host
7074c6fd2807SJeff Garzik  *
7075f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
7076f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
7077f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
7078f3187195STejun Heo  *	probes registered devices.
7079c6fd2807SJeff Garzik  *
7080c6fd2807SJeff Garzik  *	LOCKING:
7081f3187195STejun Heo  *	Inherited from calling layer (may sleep).
7082c6fd2807SJeff Garzik  *
7083c6fd2807SJeff Garzik  *	RETURNS:
7084f3187195STejun Heo  *	0 on success, -errno otherwise.
7085c6fd2807SJeff Garzik  */
7086f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7087c6fd2807SJeff Garzik {
7088f3187195STejun Heo 	int i, rc;
7089c6fd2807SJeff Garzik 
7090f3187195STejun Heo 	/* host must have been started */
7091f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
7092f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
7093f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
7094f3187195STejun Heo 		WARN_ON(1);
7095f3187195STejun Heo 		return -EINVAL;
709602f076aaSAlan Cox 	}
7097f0d36efdSTejun Heo 
7098f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
7099f3187195STejun Heo 	 * determine the exact number of ports to allocate at
7100f3187195STejun Heo 	 * allocation time.
7101f3187195STejun Heo 	 */
7102f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
7103f3187195STejun Heo 		kfree(host->ports[i]);
7104f0d36efdSTejun Heo 
7105f3187195STejun Heo 	/* give ports names and add SCSI hosts */
7106f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
7107f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
7108c6fd2807SJeff Garzik 
7109f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
7110ecef7253STejun Heo 	if (rc)
7111f3187195STejun Heo 		return rc;
7112ecef7253STejun Heo 
7113fafbae87STejun Heo 	/* associate with ACPI nodes */
7114fafbae87STejun Heo 	ata_acpi_associate(host);
7115fafbae87STejun Heo 
7116f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
7117cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7118cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7119f3187195STejun Heo 		unsigned long xfer_mask;
7120f3187195STejun Heo 
7121f3187195STejun Heo 		/* set SATA cable type if still unset */
7122f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7123f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
7124c6fd2807SJeff Garzik 
7125c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
71264fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
7127c6fd2807SJeff Garzik 
7128cbcdd875STejun Heo 		/* print per-port info to dmesg */
7129f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7130f3187195STejun Heo 					      ap->udma_mask);
7131f3187195STejun Heo 
7132abf6e8edSTejun Heo 		if (!ata_port_is_dummy(ap)) {
7133cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
7134cbcdd875STejun Heo 					"%cATA max %s %s\n",
7135a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7136f3187195STejun Heo 					ata_mode_string(xfer_mask),
7137cbcdd875STejun Heo 					ap->link.eh_info.desc);
7138abf6e8edSTejun Heo 			ata_ehi_clear_desc(&ap->link.eh_info);
7139abf6e8edSTejun Heo 		} else
7140f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7141c6fd2807SJeff Garzik 	}
7142c6fd2807SJeff Garzik 
7143f3187195STejun Heo 	/* perform each probe synchronously */
7144f3187195STejun Heo 	DPRINTK("probe begin\n");
7145f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
7146f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
7147f3187195STejun Heo 		int rc;
7148f3187195STejun Heo 
7149f3187195STejun Heo 		/* probe */
7150c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
71519af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
7152c6fd2807SJeff Garzik 			unsigned long flags;
7153c6fd2807SJeff Garzik 
7154c6fd2807SJeff Garzik 			ata_port_probe(ap);
7155c6fd2807SJeff Garzik 
7156c6fd2807SJeff Garzik 			/* kick EH for boot probing */
7157c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
7158c6fd2807SJeff Garzik 
7159f58229f8STejun Heo 			ehi->probe_mask =
7160f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
7161c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
7162c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7163c6fd2807SJeff Garzik 
7164f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7165c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
7166c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
7167c6fd2807SJeff Garzik 
7168c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
7169c6fd2807SJeff Garzik 
7170c6fd2807SJeff Garzik 			/* wait for EH to finish */
7171c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
7172c6fd2807SJeff Garzik 		} else {
717344877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7174c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
717544877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
7176c6fd2807SJeff Garzik 
7177c6fd2807SJeff Garzik 			if (rc) {
7178c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
7179c6fd2807SJeff Garzik 				 * Current libata behavior will
7180c6fd2807SJeff Garzik 				 * tear down everything when
7181c6fd2807SJeff Garzik 				 * the module is removed
7182c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
7183c6fd2807SJeff Garzik 				 */
7184c6fd2807SJeff Garzik 			}
7185c6fd2807SJeff Garzik 		}
7186c6fd2807SJeff Garzik 	}
7187c6fd2807SJeff Garzik 
7188c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
7189c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
7190cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7191cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7192c6fd2807SJeff Garzik 
71931ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
7194ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
7195c6fd2807SJeff Garzik 	}
7196c6fd2807SJeff Garzik 
7197f3187195STejun Heo 	return 0;
7198f3187195STejun Heo }
7199f3187195STejun Heo 
7200f3187195STejun Heo /**
7201f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
7202f5cda257STejun Heo  *	@host: target ATA host
7203f5cda257STejun Heo  *	@irq: IRQ to request
7204f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
7205f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
7206f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
7207f5cda257STejun Heo  *
7208f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
7209f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
7210f5cda257STejun Heo  *	request IRQ and register it.  This helper takes necessary
7211f5cda257STejun Heo  *	arguments and performs the three steps in one go.
7212f5cda257STejun Heo  *
72133d46b2e2SPaul Mundt  *	An invalid IRQ skips the IRQ registration and expects the host to
72143d46b2e2SPaul Mundt  *	have set polling mode on the port. In this case, @irq_handler
72153d46b2e2SPaul Mundt  *	should be NULL.
72163d46b2e2SPaul Mundt  *
7217f5cda257STejun Heo  *	LOCKING:
7218f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
7219f5cda257STejun Heo  *
7220f5cda257STejun Heo  *	RETURNS:
7221f5cda257STejun Heo  *	0 on success, -errno otherwise.
7222f5cda257STejun Heo  */
7223f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
7224f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
7225f5cda257STejun Heo 		      struct scsi_host_template *sht)
7226f5cda257STejun Heo {
7227cbcdd875STejun Heo 	int i, rc;
7228f5cda257STejun Heo 
7229f5cda257STejun Heo 	rc = ata_host_start(host);
7230f5cda257STejun Heo 	if (rc)
7231f5cda257STejun Heo 		return rc;
7232f5cda257STejun Heo 
72333d46b2e2SPaul Mundt 	/* Special case for polling mode */
72343d46b2e2SPaul Mundt 	if (!irq) {
72353d46b2e2SPaul Mundt 		WARN_ON(irq_handler);
72363d46b2e2SPaul Mundt 		return ata_host_register(host, sht);
72373d46b2e2SPaul Mundt 	}
72383d46b2e2SPaul Mundt 
7239f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7240f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
7241f5cda257STejun Heo 	if (rc)
7242f5cda257STejun Heo 		return rc;
7243f5cda257STejun Heo 
7244cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
7245cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
72464031826bSTejun Heo 
7247f5cda257STejun Heo 	rc = ata_host_register(host, sht);
7248f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
7249f5cda257STejun Heo 	if (rc)
7250f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
7251f5cda257STejun Heo 
7252f5cda257STejun Heo 	return rc;
7253f5cda257STejun Heo }
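
/*
 * Example (illustrative sketch): the usual tail of an LLD probe routine.
 * With a valid IRQ the start/request_irq/register sequence collapses into
 * one call; passing irq == 0 with a NULL handler skips IRQ registration
 * for a polling-only host.  "foo_interrupt" and "foo_sht" are hypothetical.
 *
 *	rc = ata_host_activate(host, pdev->irq, foo_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 *
 * or, for polling mode:
 *
 *	rc = ata_host_activate(host, 0, NULL, 0, &foo_sht);
 */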
7254f5cda257STejun Heo 
7255f5cda257STejun Heo /**
7256c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
7257c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
7258c6fd2807SJeff Garzik  *
7259c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
7260c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
7261c6fd2807SJeff Garzik  *	be quiescent on return from this function.
7262c6fd2807SJeff Garzik  *
7263c6fd2807SJeff Garzik  *	LOCKING:
7264c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
7265c6fd2807SJeff Garzik  */
7266741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap)
7267c6fd2807SJeff Garzik {
7268c6fd2807SJeff Garzik 	unsigned long flags;
726941bda9c9STejun Heo 	struct ata_link *link;
7270f58229f8STejun Heo 	struct ata_device *dev;
7271c6fd2807SJeff Garzik 
7272c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
7273c6fd2807SJeff Garzik 		goto skip_eh;
7274c6fd2807SJeff Garzik 
7275c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
7276c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7277c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
7278c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7279c6fd2807SJeff Garzik 
7280c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
7281c6fd2807SJeff Garzik 
72827f9ad9b8STejun Heo 	/* EH is now guaranteed to see UNLOADING - EH context belongs
72837f9ad9b8STejun Heo 	 * to us.  Disable all existing devices.
7284c6fd2807SJeff Garzik 	 */
728541bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
728641bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
7287f58229f8STejun Heo 			ata_dev_disable(dev);
728841bda9c9STejun Heo 	}
7289c6fd2807SJeff Garzik 
7290c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
7291c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
7292c6fd2807SJeff Garzik 	 * target.
7293c6fd2807SJeff Garzik 	 */
7294c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7295c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
7296c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7297c6fd2807SJeff Garzik 
7298c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
729945a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
7300c6fd2807SJeff Garzik 
7301c6fd2807SJeff Garzik  skip_eh:
7302c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
7303cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
7304c6fd2807SJeff Garzik }
7305c6fd2807SJeff Garzik 
7306c6fd2807SJeff Garzik /**
73070529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
73080529c159STejun Heo  *	@host: Host to detach
73090529c159STejun Heo  *
73100529c159STejun Heo  *	Detach all ports of @host.
73110529c159STejun Heo  *
73120529c159STejun Heo  *	LOCKING:
73130529c159STejun Heo  *	Kernel thread context (may sleep).
73140529c159STejun Heo  */
73150529c159STejun Heo void ata_host_detach(struct ata_host *host)
73160529c159STejun Heo {
73170529c159STejun Heo 	int i;
73180529c159STejun Heo 
73190529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
73200529c159STejun Heo 		ata_port_detach(host->ports[i]);
7321562f0c2dSTejun Heo 
7322562f0c2dSTejun Heo 	/* the host is dead now, dissociate ACPI */
7323562f0c2dSTejun Heo 	ata_acpi_dissociate(host);
73240529c159STejun Heo }
73250529c159STejun Heo 
7326c6fd2807SJeff Garzik /**
7327c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
7328c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
7329c6fd2807SJeff Garzik  *
7330c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
7331c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7332c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
7333c6fd2807SJeff Garzik  *	relative to cmd_addr.
7334c6fd2807SJeff Garzik  *
7335c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7336c6fd2807SJeff Garzik  */
7337c6fd2807SJeff Garzik 
7338c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
7339c6fd2807SJeff Garzik {
7340c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7341c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7342c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7343c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7344c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7345c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7346c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7347c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7348c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7349c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7350c6fd2807SJeff Garzik }
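
/*
 * Example (illustrative sketch): filling in the taskfile register block of
 * a port from an ioremapped command block base; ctl_addr/altstatus_addr
 * (and bmdma_addr/scr_addr where applicable) still have to be set by hand.
 * "cmd_base" and "ctl_base" are hypothetical void __iomem pointers.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->ctl_addr = ctl_base;
 *	ioaddr->altstatus_addr = ctl_base;
 *	ata_std_ports(ioaddr);
 */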
7351c6fd2807SJeff Garzik 
7352c6fd2807SJeff Garzik 
7353c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7354c6fd2807SJeff Garzik 
7355c6fd2807SJeff Garzik /**
7356c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
7357c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
7358c6fd2807SJeff Garzik  *
7359b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
7360b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
7361b878ca5dSTejun Heo  *	release is handled via devres.
7362c6fd2807SJeff Garzik  *
7363c6fd2807SJeff Garzik  *	LOCKING:
7364c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
7365c6fd2807SJeff Garzik  */
7366c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
7367c6fd2807SJeff Garzik {
73682855568bSJeff Garzik 	struct device *dev = &pdev->dev;
7369cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
7370c6fd2807SJeff Garzik 
7371f0d36efdSTejun Heo 	ata_host_detach(host);
7372c6fd2807SJeff Garzik }
7373c6fd2807SJeff Garzik 
7374c6fd2807SJeff Garzik /* move to PCI subsystem */
7375c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7376c6fd2807SJeff Garzik {
7377c6fd2807SJeff Garzik 	unsigned long tmp = 0;
7378c6fd2807SJeff Garzik 
7379c6fd2807SJeff Garzik 	switch (bits->width) {
7380c6fd2807SJeff Garzik 	case 1: {
7381c6fd2807SJeff Garzik 		u8 tmp8 = 0;
7382c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
7383c6fd2807SJeff Garzik 		tmp = tmp8;
7384c6fd2807SJeff Garzik 		break;
7385c6fd2807SJeff Garzik 	}
7386c6fd2807SJeff Garzik 	case 2: {
7387c6fd2807SJeff Garzik 		u16 tmp16 = 0;
7388c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
7389c6fd2807SJeff Garzik 		tmp = tmp16;
7390c6fd2807SJeff Garzik 		break;
7391c6fd2807SJeff Garzik 	}
7392c6fd2807SJeff Garzik 	case 4: {
7393c6fd2807SJeff Garzik 		u32 tmp32 = 0;
7394c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
7395c6fd2807SJeff Garzik 		tmp = tmp32;
7396c6fd2807SJeff Garzik 		break;
7397c6fd2807SJeff Garzik 	}
7398c6fd2807SJeff Garzik 
7399c6fd2807SJeff Garzik 	default:
7400c6fd2807SJeff Garzik 		return -EINVAL;
7401c6fd2807SJeff Garzik 	}
7402c6fd2807SJeff Garzik 
7403c6fd2807SJeff Garzik 	tmp &= bits->mask;
7404c6fd2807SJeff Garzik 
7405c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
7406c6fd2807SJeff Garzik }
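
/*
 * Example (illustrative sketch): checking that an IDE channel of a PCI
 * controller is enabled before setting it up.  The register offset, width,
 * mask and value below are hypothetical and controller specific.
 *
 *	static const struct pci_bits foo_enable_bits = {
 *		0x41, 1, 0x80, 0x80	(reg, byte width, mask, val)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;		(channel disabled)
 */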
7407c6fd2807SJeff Garzik 
74086ffa01d8STejun Heo #ifdef CONFIG_PM
7409c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7410c6fd2807SJeff Garzik {
7411c6fd2807SJeff Garzik 	pci_save_state(pdev);
7412c6fd2807SJeff Garzik 	pci_disable_device(pdev);
74134c90d971STejun Heo 
74144c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
7415c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
7416c6fd2807SJeff Garzik }
7417c6fd2807SJeff Garzik 
7418553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
7419c6fd2807SJeff Garzik {
7420553c4aa6STejun Heo 	int rc;
7421553c4aa6STejun Heo 
7422c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
7423c6fd2807SJeff Garzik 	pci_restore_state(pdev);
7424553c4aa6STejun Heo 
7425f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
7426553c4aa6STejun Heo 	if (rc) {
7427553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
7428553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
7429553c4aa6STejun Heo 		return rc;
7430553c4aa6STejun Heo 	}
7431553c4aa6STejun Heo 
7432c6fd2807SJeff Garzik 	pci_set_master(pdev);
7433553c4aa6STejun Heo 	return 0;
7434c6fd2807SJeff Garzik }
7435c6fd2807SJeff Garzik 
7436c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7437c6fd2807SJeff Garzik {
7438cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7439c6fd2807SJeff Garzik 	int rc = 0;
7440c6fd2807SJeff Garzik 
7441cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
7442c6fd2807SJeff Garzik 	if (rc)
7443c6fd2807SJeff Garzik 		return rc;
7444c6fd2807SJeff Garzik 
7445c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
7446c6fd2807SJeff Garzik 
7447c6fd2807SJeff Garzik 	return 0;
7448c6fd2807SJeff Garzik }
7449c6fd2807SJeff Garzik 
7450c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
7451c6fd2807SJeff Garzik {
7452cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7453553c4aa6STejun Heo 	int rc;
7454c6fd2807SJeff Garzik 
7455553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
7456553c4aa6STejun Heo 	if (rc == 0)
7457cca3974eSJeff Garzik 		ata_host_resume(host);
7458553c4aa6STejun Heo 	return rc;
7459c6fd2807SJeff Garzik }
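
/*
 * Example (illustrative sketch): an LLD with no controller-specific
 * suspend/resume work can plug these helpers directly into its pci_driver.
 * "foo_pci_driver", "foo_pci_ids" and "foo_init_one" are hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */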
74606ffa01d8STejun Heo #endif /* CONFIG_PM */
74616ffa01d8STejun Heo 
7462c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7463c6fd2807SJeff Garzik 
7464c6fd2807SJeff Garzik 
7465c6fd2807SJeff Garzik static int __init ata_init(void)
7466c6fd2807SJeff Garzik {
7467c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
7468c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7469c6fd2807SJeff Garzik 	if (!ata_wq)
7470c6fd2807SJeff Garzik 		return -ENOMEM;
7471c6fd2807SJeff Garzik 
7472c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7473c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7474c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7475c6fd2807SJeff Garzik 		return -ENOMEM;
7476c6fd2807SJeff Garzik 	}
7477c6fd2807SJeff Garzik 
7478c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7479c6fd2807SJeff Garzik 	return 0;
7480c6fd2807SJeff Garzik }
7481c6fd2807SJeff Garzik 
7482c6fd2807SJeff Garzik static void __exit ata_exit(void)
7483c6fd2807SJeff Garzik {
7484c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7485c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7486c6fd2807SJeff Garzik }
7487c6fd2807SJeff Garzik 
7488a4625085SBrian King subsys_initcall(ata_init);
7489c6fd2807SJeff Garzik module_exit(ata_exit);
7490c6fd2807SJeff Garzik 
7491c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7492c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7493c6fd2807SJeff Garzik 
7494c6fd2807SJeff Garzik int ata_ratelimit(void)
7495c6fd2807SJeff Garzik {
7496c6fd2807SJeff Garzik 	int rc;
7497c6fd2807SJeff Garzik 	unsigned long flags;
7498c6fd2807SJeff Garzik 
7499c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7500c6fd2807SJeff Garzik 
7501c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7502c6fd2807SJeff Garzik 		rc = 1;
7503c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7504c6fd2807SJeff Garzik 	} else
7505c6fd2807SJeff Garzik 		rc = 0;
7506c6fd2807SJeff Garzik 
7507c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7508c6fd2807SJeff Garzik 
7509c6fd2807SJeff Garzik 	return rc;
7510c6fd2807SJeff Garzik }
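
/*
 * Example (illustrative sketch): throttling a message that may fire on
 * every interrupt to at most one per HZ/5 jiffies (about five per second).
 * "spurious" and "status" are hypothetical locals of the caller.
 *
 *	if (unlikely(spurious) && ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */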
7511c6fd2807SJeff Garzik 
7512c6fd2807SJeff Garzik /**
7513c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7514c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7515c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7516c6fd2807SJeff Garzik  *	@val: Wait condition
7517c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7518c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7519c6fd2807SJeff Garzik  *
7520c6fd2807SJeff Garzik  *	Waiting for some bits of register to change is a common
7521c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
7522c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
7523c6fd2807SJeff Garzik  *
7524c6fd2807SJeff Garzik  *	(*@reg & mask) != val
7525c6fd2807SJeff Garzik  *
7526c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7527c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7528c6fd2807SJeff Garzik  *
7529c6fd2807SJeff Garzik  *	LOCKING:
7530c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7531c6fd2807SJeff Garzik  *
7532c6fd2807SJeff Garzik  *	RETURNS:
7533c6fd2807SJeff Garzik  *	The final register value.
7534c6fd2807SJeff Garzik  */
7535c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7536c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7537c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7538c6fd2807SJeff Garzik {
7539c6fd2807SJeff Garzik 	unsigned long timeout;
7540c6fd2807SJeff Garzik 	u32 tmp;
7541c6fd2807SJeff Garzik 
7542c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7543c6fd2807SJeff Garzik 
7544c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7545c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7546c6fd2807SJeff Garzik 	 * eat away the timeout.
7547c6fd2807SJeff Garzik 	 */
7548c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7549c6fd2807SJeff Garzik 
7550c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7551c6fd2807SJeff Garzik 		msleep(interval_msec);
7552c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7553c6fd2807SJeff Garzik 	}
7554c6fd2807SJeff Garzik 
7555c6fd2807SJeff Garzik 	return tmp;
7556c6fd2807SJeff Garzik }
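
/*
 * Example (illustrative sketch): polling a memory-mapped status register
 * every 10 ms, for up to 1 s, until a hypothetical FOO_BUSY bit clears.
 * "mmio" and "FOO_STATUS" are hypothetical as well.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
 *				   10, 1000);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;		(timed out)
 */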
7557c6fd2807SJeff Garzik 
7558c6fd2807SJeff Garzik /*
7559c6fd2807SJeff Garzik  * Dummy port_ops
7560c6fd2807SJeff Garzik  */
7561c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7562c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7563c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7564c6fd2807SJeff Garzik 
7565c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7566c6fd2807SJeff Garzik {
7567c6fd2807SJeff Garzik 	return ATA_DRDY;
7568c6fd2807SJeff Garzik }
7569c6fd2807SJeff Garzik 
7570c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7571c6fd2807SJeff Garzik {
7572c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7573c6fd2807SJeff Garzik }
7574c6fd2807SJeff Garzik 
7575c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7576c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7577c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7578c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7579c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7580c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7581c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7582c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7583c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7584c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7585c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7586c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7587c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7588c6fd2807SJeff Garzik };
7589c6fd2807SJeff Garzik 
759021b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
759121b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
759221b0ad4fSTejun Heo };
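
/*
 * Example (illustrative sketch): marking the second channel of a
 * controller as absent by pointing its slot in the port_info array at the
 * dummy; the resulting port is reported as "DUMMY" at registration time.
 * "foo_port_info" is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 */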
759321b0ad4fSTejun Heo 
7594c6fd2807SJeff Garzik /*
7595c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7596c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7597c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7598c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7599c6fd2807SJeff Garzik  */
7600c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7601c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7602c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7603c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
760421b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7605c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7606c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7607cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7608f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7609f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7610ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7611f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7612f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
76130529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7614c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7615c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
7616c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7617c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7618c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7619c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7620c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7621c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7622c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7623c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
762443727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7625c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7626c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
76276357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_pack_xfermask);
76286357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
76296357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
76306357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
76316357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
76326357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_mode_string);
76336357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_id_xfermask);
7634c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7635c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7636c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7637c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7638d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7639c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
764004351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
76410d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
76420d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
764331cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7644c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7645d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7646c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7647c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7648c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7649c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7650c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7651c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7652c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7653c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7654c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7655c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7656c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7657c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
765810305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7659c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7660936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7661936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7662c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7663c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7664c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7665cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7666c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7667c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7668c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7669c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7670c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7671c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7672c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7673c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
767488ff6eafSTejun Heo EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7675d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7676c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
7677c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7678c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7679c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7680c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7681c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7682c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7683c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7684c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7685c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7686c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7687936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7688936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
76896ffa01d8STejun Heo #ifdef CONFIG_PM
7690cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7691cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
76926ffa01d8STejun Heo #endif /* CONFIG_PM */
7693c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7694c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
7695c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7696c6fd2807SJeff Garzik 
7697c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
76986357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7699c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7700c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7701a0f79b92STejun Heo EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7702c6fd2807SJeff Garzik 
7703c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7704c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7705d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
77061626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7707d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7708c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7709c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
77106ffa01d8STejun Heo #ifdef CONFIG_PM
7711c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7712c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7713c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7714c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
77156ffa01d8STejun Heo #endif /* CONFIG_PM */
7716c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7717c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7718c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7719c6fd2807SJeff Garzik 
772031f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
77213af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
77223af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
77233af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
77243af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
77253af9a77aSTejun Heo 
7726b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7727b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7728b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7729cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7730cbcdd875STejun Heo #ifdef CONFIG_PCI
7731cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7732cbcdd875STejun Heo #endif /* CONFIG_PCI */
7733c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7734dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7735c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7736c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
77377d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7738c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7739c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7740c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7741c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7742c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
774383625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7744a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7745be0d18dfSAlan Cox 
7746be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7747be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7748be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7749c88f90c3STejun Heo EXPORT_SYMBOL_GPL(ata_cable_ignore);
7750be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7751