xref: /openbmc/linux/drivers/ata/libata-core.c (revision 0affa456)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
3392c52c52SAlan Cox  *  Standards documents from:
3492c52c52SAlan Cox  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
3592c52c52SAlan Cox  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
3692c52c52SAlan Cox  *	http://www.sata-io.org (SATA)
3792c52c52SAlan Cox  *	http://www.compactflash.org (CF)
3892c52c52SAlan Cox  *	http://www.qic.org (QIC157 - Tape and DSC)
3992c52c52SAlan Cox  *	http://www.ce-ata.org (CE-ATA: not supported)
4092c52c52SAlan Cox  *
41c6fd2807SJeff Garzik  */
42c6fd2807SJeff Garzik 
43c6fd2807SJeff Garzik #include <linux/kernel.h>
44c6fd2807SJeff Garzik #include <linux/module.h>
45c6fd2807SJeff Garzik #include <linux/pci.h>
46c6fd2807SJeff Garzik #include <linux/init.h>
47c6fd2807SJeff Garzik #include <linux/list.h>
48c6fd2807SJeff Garzik #include <linux/mm.h>
49c6fd2807SJeff Garzik #include <linux/highmem.h>
50c6fd2807SJeff Garzik #include <linux/spinlock.h>
51c6fd2807SJeff Garzik #include <linux/blkdev.h>
52c6fd2807SJeff Garzik #include <linux/delay.h>
53c6fd2807SJeff Garzik #include <linux/timer.h>
54c6fd2807SJeff Garzik #include <linux/interrupt.h>
55c6fd2807SJeff Garzik #include <linux/completion.h>
56c6fd2807SJeff Garzik #include <linux/suspend.h>
57c6fd2807SJeff Garzik #include <linux/workqueue.h>
58c6fd2807SJeff Garzik #include <linux/jiffies.h>
59c6fd2807SJeff Garzik #include <linux/scatterlist.h>
602dcb407eSJeff Garzik #include <linux/io.h>
61c6fd2807SJeff Garzik #include <scsi/scsi.h>
62c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
63c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
64c6fd2807SJeff Garzik #include <linux/libata.h>
65c6fd2807SJeff Garzik #include <asm/semaphore.h>
66c6fd2807SJeff Garzik #include <asm/byteorder.h>
67140b5e59STejun Heo #include <linux/cdrom.h>
68c6fd2807SJeff Garzik 
69c6fd2807SJeff Garzik #include "libata.h"
70c6fd2807SJeff Garzik 
71fda0efc5SJeff Garzik 
72c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
73c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
74c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
75c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
76c6fd2807SJeff Garzik 
77c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
78c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
79c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
80218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev,
81218f3d30SJeff Garzik 					u8 enable, u8 feature);
82c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
8375683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
84c6fd2807SJeff Garzik 
85f3187195STejun Heo unsigned int ata_print_id = 1;
86c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
87c6fd2807SJeff Garzik 
88c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
89c6fd2807SJeff Garzik 
90c6fd2807SJeff Garzik int atapi_enabled = 1;
91c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
92c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
93c6fd2807SJeff Garzik 
94c6fd2807SJeff Garzik int atapi_dmadir = 0;
95c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
96c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
97c6fd2807SJeff Garzik 
98baf4fdfaSMark Lord int atapi_passthru16 = 1;
99baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444);
100baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
101baf4fdfaSMark Lord 
102c6fd2807SJeff Garzik int libata_fua = 0;
103c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
104c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
105c6fd2807SJeff Garzik 
1062dcb407eSJeff Garzik static int ata_ignore_hpa;
1071e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
1081e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
1091e999736SAlan Cox 
110b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
111b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444);
112b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
113b3a70601SAlan Cox 
114c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
115c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
116c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
117c6fd2807SJeff Garzik 
1186ebe9d86SJeff Garzik int libata_noacpi = 0;
119d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
1206ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
12111ef697bSKristen Carlson Accardi 
122ae8d4ee7SAlan Cox int libata_allow_tpm = 0;
123ae8d4ee7SAlan Cox module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
124ae8d4ee7SAlan Cox MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
125ae8d4ee7SAlan Cox 
126c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
127c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
128c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
129c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
130c6fd2807SJeff Garzik 
131c6fd2807SJeff Garzik 
132c6fd2807SJeff Garzik /**
133c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
134c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
135c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
1369977126cSTejun Heo  *	@is_cmd: This FIS is for a command
1379977126cSTejun Heo  *	@fis: Buffer into which data will be output
138c6fd2807SJeff Garzik  *
139c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
140c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
141c6fd2807SJeff Garzik  *
142c6fd2807SJeff Garzik  *	LOCKING:
143c6fd2807SJeff Garzik  *	Inherited from caller.
144c6fd2807SJeff Garzik  */
1459977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
146c6fd2807SJeff Garzik {
147c6fd2807SJeff Garzik 	fis[0] = 0x27;			/* Register - Host to Device FIS */
1489977126cSTejun Heo 	fis[1] = pmp & 0xf;		/* Port multiplier number */
1499977126cSTejun Heo 	if (is_cmd)
1509977126cSTejun Heo 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
1519977126cSTejun Heo 
152c6fd2807SJeff Garzik 	fis[2] = tf->command;
153c6fd2807SJeff Garzik 	fis[3] = tf->feature;
154c6fd2807SJeff Garzik 
155c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
156c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
157c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
158c6fd2807SJeff Garzik 	fis[7] = tf->device;
159c6fd2807SJeff Garzik 
160c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
161c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
162c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
163c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
164c6fd2807SJeff Garzik 
165c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
166c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
167c6fd2807SJeff Garzik 	fis[14] = 0;
168c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
169c6fd2807SJeff Garzik 
170c6fd2807SJeff Garzik 	fis[16] = 0;
171c6fd2807SJeff Garzik 	fis[17] = 0;
172c6fd2807SJeff Garzik 	fis[18] = 0;
173c6fd2807SJeff Garzik 	fis[19] = 0;
174c6fd2807SJeff Garzik }
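
/*
 * Illustrative sketch (editor's example, not part of the original source):
 * building a command FIS for port multiplier port 2.  "dev" is assumed to
 * be a valid struct ata_device pointer; the taskfile values are arbitrary.
 *
 *	struct ata_taskfile tf;
 *	u8 fis[20];
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_READ_EXT;
 *	ata_tf_to_fis(&tf, 2, 1, fis);
 *	// fis[0] == 0x27 (Register - Host to Device)
 *	// fis[1] == 0x82 (bit 7 = Command FIS, PMP field = 2)
 *	// fis[2] == ATA_CMD_READ_EXT
 */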
175c6fd2807SJeff Garzik 
176c6fd2807SJeff Garzik /**
177c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
178c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
179c6fd2807SJeff Garzik  *	@tf: Taskfile to output
180c6fd2807SJeff Garzik  *
181c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
182c6fd2807SJeff Garzik  *
183c6fd2807SJeff Garzik  *	LOCKING:
184c6fd2807SJeff Garzik  *	Inherited from caller.
185c6fd2807SJeff Garzik  */
186c6fd2807SJeff Garzik 
187c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
188c6fd2807SJeff Garzik {
189c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
190c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
191c6fd2807SJeff Garzik 
192c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
193c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
194c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
195c6fd2807SJeff Garzik 	tf->device	= fis[7];
196c6fd2807SJeff Garzik 
197c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
198c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
199c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
200c6fd2807SJeff Garzik 
201c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
202c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
203c6fd2807SJeff Garzik }
204c6fd2807SJeff Garzik 
205c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
206c6fd2807SJeff Garzik 	/* pio multi */
207c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
208c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
209c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
210c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
211c6fd2807SJeff Garzik 	0,
212c6fd2807SJeff Garzik 	0,
213c6fd2807SJeff Garzik 	0,
214c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
215c6fd2807SJeff Garzik 	/* pio */
216c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
217c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
218c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
219c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
220c6fd2807SJeff Garzik 	0,
221c6fd2807SJeff Garzik 	0,
222c6fd2807SJeff Garzik 	0,
223c6fd2807SJeff Garzik 	0,
224c6fd2807SJeff Garzik 	/* dma */
225c6fd2807SJeff Garzik 	ATA_CMD_READ,
226c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
227c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
228c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
229c6fd2807SJeff Garzik 	0,
230c6fd2807SJeff Garzik 	0,
231c6fd2807SJeff Garzik 	0,
232c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
233c6fd2807SJeff Garzik };
234c6fd2807SJeff Garzik 
235c6fd2807SJeff Garzik /**
236c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
237bd056d7eSTejun Heo  *	@tf: command to examine and configure
238bd056d7eSTejun Heo  *	@dev: device tf belongs to
239c6fd2807SJeff Garzik  *
240c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
241c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
242c6fd2807SJeff Garzik  *
243c6fd2807SJeff Garzik  *	LOCKING:
244c6fd2807SJeff Garzik  *	caller.
245c6fd2807SJeff Garzik  */
246bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
247c6fd2807SJeff Garzik {
248c6fd2807SJeff Garzik 	u8 cmd;
249c6fd2807SJeff Garzik 
250c6fd2807SJeff Garzik 	int index, fua, lba48, write;
251c6fd2807SJeff Garzik 
252c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
253c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
254c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
255c6fd2807SJeff Garzik 
256c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
257c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
258c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
2599af5c9c9STejun Heo 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
260c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
261c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
262c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
263c6fd2807SJeff Garzik 	} else {
264c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
265c6fd2807SJeff Garzik 		index = 16;
266c6fd2807SJeff Garzik 	}
267c6fd2807SJeff Garzik 
268c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
269c6fd2807SJeff Garzik 	if (cmd) {
270c6fd2807SJeff Garzik 		tf->command = cmd;
271c6fd2807SJeff Garzik 		return 0;
272c6fd2807SJeff Garzik 	}
273c6fd2807SJeff Garzik 	return -1;
274c6fd2807SJeff Garzik }
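
/*
 * Editor's note (illustrative, not part of the original source): the
 * ata_rw_cmds[] lookup above is indexed as base + fua + lba48 + write,
 * where base is 0 (PIO multi), 8 (PIO) or 16 (DMA) and fua/lba48/write
 * contribute 4/2/1.  For example:
 *
 *	DMA, LBA48, FUA write:  16 + 4 + 2 + 1 = 23 -> ATA_CMD_WRITE_FUA_EXT
 *	plain PIO read:          8 + 0 + 0 + 0 =  8 -> ATA_CMD_PIO_READ
 */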
275c6fd2807SJeff Garzik 
276c6fd2807SJeff Garzik /**
27735b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
27835b649feSTejun Heo  *	@tf: ATA taskfile of interest
27935b649feSTejun Heo  *	@dev: ATA device @tf belongs to
28035b649feSTejun Heo  *
28135b649feSTejun Heo  *	LOCKING:
28235b649feSTejun Heo  *	None.
28335b649feSTejun Heo  *
28435b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
28535b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
28635b649feSTejun Heo  *	flags select the address format to use.
28735b649feSTejun Heo  *
28835b649feSTejun Heo  *	RETURNS:
28935b649feSTejun Heo  *	Block address read from @tf.
29035b649feSTejun Heo  */
29135b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
29235b649feSTejun Heo {
29335b649feSTejun Heo 	u64 block = 0;
29435b649feSTejun Heo 
29535b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
29635b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
29735b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
29835b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
29935b649feSTejun Heo 			block |= tf->hob_lbal << 24;
30035b649feSTejun Heo 		} else
30135b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
30235b649feSTejun Heo 
30335b649feSTejun Heo 		block |= tf->lbah << 16;
30435b649feSTejun Heo 		block |= tf->lbam << 8;
30535b649feSTejun Heo 		block |= tf->lbal;
30635b649feSTejun Heo 	} else {
30735b649feSTejun Heo 		u32 cyl, head, sect;
30835b649feSTejun Heo 
30935b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
31035b649feSTejun Heo 		head = tf->device & 0xf;
31135b649feSTejun Heo 		sect = tf->lbal;
31235b649feSTejun Heo 
31335b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
31435b649feSTejun Heo 	}
31535b649feSTejun Heo 
31635b649feSTejun Heo 	return block;
31735b649feSTejun Heo }
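
/*
 * Editor's note (illustrative, not part of the original source): the CHS
 * branch above is plain arithmetic.  For a hypothetical device with
 * dev->heads == 16 and dev->sectors == 63, a taskfile holding cyl = 2,
 * head = 3, sect = 4 yields
 *
 *	block = (2 * 16 + 3) * 63 + 4 = 2209
 */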
31835b649feSTejun Heo 
31935b649feSTejun Heo /**
320bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
321bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
322bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
323bd056d7eSTejun Heo  *	@block: Block address
324bd056d7eSTejun Heo  *	@n_block: Number of blocks
325bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
326bd056d7eSTejun Heo  *	@tag: tag
327bd056d7eSTejun Heo  *
328bd056d7eSTejun Heo  *	LOCKING:
329bd056d7eSTejun Heo  *	None.
330bd056d7eSTejun Heo  *
331bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
332bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
333bd056d7eSTejun Heo  *
334bd056d7eSTejun Heo  *	RETURNS:
335bd056d7eSTejun Heo  *
336bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
337bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
338bd056d7eSTejun Heo  */
339bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
340bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
341bd056d7eSTejun Heo 		    unsigned int tag)
342bd056d7eSTejun Heo {
343bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
344bd056d7eSTejun Heo 	tf->flags |= tf_flags;
345bd056d7eSTejun Heo 
3466d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
347bd056d7eSTejun Heo 		/* yay, NCQ */
348bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
349bd056d7eSTejun Heo 			return -ERANGE;
350bd056d7eSTejun Heo 
351bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
352bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
353bd056d7eSTejun Heo 
354bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
355bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
356bd056d7eSTejun Heo 		else
357bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
358bd056d7eSTejun Heo 
359bd056d7eSTejun Heo 		tf->nsect = tag << 3;
360bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
361bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
362bd056d7eSTejun Heo 
363bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
364bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
365bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
366bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
367bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
368bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
369bd056d7eSTejun Heo 
370bd056d7eSTejun Heo 		tf->device = 1 << 6;
371bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
372bd056d7eSTejun Heo 			tf->device |= 1 << 7;
373bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
374bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
375bd056d7eSTejun Heo 
376bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
377bd056d7eSTejun Heo 			/* use LBA28 */
378bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
379bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
380bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
381bd056d7eSTejun Heo 				return -ERANGE;
382bd056d7eSTejun Heo 
383bd056d7eSTejun Heo 			/* use LBA48 */
384bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
385bd056d7eSTejun Heo 
386bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
387bd056d7eSTejun Heo 
388bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
389bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
390bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
391bd056d7eSTejun Heo 		} else
392bd056d7eSTejun Heo 			/* request too large even for LBA48 */
393bd056d7eSTejun Heo 			return -ERANGE;
394bd056d7eSTejun Heo 
395bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
396bd056d7eSTejun Heo 			return -EINVAL;
397bd056d7eSTejun Heo 
398bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
399bd056d7eSTejun Heo 
400bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
401bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
402bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
403bd056d7eSTejun Heo 
404bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
405bd056d7eSTejun Heo 	} else {
406bd056d7eSTejun Heo 		/* CHS */
407bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
408bd056d7eSTejun Heo 
409bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
410bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
411bd056d7eSTejun Heo 			return -ERANGE;
412bd056d7eSTejun Heo 
413bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
414bd056d7eSTejun Heo 			return -EINVAL;
415bd056d7eSTejun Heo 
416bd056d7eSTejun Heo 		/* Convert LBA to CHS */
417bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
418bd056d7eSTejun Heo 		cyl   = track / dev->heads;
419bd056d7eSTejun Heo 		head  = track % dev->heads;
420bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
421bd056d7eSTejun Heo 
422bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
423bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
424bd056d7eSTejun Heo 
425bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
426bd056d7eSTejun Heo 		   Cylinder: 0-65535
427bd056d7eSTejun Heo 		   Head: 0-15
428bd056d7eSTejun Heo 		   Sector: 1-255 */
429bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
430bd056d7eSTejun Heo 			return -ERANGE;
431bd056d7eSTejun Heo 
432bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
433bd056d7eSTejun Heo 		tf->lbal = sect;
434bd056d7eSTejun Heo 		tf->lbam = cyl;
435bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
436bd056d7eSTejun Heo 		tf->device |= head;
437bd056d7eSTejun Heo 	}
438bd056d7eSTejun Heo 
439bd056d7eSTejun Heo 	return 0;
440bd056d7eSTejun Heo }
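
/*
 * Illustrative sketch (editor's example, not part of the original source):
 * a non-NCQ caller building a 16-sector FUA write at LBA 0x12345678.
 * "dev" is assumed valid and the values are arbitrary; error handling is
 * reduced to checking the return code.
 *
 *	struct ata_taskfile tf;
 *	int rc;
 *
 *	ata_tf_init(dev, &tf);
 *	rc = ata_build_rw_tf(&tf, dev, 0x12345678ULL, 16,
 *			     ATA_TFLAG_WRITE | ATA_TFLAG_FUA,
 *			     ATA_TAG_INTERNAL);
 *	if (rc == -ERANGE)
 *		;	// request does not fit the device's addressing
 */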
441bd056d7eSTejun Heo 
442bd056d7eSTejun Heo /**
443c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
444c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
445c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
446c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
447c6fd2807SJeff Garzik  *
448c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
449c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
450c6fd2807SJeff Garzik  *
451c6fd2807SJeff Garzik  *	LOCKING:
452c6fd2807SJeff Garzik  *	None.
453c6fd2807SJeff Garzik  *
454c6fd2807SJeff Garzik  *	RETURNS:
455c6fd2807SJeff Garzik  *	Packed xfer_mask.
456c6fd2807SJeff Garzik  */
4577dc951aeSTejun Heo unsigned long ata_pack_xfermask(unsigned long pio_mask,
4587dc951aeSTejun Heo 				unsigned long mwdma_mask,
4597dc951aeSTejun Heo 				unsigned long udma_mask)
460c6fd2807SJeff Garzik {
461c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
462c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
463c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
464c6fd2807SJeff Garzik }
465c6fd2807SJeff Garzik 
466c6fd2807SJeff Garzik /**
467c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
468c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
469c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
470c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
471c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
472c6fd2807SJeff Garzik  *
473c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
474c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
475c6fd2807SJeff Garzik  */
4767dc951aeSTejun Heo void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
4777dc951aeSTejun Heo 			 unsigned long *mwdma_mask, unsigned long *udma_mask)
478c6fd2807SJeff Garzik {
479c6fd2807SJeff Garzik 	if (pio_mask)
480c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
481c6fd2807SJeff Garzik 	if (mwdma_mask)
482c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
483c6fd2807SJeff Garzik 	if (udma_mask)
484c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
485c6fd2807SJeff Garzik }
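
/*
 * Editor's note (illustrative, not part of the original source): the two
 * helpers above are inverses.  Packing PIO modes 0-4, MWDMA modes 0-2 and
 * no UDMA:
 *
 *	unsigned long xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x00);
 *	unsigned long pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07, udma == 0x00
 *
 * Any of the output pointers may be NULL if that component is not needed.
 */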
486c6fd2807SJeff Garzik 
487c6fd2807SJeff Garzik static const struct ata_xfer_ent {
488c6fd2807SJeff Garzik 	int shift, bits;
489c6fd2807SJeff Garzik 	u8 base;
490c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
49170cd071eSTejun Heo 	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
49270cd071eSTejun Heo 	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
49370cd071eSTejun Heo 	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
494c6fd2807SJeff Garzik 	{ -1, },
495c6fd2807SJeff Garzik };
496c6fd2807SJeff Garzik 
497c6fd2807SJeff Garzik /**
498c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
499c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
500c6fd2807SJeff Garzik  *
501c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
502c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
503c6fd2807SJeff Garzik  *
504c6fd2807SJeff Garzik  *	LOCKING:
505c6fd2807SJeff Garzik  *	None.
506c6fd2807SJeff Garzik  *
507c6fd2807SJeff Garzik  *	RETURNS:
50870cd071eSTejun Heo  *	Matching XFER_* value, 0xff if no match found.
509c6fd2807SJeff Garzik  */
5107dc951aeSTejun Heo u8 ata_xfer_mask2mode(unsigned long xfer_mask)
511c6fd2807SJeff Garzik {
512c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
513c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
514c6fd2807SJeff Garzik 
515c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
517c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
51870cd071eSTejun Heo 	return 0xff;
519c6fd2807SJeff Garzik }
520c6fd2807SJeff Garzik 
521c6fd2807SJeff Garzik /**
522c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
523c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
524c6fd2807SJeff Garzik  *
525c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
526c6fd2807SJeff Garzik  *
527c6fd2807SJeff Garzik  *	LOCKING:
528c6fd2807SJeff Garzik  *	None.
529c6fd2807SJeff Garzik  *
530c6fd2807SJeff Garzik  *	RETURNS:
531c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
532c6fd2807SJeff Garzik  */
5337dc951aeSTejun Heo unsigned long ata_xfer_mode2mask(u8 xfer_mode)
534c6fd2807SJeff Garzik {
535c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
536c6fd2807SJeff Garzik 
537c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
53970cd071eSTejun Heo 			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
54070cd071eSTejun Heo 				& ~((1 << ent->shift) - 1);
541c6fd2807SJeff Garzik 	return 0;
542c6fd2807SJeff Garzik }
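
/*
 * Editor's note (illustrative, not part of the original source):
 * ata_xfer_mask2mode() and ata_xfer_mode2mask() map between the packed
 * mask space and XFER_* values.  With only the UDMA/66 bit set in a mask,
 * ata_xfer_mask2mode() returns XFER_UDMA_4; going the other way,
 * ata_xfer_mode2mask(XFER_UDMA_4) returns a mask with the UDMA bits for
 * modes 0-4 set and all PIO/MWDMA bits clear, i.e. every UDMA mode up to
 * and including UDMA/66.
 */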
543c6fd2807SJeff Garzik 
544c6fd2807SJeff Garzik /**
545c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
546c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
547c6fd2807SJeff Garzik  *
548c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
549c6fd2807SJeff Garzik  *
550c6fd2807SJeff Garzik  *	LOCKING:
551c6fd2807SJeff Garzik  *	None.
552c6fd2807SJeff Garzik  *
553c6fd2807SJeff Garzik  *	RETURNS:
554c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
555c6fd2807SJeff Garzik  */
5567dc951aeSTejun Heo int ata_xfer_mode2shift(unsigned long xfer_mode)
557c6fd2807SJeff Garzik {
558c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
559c6fd2807SJeff Garzik 
560c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
561c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
562c6fd2807SJeff Garzik 			return ent->shift;
563c6fd2807SJeff Garzik 	return -1;
564c6fd2807SJeff Garzik }
565c6fd2807SJeff Garzik 
566c6fd2807SJeff Garzik /**
567c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
568c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
569c6fd2807SJeff Garzik  *
570c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
571c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
572c6fd2807SJeff Garzik  *
573c6fd2807SJeff Garzik  *	LOCKING:
574c6fd2807SJeff Garzik  *	None.
575c6fd2807SJeff Garzik  *
576c6fd2807SJeff Garzik  *	RETURNS:
577c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
578c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
579c6fd2807SJeff Garzik  */
5807dc951aeSTejun Heo const char *ata_mode_string(unsigned long xfer_mask)
581c6fd2807SJeff Garzik {
582c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
583c6fd2807SJeff Garzik 		"PIO0",
584c6fd2807SJeff Garzik 		"PIO1",
585c6fd2807SJeff Garzik 		"PIO2",
586c6fd2807SJeff Garzik 		"PIO3",
587c6fd2807SJeff Garzik 		"PIO4",
588b352e57dSAlan Cox 		"PIO5",
589b352e57dSAlan Cox 		"PIO6",
590c6fd2807SJeff Garzik 		"MWDMA0",
591c6fd2807SJeff Garzik 		"MWDMA1",
592c6fd2807SJeff Garzik 		"MWDMA2",
593b352e57dSAlan Cox 		"MWDMA3",
594b352e57dSAlan Cox 		"MWDMA4",
595c6fd2807SJeff Garzik 		"UDMA/16",
596c6fd2807SJeff Garzik 		"UDMA/25",
597c6fd2807SJeff Garzik 		"UDMA/33",
598c6fd2807SJeff Garzik 		"UDMA/44",
599c6fd2807SJeff Garzik 		"UDMA/66",
600c6fd2807SJeff Garzik 		"UDMA/100",
601c6fd2807SJeff Garzik 		"UDMA/133",
602c6fd2807SJeff Garzik 		"UDMA7",
603c6fd2807SJeff Garzik 	};
604c6fd2807SJeff Garzik 	int highbit;
605c6fd2807SJeff Garzik 
606c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
607c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
608c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
609c6fd2807SJeff Garzik 	return "<n/a>";
610c6fd2807SJeff Garzik }
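
/*
 * Editor's note (illustrative, not part of the original source):
 * ata_mode_string() names only the highest set bit, so for a mask
 * describing UDMA modes 0-4 it returns "UDMA/66", and for an empty
 * mask it returns "<n/a>".
 */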
611c6fd2807SJeff Garzik 
612c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
613c6fd2807SJeff Garzik {
614c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
615c6fd2807SJeff Garzik 		"1.5 Gbps",
616c6fd2807SJeff Garzik 		"3.0 Gbps",
617c6fd2807SJeff Garzik 	};
618c6fd2807SJeff Garzik 
619c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
620c6fd2807SJeff Garzik 		return "<unknown>";
621c6fd2807SJeff Garzik 	return spd_str[spd - 1];
622c6fd2807SJeff Garzik }
623c6fd2807SJeff Garzik 
624c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
625c6fd2807SJeff Garzik {
62609d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
6279af5c9c9STejun Heo 		if (ata_msg_drv(dev->link->ap))
628c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
629562f0c2dSTejun Heo 		ata_acpi_on_disable(dev);
6304ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6314ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
632c6fd2807SJeff Garzik 		dev->class++;
633c6fd2807SJeff Garzik 	}
634c6fd2807SJeff Garzik }
635c6fd2807SJeff Garzik 
636ca77329fSKristen Carlson Accardi static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
637ca77329fSKristen Carlson Accardi {
638ca77329fSKristen Carlson Accardi 	struct ata_link *link = dev->link;
639ca77329fSKristen Carlson Accardi 	struct ata_port *ap = link->ap;
640ca77329fSKristen Carlson Accardi 	u32 scontrol;
641ca77329fSKristen Carlson Accardi 	unsigned int err_mask;
642ca77329fSKristen Carlson Accardi 	int rc;
643ca77329fSKristen Carlson Accardi 
644ca77329fSKristen Carlson Accardi 	/*
645ca77329fSKristen Carlson Accardi 	 * disallow DIPM for drivers which haven't set
646ca77329fSKristen Carlson Accardi 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
647ca77329fSKristen Carlson Accardi 	 * phy ready will be set in the interrupt status on
648ca77329fSKristen Carlson Accardi 	 * state changes, which will cause some drivers to
649ca77329fSKristen Carlson Accardi 	 * think there are errors - additionally drivers will
650ca77329fSKristen Carlson Accardi 	 * need to disable hot plug.
651ca77329fSKristen Carlson Accardi 	 */
652ca77329fSKristen Carlson Accardi 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
653ca77329fSKristen Carlson Accardi 		ap->pm_policy = NOT_AVAILABLE;
654ca77329fSKristen Carlson Accardi 		return -EINVAL;
655ca77329fSKristen Carlson Accardi 	}
656ca77329fSKristen Carlson Accardi 
657ca77329fSKristen Carlson Accardi 	/*
658ca77329fSKristen Carlson Accardi 	 * For DIPM, we will only enable it for the
659ca77329fSKristen Carlson Accardi 	 * min_power setting.
660ca77329fSKristen Carlson Accardi 	 *
661ca77329fSKristen Carlson Accardi 	 * Why?  Because disks are too stupid to know that
662ca77329fSKristen Carlson Accardi 	 * if the host rejects a request to go to SLUMBER
663ca77329fSKristen Carlson Accardi 	 * they should retry at PARTIAL; instead they
664ca77329fSKristen Carlson Accardi 	 * just give up.  So, for medium_power to
665ca77329fSKristen Carlson Accardi 	 * work at all, we need to allow only HIPM.
666ca77329fSKristen Carlson Accardi 	 */
667ca77329fSKristen Carlson Accardi 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
668ca77329fSKristen Carlson Accardi 	if (rc)
669ca77329fSKristen Carlson Accardi 		return rc;
670ca77329fSKristen Carlson Accardi 
671ca77329fSKristen Carlson Accardi 	switch (policy) {
672ca77329fSKristen Carlson Accardi 	case MIN_POWER:
673ca77329fSKristen Carlson Accardi 		/* no restrictions on IPM transitions */
674ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x3 << 8);
675ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
676ca77329fSKristen Carlson Accardi 		if (rc)
677ca77329fSKristen Carlson Accardi 			return rc;
678ca77329fSKristen Carlson Accardi 
679ca77329fSKristen Carlson Accardi 		/* enable DIPM */
680ca77329fSKristen Carlson Accardi 		if (dev->flags & ATA_DFLAG_DIPM)
681ca77329fSKristen Carlson Accardi 			err_mask = ata_dev_set_feature(dev,
682ca77329fSKristen Carlson Accardi 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
683ca77329fSKristen Carlson Accardi 		break;
684ca77329fSKristen Carlson Accardi 	case MEDIUM_POWER:
685ca77329fSKristen Carlson Accardi 		/* allow IPM to PARTIAL */
686ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x1 << 8);
687ca77329fSKristen Carlson Accardi 		scontrol |= (0x2 << 8);
688ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
689ca77329fSKristen Carlson Accardi 		if (rc)
690ca77329fSKristen Carlson Accardi 			return rc;
691ca77329fSKristen Carlson Accardi 
692f5456b63SKristen Carlson Accardi 		/*
693f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since the IPM flags
694f5456b63SKristen Carlson Accardi 		 * disallow transitions to SLUMBER, which effectively
695f5456b63SKristen Carlson Accardi 		 * disables DIPM if the device does not support PARTIAL
696f5456b63SKristen Carlson Accardi 		 */
697ca77329fSKristen Carlson Accardi 		break;
698ca77329fSKristen Carlson Accardi 	case NOT_AVAILABLE:
699ca77329fSKristen Carlson Accardi 	case MAX_PERFORMANCE:
700ca77329fSKristen Carlson Accardi 		/* disable all IPM transitions */
701ca77329fSKristen Carlson Accardi 		scontrol |= (0x3 << 8);
702ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
703ca77329fSKristen Carlson Accardi 		if (rc)
704ca77329fSKristen Carlson Accardi 			return rc;
705ca77329fSKristen Carlson Accardi 
706f5456b63SKristen Carlson Accardi 		/*
707f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since IPM flags
708f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since the IPM flags
709f5456b63SKristen Carlson Accardi 		 * disallow all transitions, which effectively
710f5456b63SKristen Carlson Accardi 		 * disables DIPM anyway.
711ca77329fSKristen Carlson Accardi 		break;
712ca77329fSKristen Carlson Accardi 	}
713ca77329fSKristen Carlson Accardi 
714ca77329fSKristen Carlson Accardi 	/* FIXME: handle SET FEATURES failure */
715ca77329fSKristen Carlson Accardi 	(void) err_mask;
716ca77329fSKristen Carlson Accardi 
717ca77329fSKristen Carlson Accardi 	return 0;
718ca77329fSKristen Carlson Accardi }
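
/*
 * Editor's note (illustrative, assuming the standard SControl.IPM encoding
 * from the SATA spec): bit 8 of SControl blocks transitions to PARTIAL and
 * bit 9 blocks transitions to SLUMBER, so the cases above map to
 *
 *	MIN_POWER	-> IPM field 0x0 (all transitions allowed, DIPM on)
 *	MEDIUM_POWER	-> IPM field 0x2 (PARTIAL allowed, SLUMBER blocked)
 *	MAX_PERFORMANCE	-> IPM field 0x3 (all transitions blocked)
 */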
719ca77329fSKristen Carlson Accardi 
720ca77329fSKristen Carlson Accardi /**
721ca77329fSKristen Carlson Accardi  *	ata_dev_enable_pm - enable SATA interface power management
72248166fd9SStephen Hemminger  *	@dev:  device to enable power management
72348166fd9SStephen Hemminger  *	@policy: the link power management policy
724ca77329fSKristen Carlson Accardi  *
725ca77329fSKristen Carlson Accardi  *	Enable SATA Interface power management.  This will enable
726ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) for the min_power
727ca77329fSKristen Carlson Accardi  * 	policy, and then call driver-specific callbacks for
728ca77329fSKristen Carlson Accardi  *	enabling Host Initiated Power Management (HIPM).
729ca77329fSKristen Carlson Accardi  *
730ca77329fSKristen Carlson Accardi  *	Locking: Caller.
731ca77329fSKristen Carlson Accardi  *	Returns: void (on failure @ap->pm_policy is set to MAX_PERFORMANCE).
732ca77329fSKristen Carlson Accardi  */
733ca77329fSKristen Carlson Accardi void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
734ca77329fSKristen Carlson Accardi {
735ca77329fSKristen Carlson Accardi 	int rc = 0;
736ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
737ca77329fSKristen Carlson Accardi 
738ca77329fSKristen Carlson Accardi 	/* set HIPM first, then DIPM */
739ca77329fSKristen Carlson Accardi 	if (ap->ops->enable_pm)
740ca77329fSKristen Carlson Accardi 		rc = ap->ops->enable_pm(ap, policy);
741ca77329fSKristen Carlson Accardi 	if (rc)
742ca77329fSKristen Carlson Accardi 		goto enable_pm_out;
743ca77329fSKristen Carlson Accardi 	rc = ata_dev_set_dipm(dev, policy);
744ca77329fSKristen Carlson Accardi 
745ca77329fSKristen Carlson Accardi enable_pm_out:
746ca77329fSKristen Carlson Accardi 	if (rc)
747ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
748ca77329fSKristen Carlson Accardi 	else
749ca77329fSKristen Carlson Accardi 		ap->pm_policy = policy;
750ca77329fSKristen Carlson Accardi 	return /* rc */;	/* hopefully we can use 'rc' eventually */
751ca77329fSKristen Carlson Accardi }
752ca77329fSKristen Carlson Accardi 
7531992a5edSStephen Rothwell #ifdef CONFIG_PM
754ca77329fSKristen Carlson Accardi /**
755ca77329fSKristen Carlson Accardi  *	ata_dev_disable_pm - disable SATA interface power management
75648166fd9SStephen Hemminger  *	@dev: device to disable power management
757ca77329fSKristen Carlson Accardi  *
758ca77329fSKristen Carlson Accardi  *	Disable SATA Interface power management.  This will disable
759ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) without changing
760ca77329fSKristen Carlson Accardi  * 	the policy, and call driver-specific callbacks for disabling
761ca77329fSKristen Carlson Accardi  * 	Host Initiated Power Management (HIPM).
762ca77329fSKristen Carlson Accardi  *
763ca77329fSKristen Carlson Accardi  *	Locking: Caller.
764ca77329fSKristen Carlson Accardi  *	Returns: void
765ca77329fSKristen Carlson Accardi  */
766ca77329fSKristen Carlson Accardi static void ata_dev_disable_pm(struct ata_device *dev)
767ca77329fSKristen Carlson Accardi {
768ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
769ca77329fSKristen Carlson Accardi 
770ca77329fSKristen Carlson Accardi 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
771ca77329fSKristen Carlson Accardi 	if (ap->ops->disable_pm)
772ca77329fSKristen Carlson Accardi 		ap->ops->disable_pm(ap);
773ca77329fSKristen Carlson Accardi }
7741992a5edSStephen Rothwell #endif	/* CONFIG_PM */
775ca77329fSKristen Carlson Accardi 
776ca77329fSKristen Carlson Accardi void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
777ca77329fSKristen Carlson Accardi {
778ca77329fSKristen Carlson Accardi 	ap->pm_policy = policy;
779ca77329fSKristen Carlson Accardi 	ap->link.eh_info.action |= ATA_EHI_LPM;
780ca77329fSKristen Carlson Accardi 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
781ca77329fSKristen Carlson Accardi 	ata_port_schedule_eh(ap);
782ca77329fSKristen Carlson Accardi }
783ca77329fSKristen Carlson Accardi 
7841992a5edSStephen Rothwell #ifdef CONFIG_PM
785ca77329fSKristen Carlson Accardi static void ata_lpm_enable(struct ata_host *host)
786ca77329fSKristen Carlson Accardi {
787ca77329fSKristen Carlson Accardi 	struct ata_link *link;
788ca77329fSKristen Carlson Accardi 	struct ata_port *ap;
789ca77329fSKristen Carlson Accardi 	struct ata_device *dev;
790ca77329fSKristen Carlson Accardi 	int i;
791ca77329fSKristen Carlson Accardi 
792ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
793ca77329fSKristen Carlson Accardi 		ap = host->ports[i];
794ca77329fSKristen Carlson Accardi 		ata_port_for_each_link(link, ap) {
795ca77329fSKristen Carlson Accardi 			ata_link_for_each_dev(dev, link)
796ca77329fSKristen Carlson Accardi 				ata_dev_disable_pm(dev);
797ca77329fSKristen Carlson Accardi 		}
798ca77329fSKristen Carlson Accardi 	}
799ca77329fSKristen Carlson Accardi }
800ca77329fSKristen Carlson Accardi 
801ca77329fSKristen Carlson Accardi static void ata_lpm_disable(struct ata_host *host)
802ca77329fSKristen Carlson Accardi {
803ca77329fSKristen Carlson Accardi 	int i;
804ca77329fSKristen Carlson Accardi 
805ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
806ca77329fSKristen Carlson Accardi 		struct ata_port *ap = host->ports[i];
807ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
808ca77329fSKristen Carlson Accardi 	}
809ca77329fSKristen Carlson Accardi }
8101992a5edSStephen Rothwell #endif	/* CONFIG_PM */
811ca77329fSKristen Carlson Accardi 
812ca77329fSKristen Carlson Accardi 
813c6fd2807SJeff Garzik /**
814c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
815c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
816c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
817c6fd2807SJeff Garzik  *
8180d5ff566STejun Heo  *	This technique was originally described in
8190d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
8200d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
8210d5ff566STejun Heo  *
8220d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
8230d5ff566STejun Heo  *	and if a device is present, it will respond by
8240d5ff566STejun Heo  *	correctly storing and echoing back the
8250d5ff566STejun Heo  *	ATA shadow register contents.
826c6fd2807SJeff Garzik  *
827c6fd2807SJeff Garzik  *	LOCKING:
828c6fd2807SJeff Garzik  *	caller.
829c6fd2807SJeff Garzik  */
830c6fd2807SJeff Garzik 
8310d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
832c6fd2807SJeff Garzik {
8330d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
8340d5ff566STejun Heo 	u8 nsect, lbal;
8350d5ff566STejun Heo 
8360d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
8370d5ff566STejun Heo 
8380d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8390d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8400d5ff566STejun Heo 
8410d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
8420d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
8430d5ff566STejun Heo 
8440d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8450d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8460d5ff566STejun Heo 
8470d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
8480d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
8490d5ff566STejun Heo 
8500d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
8510d5ff566STejun Heo 		return 1;	/* we found a device */
8520d5ff566STejun Heo 
8530d5ff566STejun Heo 	return 0;		/* nothing found */
854c6fd2807SJeff Garzik }
855c6fd2807SJeff Garzik 
856c6fd2807SJeff Garzik /**
857c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
858c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
859c6fd2807SJeff Garzik  *
860c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
861c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
862c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
863c6fd2807SJeff Garzik  *
864c6fd2807SJeff Garzik  *	LOCKING:
865c6fd2807SJeff Garzik  *	None.
866c6fd2807SJeff Garzik  *
867c6fd2807SJeff Garzik  *	RETURNS:
868633273a3STejun Heo  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
869633273a3STejun Heo  *	%ATA_DEV_UNKNOWN in the event of failure.
870c6fd2807SJeff Garzik  */
871c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
872c6fd2807SJeff Garzik {
873c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
874c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
875c6fd2807SJeff Garzik 	 * so we check only those.  It's sufficient for uniqueness.
876633273a3STejun Heo 	 *
877633273a3STejun Heo 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
878633273a3STejun Heo 	 * signatures for ATA and ATAPI devices attached on SerialATA,
879633273a3STejun Heo 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
880633273a3STejun Heo 	 * spec never mentioned using different signatures
881633273a3STejun Heo 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
882633273a3STejun Heo 	 * Multiplier specification began to use 0x69/0x96 to identify
883633273a3STejun Heo 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
884633273a3STejun Heo 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
885633273a3STejun Heo 	 * 0x69/0x96 and described them as reserved for
886633273a3STejun Heo 	 * SerialATA.
887633273a3STejun Heo 	 *
888633273a3STejun Heo 	 * We follow the current spec and consider that 0x69/0x96
889633273a3STejun Heo 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
890c6fd2807SJeff Garzik 	 */
891633273a3STejun Heo 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
892c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
893c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
894c6fd2807SJeff Garzik 	}
895c6fd2807SJeff Garzik 
896633273a3STejun Heo 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
897c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
898c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
899c6fd2807SJeff Garzik 	}
900c6fd2807SJeff Garzik 
901633273a3STejun Heo 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
902633273a3STejun Heo 		DPRINTK("found PMP device by sig\n");
903633273a3STejun Heo 		return ATA_DEV_PMP;
904633273a3STejun Heo 	}
905633273a3STejun Heo 
906633273a3STejun Heo 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
9072dcb407eSJeff Garzik 		printk(KERN_INFO "ata: SEMB device ignored\n");
908633273a3STejun Heo 		return ATA_DEV_SEMB_UNSUP; /* not yet */
909633273a3STejun Heo 	}
910633273a3STejun Heo 
911c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
912c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
913c6fd2807SJeff Garzik }
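
/*
 * Illustrative sketch (editor's example, not part of the original source):
 * after a reset the LBA mid/high shadow registers carry the signature, so
 * a taskfile read back with lbam == 0x14 and lbah == 0xeb classifies as
 * ATAPI:
 *
 *	struct ata_taskfile tf;
 *
 *	memset(&tf, 0, sizeof(tf));
 *	tf.lbam = 0x14;
 *	tf.lbah = 0xeb;
 *	// ata_dev_classify(&tf) == ATA_DEV_ATAPI
 */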
914c6fd2807SJeff Garzik 
915c6fd2807SJeff Garzik /**
916c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
9173f19859eSTejun Heo  *	@dev: ATA device to classify
9183f19859eSTejun Heo  *	@present: device seems present
919c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
920c6fd2807SJeff Garzik  *
921c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
922c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
923c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
924c6fd2807SJeff Garzik  *	and diagnostics.
925c6fd2807SJeff Garzik  *
926c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
927c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
928c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
929c6fd2807SJeff Garzik  *
930c6fd2807SJeff Garzik  *	LOCKING:
931c6fd2807SJeff Garzik  *	caller.
932c6fd2807SJeff Garzik  *
933c6fd2807SJeff Garzik  *	RETURNS:
934c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
935c6fd2807SJeff Garzik  */
9363f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
9373f19859eSTejun Heo 				  u8 *r_err)
938c6fd2807SJeff Garzik {
9393f19859eSTejun Heo 	struct ata_port *ap = dev->link->ap;
940c6fd2807SJeff Garzik 	struct ata_taskfile tf;
941c6fd2807SJeff Garzik 	unsigned int class;
942c6fd2807SJeff Garzik 	u8 err;
943c6fd2807SJeff Garzik 
9443f19859eSTejun Heo 	ap->ops->dev_select(ap, dev->devno);
945c6fd2807SJeff Garzik 
946c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
947c6fd2807SJeff Garzik 
948c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
949c6fd2807SJeff Garzik 	err = tf.feature;
950c6fd2807SJeff Garzik 	if (r_err)
951c6fd2807SJeff Garzik 		*r_err = err;
952c6fd2807SJeff Garzik 
953c5038fc0SAlan Cox 	/* see if device passed diags: continue and warn later */
954c5038fc0SAlan Cox 	if (err == 0)
95593590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
9563f19859eSTejun Heo 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
95793590859SAlan Cox 	else if (err == 1)
958c6fd2807SJeff Garzik 		/* do nothing */ ;
9593f19859eSTejun Heo 	else if ((dev->devno == 0) && (err == 0x81))
960c6fd2807SJeff Garzik 		/* do nothing */ ;
961c6fd2807SJeff Garzik 	else
962c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
963c6fd2807SJeff Garzik 
964c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
965c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
966c6fd2807SJeff Garzik 
967d7fbee05STejun Heo 	if (class == ATA_DEV_UNKNOWN) {
968d7fbee05STejun Heo 		/* If the device failed diagnostic, it's likely to
969d7fbee05STejun Heo 		 * have reported incorrect device signature too.
970d7fbee05STejun Heo 		 * Assume ATA device if the device seems present but
971d7fbee05STejun Heo 		 * device signature is invalid with diagnostic
972d7fbee05STejun Heo 		 * failure.
973d7fbee05STejun Heo 		 */
974d7fbee05STejun Heo 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
975d7fbee05STejun Heo 			class = ATA_DEV_ATA;
976d7fbee05STejun Heo 		else
977d7fbee05STejun Heo 			class = ATA_DEV_NONE;
978d7fbee05STejun Heo 	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
979d7fbee05STejun Heo 		class = ATA_DEV_NONE;
980d7fbee05STejun Heo 
981c6fd2807SJeff Garzik 	return class;
982c6fd2807SJeff Garzik }
983c6fd2807SJeff Garzik 
984c6fd2807SJeff Garzik /**
985c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
986c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
987c6fd2807SJeff Garzik  *	@s: string into which data is output
988c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
989c6fd2807SJeff Garzik  *	@len: length of string to return. must be an even number.
990c6fd2807SJeff Garzik  *
991c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
992c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
993c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
994c6fd2807SJeff Garzik  *
995c6fd2807SJeff Garzik  *	LOCKING:
996c6fd2807SJeff Garzik  *	caller.
997c6fd2807SJeff Garzik  */
998c6fd2807SJeff Garzik 
999c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
1000c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
1001c6fd2807SJeff Garzik {
1002c6fd2807SJeff Garzik 	unsigned int c;
1003c6fd2807SJeff Garzik 
1004c6fd2807SJeff Garzik 	while (len > 0) {
1005c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
1006c6fd2807SJeff Garzik 		*s = c;
1007c6fd2807SJeff Garzik 		s++;
1008c6fd2807SJeff Garzik 
1009c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
1010c6fd2807SJeff Garzik 		*s = c;
1011c6fd2807SJeff Garzik 		s++;
1012c6fd2807SJeff Garzik 
1013c6fd2807SJeff Garzik 		ofs++;
1014c6fd2807SJeff Garzik 		len -= 2;
1015c6fd2807SJeff Garzik 	}
1016c6fd2807SJeff Garzik }
1017c6fd2807SJeff Garzik 
1018c6fd2807SJeff Garzik /**
1019c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1020c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
1021c6fd2807SJeff Garzik  *	@s: string into which data is output
1022c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
1023c6fd2807SJeff Garzik  *	@len: length of string to return. must be an odd number.
1024c6fd2807SJeff Garzik  *
1025c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
1026c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
1027c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
1028c6fd2807SJeff Garzik  *
1029c6fd2807SJeff Garzik  *	LOCKING:
1030c6fd2807SJeff Garzik  *	caller.
1031c6fd2807SJeff Garzik  */
1032c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
1033c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
1034c6fd2807SJeff Garzik {
1035c6fd2807SJeff Garzik 	unsigned char *p;
1036c6fd2807SJeff Garzik 
1037c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
1038c6fd2807SJeff Garzik 
1039c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
1040c6fd2807SJeff Garzik 
1041c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
1042c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
1043c6fd2807SJeff Garzik 		p--;
1044c6fd2807SJeff Garzik 	*p = '\0';
1045c6fd2807SJeff Garzik }
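
/*
 * Illustrative sketch (editor's example, not part of the original source):
 * extracting the product string from an IDENTIFY page.  ATA_ID_PROD and
 * ATA_ID_PROD_LEN come from <linux/ata.h>; "dev" is assumed to hold a
 * valid IDENTIFY page in dev->id.
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *	// model is now a NUL-terminated, space-trimmed product string
 */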
1046c6fd2807SJeff Garzik 
1047db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id)
1048db6f8759STejun Heo {
1049db6f8759STejun Heo 	if (ata_id_has_lba(id)) {
1050db6f8759STejun Heo 		if (ata_id_has_lba48(id))
1051db6f8759STejun Heo 			return ata_id_u64(id, 100);
1052db6f8759STejun Heo 		else
1053db6f8759STejun Heo 			return ata_id_u32(id, 60);
1054db6f8759STejun Heo 	} else {
1055db6f8759STejun Heo 		if (ata_id_current_chs_valid(id))
1056db6f8759STejun Heo 			return ata_id_u32(id, 57);
1057db6f8759STejun Heo 		else
1058db6f8759STejun Heo 			return id[1] * id[3] * id[6];
1059db6f8759STejun Heo 	}
1060db6f8759STejun Heo }
1061db6f8759STejun Heo 
10621e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
10631e999736SAlan Cox {
10641e999736SAlan Cox 	u64 sectors = 0;
10651e999736SAlan Cox 
10661e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
10671e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
10681e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
10691e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10701e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10711e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10721e999736SAlan Cox 
10731e999736SAlan Cox 	return ++sectors;
10741e999736SAlan Cox }
10751e999736SAlan Cox 
10761e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
10771e999736SAlan Cox {
10781e999736SAlan Cox 	u64 sectors = 0;
10791e999736SAlan Cox 
10801e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
10811e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10821e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10831e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10841e999736SAlan Cox 
10851e999736SAlan Cox 	return ++sectors;
10861e999736SAlan Cox }
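
/*
 * Editor's note (illustrative, not part of the original source): for the
 * LBA28 helper above, a taskfile with device = 0xE1, lbah = 0x02,
 * lbam = 0x03, lbal = 0x04 encodes last-sector address 0x01020304, and the
 * helper returns 0x01020305, i.e. the capacity in sectors (one more than
 * the highest addressable LBA).
 */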
10871e999736SAlan Cox 
10881e999736SAlan Cox /**
1089c728a914STejun Heo  *	ata_read_native_max_address - Read native max address
1090c728a914STejun Heo  *	@dev: target device
1091c728a914STejun Heo  *	@max_sectors: out parameter for the result native max address
10921e999736SAlan Cox  *
1093c728a914STejun Heo  *	Perform an LBA48 or LBA28 native size query upon the device in
1094c728a914STejun Heo  *	question.
1095c728a914STejun Heo  *
1096c728a914STejun Heo  *	RETURNS:
1097c728a914STejun Heo  *	0 on success, -EACCES if command is aborted by the drive.
1098c728a914STejun Heo  *	-EIO on other errors.
10991e999736SAlan Cox  */
1100c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
11011e999736SAlan Cox {
1102c728a914STejun Heo 	unsigned int err_mask;
11031e999736SAlan Cox 	struct ata_taskfile tf;
1104c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
11051e999736SAlan Cox 
11061e999736SAlan Cox 	ata_tf_init(dev, &tf);
11071e999736SAlan Cox 
1108c728a914STejun Heo 	/* always clear all address registers */
11091e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1110c728a914STejun Heo 
1111c728a914STejun Heo 	if (lba48) {
1112c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1113c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
1114c728a914STejun Heo 	} else
1115c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1116c728a914STejun Heo 
11171e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1118c728a914STejun Heo 	tf.device |= ATA_LBA;
11191e999736SAlan Cox 
11202b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1121c728a914STejun Heo 	if (err_mask) {
1122c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1123c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1124c728a914STejun Heo 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1125c728a914STejun Heo 			return -EACCES;
1126c728a914STejun Heo 		return -EIO;
1127c728a914STejun Heo 	}
1128c728a914STejun Heo 
1129c728a914STejun Heo 	if (lba48)
1130c728a914STejun Heo 		*max_sectors = ata_tf_to_lba48(&tf);
1131c728a914STejun Heo 	else
1132c728a914STejun Heo 		*max_sectors = ata_tf_to_lba(&tf);
113393328e11SAlan Cox 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
113493328e11SAlan Cox 		(*max_sectors)--;
11351e999736SAlan Cox 	return 0;
11361e999736SAlan Cox }
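
/*
 * Editor's note: two details worth spelling out.  The taskfile returned
 * by READ NATIVE MAX (EXT) holds the address of the last addressable
 * sector, so ata_tf_to_lba48()/ata_tf_to_lba() add one to turn it into
 * a sector count; devices flagged ATA_HORKAGE_HPA_SIZE appear to report
 * a size rather than a last address, which the decrement above
 * compensates for.
 */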
11371e999736SAlan Cox 
11381e999736SAlan Cox /**
1139c728a914STejun Heo  *	ata_set_max_sectors - Set max sectors
1140c728a914STejun Heo  *	@dev: target device
11416b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
11421e999736SAlan Cox  *
1143c728a914STejun Heo  *	Set max sectors of @dev to @new_sectors.
1144c728a914STejun Heo  *
1145c728a914STejun Heo  *	RETURNS:
1146c728a914STejun Heo  *	0 on success, -EACCES if command is aborted or denied (due to
1147c728a914STejun Heo  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1148c728a914STejun Heo  *	errors.
11491e999736SAlan Cox  */
115005027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
11511e999736SAlan Cox {
1152c728a914STejun Heo 	unsigned int err_mask;
11531e999736SAlan Cox 	struct ata_taskfile tf;
1154c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
11551e999736SAlan Cox 
11561e999736SAlan Cox 	new_sectors--;
11571e999736SAlan Cox 
11581e999736SAlan Cox 	ata_tf_init(dev, &tf);
11591e999736SAlan Cox 
1160c728a914STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
11611e999736SAlan Cox 
1162c728a914STejun Heo 	if (lba48) {
1163c728a914STejun Heo 		tf.command = ATA_CMD_SET_MAX_EXT;
1164c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
11651e999736SAlan Cox 
11661e999736SAlan Cox 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
11671e999736SAlan Cox 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
11681e999736SAlan Cox 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
11691e582ba4STejun Heo 	} else {
11701e999736SAlan Cox 		tf.command = ATA_CMD_SET_MAX;
1171c728a914STejun Heo 
11721e582ba4STejun Heo 		tf.device |= (new_sectors >> 24) & 0xf;
11731e582ba4STejun Heo 	}
11741e582ba4STejun Heo 
11751e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1176c728a914STejun Heo 	tf.device |= ATA_LBA;
11771e999736SAlan Cox 
11781e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
11791e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
11801e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
11811e999736SAlan Cox 
11822b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1183c728a914STejun Heo 	if (err_mask) {
1184c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1185c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1186c728a914STejun Heo 		if (err_mask == AC_ERR_DEV &&
1187c728a914STejun Heo 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1188c728a914STejun Heo 			return -EACCES;
1189c728a914STejun Heo 		return -EIO;
1190c728a914STejun Heo 	}
1191c728a914STejun Heo 
11921e999736SAlan Cox 	return 0;
11931e999736SAlan Cox }
11941e999736SAlan Cox 
11951e999736SAlan Cox /**
11961e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
11971e999736SAlan Cox  *	@dev: Device to resize
11981e999736SAlan Cox  *
11991e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
12001e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
12011e999736SAlan Cox  *	the drive has the HPA feature set enabled.
120205027adcSTejun Heo  *
120305027adcSTejun Heo  *	RETURNS:
120405027adcSTejun Heo  *	0 on success, -errno on failure.
12051e999736SAlan Cox  */
120605027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev)
12071e999736SAlan Cox {
120805027adcSTejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
120905027adcSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
121005027adcSTejun Heo 	u64 sectors = ata_id_n_sectors(dev->id);
121105027adcSTejun Heo 	u64 native_sectors;
1212c728a914STejun Heo 	int rc;
12131e999736SAlan Cox 
121405027adcSTejun Heo 	/* do we need to do it? */
121505027adcSTejun Heo 	if (dev->class != ATA_DEV_ATA ||
121605027adcSTejun Heo 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
121705027adcSTejun Heo 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1218c728a914STejun Heo 		return 0;
12191e999736SAlan Cox 
122005027adcSTejun Heo 	/* read native max address */
122105027adcSTejun Heo 	rc = ata_read_native_max_address(dev, &native_sectors);
122205027adcSTejun Heo 	if (rc) {
122305027adcSTejun Heo 		/* If HPA isn't going to be unlocked, skip HPA
122405027adcSTejun Heo 		 * resizing from the next try.
122505027adcSTejun Heo 		 */
122605027adcSTejun Heo 		if (!ata_ignore_hpa) {
122705027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
122805027adcSTejun Heo 				       "broken, will skip HPA handling\n");
122905027adcSTejun Heo 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
123005027adcSTejun Heo 
123105027adcSTejun Heo 			/* we can continue if device aborted the command */
123205027adcSTejun Heo 			if (rc == -EACCES)
123305027adcSTejun Heo 				rc = 0;
123405027adcSTejun Heo 		}
123505027adcSTejun Heo 
123605027adcSTejun Heo 		return rc;
123705027adcSTejun Heo 	}
123805027adcSTejun Heo 
123905027adcSTejun Heo 	/* nothing to do? */
124005027adcSTejun Heo 	if (native_sectors <= sectors || !ata_ignore_hpa) {
124105027adcSTejun Heo 		if (!print_info || native_sectors == sectors)
124205027adcSTejun Heo 			return 0;
124305027adcSTejun Heo 
124405027adcSTejun Heo 		if (native_sectors > sectors)
12451e999736SAlan Cox 			ata_dev_printk(dev, KERN_INFO,
124605027adcSTejun Heo 				"HPA detected: current %llu, native %llu\n",
124705027adcSTejun Heo 				(unsigned long long)sectors,
124805027adcSTejun Heo 				(unsigned long long)native_sectors);
124905027adcSTejun Heo 		else if (native_sectors < sectors)
125005027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING,
125105027adcSTejun Heo 				"native sectors (%llu) is smaller than "
125205027adcSTejun Heo 				"sectors (%llu)\n",
125305027adcSTejun Heo 				(unsigned long long)native_sectors,
125405027adcSTejun Heo 				(unsigned long long)sectors);
125505027adcSTejun Heo 		return 0;
12561e999736SAlan Cox 	}
125737301a55STejun Heo 
125805027adcSTejun Heo 	/* let's unlock HPA */
125905027adcSTejun Heo 	rc = ata_set_max_sectors(dev, native_sectors);
126005027adcSTejun Heo 	if (rc == -EACCES) {
126105027adcSTejun Heo 		/* if device aborted the command, skip HPA resizing */
126205027adcSTejun Heo 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
126305027adcSTejun Heo 			       "(%llu -> %llu), skipping HPA handling\n",
126405027adcSTejun Heo 			       (unsigned long long)sectors,
126505027adcSTejun Heo 			       (unsigned long long)native_sectors);
126605027adcSTejun Heo 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
126705027adcSTejun Heo 		return 0;
126805027adcSTejun Heo 	} else if (rc)
126905027adcSTejun Heo 		return rc;
127005027adcSTejun Heo 
127105027adcSTejun Heo 	/* re-read IDENTIFY data */
127205027adcSTejun Heo 	rc = ata_dev_reread_id(dev, 0);
127305027adcSTejun Heo 	if (rc) {
127405027adcSTejun Heo 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
127505027adcSTejun Heo 			       "data after HPA resizing\n");
127605027adcSTejun Heo 		return rc;
127705027adcSTejun Heo 	}
127805027adcSTejun Heo 
127905027adcSTejun Heo 	if (print_info) {
128005027adcSTejun Heo 		u64 new_sectors = ata_id_n_sectors(dev->id);
128105027adcSTejun Heo 		ata_dev_printk(dev, KERN_INFO,
128205027adcSTejun Heo 			"HPA unlocked: %llu -> %llu, native %llu\n",
128305027adcSTejun Heo 			(unsigned long long)sectors,
128405027adcSTejun Heo 			(unsigned long long)new_sectors,
128505027adcSTejun Heo 			(unsigned long long)native_sectors);
128605027adcSTejun Heo 	}
128705027adcSTejun Heo 
128805027adcSTejun Heo 	return 0;
12891e999736SAlan Cox }
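
/*
 * Editor's note: a worked example with hypothetical numbers.  Suppose
 * IDENTIFY reports n_sectors = 117210240 while READ NATIVE MAX returns
 * 156301488.  With libata.ignore_hpa set, ata_hpa_resize() issues
 * SET MAX ADDRESS for 156301488, re-reads IDENTIFY and logs
 * "HPA unlocked: 117210240 -> 156301488, native 156301488"; without
 * ignore_hpa only the informational "HPA detected" message is printed
 * and the smaller size is kept.
 */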
12901e999736SAlan Cox 
1291c6fd2807SJeff Garzik /**
1292c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1293c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1294c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1295c6fd2807SJeff Garzik  *
1296c6fd2807SJeff Garzik  *	This function performs no action.
1297c6fd2807SJeff Garzik  *
1298c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1299c6fd2807SJeff Garzik  *
1300c6fd2807SJeff Garzik  *	LOCKING:
1301c6fd2807SJeff Garzik  *	caller.
1302c6fd2807SJeff Garzik  */
1303c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1304c6fd2807SJeff Garzik {
1305c6fd2807SJeff Garzik }
1306c6fd2807SJeff Garzik 
1307c6fd2807SJeff Garzik 
1308c6fd2807SJeff Garzik /**
1309c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1310c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1311c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1312c6fd2807SJeff Garzik  *
1313c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1314c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1315c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1316c6fd2807SJeff Garzik  *
1317c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1318c6fd2807SJeff Garzik  *
1319c6fd2807SJeff Garzik  *	LOCKING:
1320c6fd2807SJeff Garzik  *	caller.
1321c6fd2807SJeff Garzik  */
1322c6fd2807SJeff Garzik 
1323c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1324c6fd2807SJeff Garzik {
1325c6fd2807SJeff Garzik 	u8 tmp;
1326c6fd2807SJeff Garzik 
1327c6fd2807SJeff Garzik 	if (device == 0)
1328c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1329c6fd2807SJeff Garzik 	else
1330c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1331c6fd2807SJeff Garzik 
13320d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1333c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1334c6fd2807SJeff Garzik }
1335c6fd2807SJeff Garzik 
1336c6fd2807SJeff Garzik /**
1337c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1338c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1339c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1340c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1341c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1342c6fd2807SJeff Garzik  *
1343c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1344c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1345c6fd2807SJeff Garzik  *	ATA channel.
1346c6fd2807SJeff Garzik  *
1347c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1348c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1349c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1350c6fd2807SJeff Garzik  *
1351c6fd2807SJeff Garzik  *	LOCKING:
1352c6fd2807SJeff Garzik  *	caller.
1353c6fd2807SJeff Garzik  */
1354c6fd2807SJeff Garzik 
1355c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1356c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1357c6fd2807SJeff Garzik {
1358c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
135944877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
136044877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1361c6fd2807SJeff Garzik 
1362c6fd2807SJeff Garzik 	if (wait)
1363c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1364c6fd2807SJeff Garzik 
1365c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1366c6fd2807SJeff Garzik 
1367c6fd2807SJeff Garzik 	if (wait) {
13689af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1369c6fd2807SJeff Garzik 			msleep(150);
1370c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1371c6fd2807SJeff Garzik 	}
1372c6fd2807SJeff Garzik }
1373c6fd2807SJeff Garzik 
1374c6fd2807SJeff Garzik /**
1375c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1376c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1377c6fd2807SJeff Garzik  *
1378c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1379c6fd2807SJeff Garzik  *	page.
1380c6fd2807SJeff Garzik  *
1381c6fd2807SJeff Garzik  *	LOCKING:
1382c6fd2807SJeff Garzik  *	caller.
1383c6fd2807SJeff Garzik  */
1384c6fd2807SJeff Garzik 
1385c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1386c6fd2807SJeff Garzik {
1387c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1388c6fd2807SJeff Garzik 		"53==0x%04x  "
1389c6fd2807SJeff Garzik 		"63==0x%04x  "
1390c6fd2807SJeff Garzik 		"64==0x%04x  "
1391c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1392c6fd2807SJeff Garzik 		id[49],
1393c6fd2807SJeff Garzik 		id[53],
1394c6fd2807SJeff Garzik 		id[63],
1395c6fd2807SJeff Garzik 		id[64],
1396c6fd2807SJeff Garzik 		id[75]);
1397c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1398c6fd2807SJeff Garzik 		"81==0x%04x  "
1399c6fd2807SJeff Garzik 		"82==0x%04x  "
1400c6fd2807SJeff Garzik 		"83==0x%04x  "
1401c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1402c6fd2807SJeff Garzik 		id[80],
1403c6fd2807SJeff Garzik 		id[81],
1404c6fd2807SJeff Garzik 		id[82],
1405c6fd2807SJeff Garzik 		id[83],
1406c6fd2807SJeff Garzik 		id[84]);
1407c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1408c6fd2807SJeff Garzik 		"93==0x%04x\n",
1409c6fd2807SJeff Garzik 		id[88],
1410c6fd2807SJeff Garzik 		id[93]);
1411c6fd2807SJeff Garzik }
1412c6fd2807SJeff Garzik 
1413c6fd2807SJeff Garzik /**
1414c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1415c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1416c6fd2807SJeff Garzik  *
1417c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1418c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1419c6fd2807SJeff Garzik  *
1420c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1421c6fd2807SJeff Garzik  *
1422c6fd2807SJeff Garzik  *	LOCKING:
1423c6fd2807SJeff Garzik  *	None.
1424c6fd2807SJeff Garzik  *
1425c6fd2807SJeff Garzik  *	RETURNS:
1426c6fd2807SJeff Garzik  *	Computed xfermask
1427c6fd2807SJeff Garzik  */
14287dc951aeSTejun Heo unsigned long ata_id_xfermask(const u16 *id)
1429c6fd2807SJeff Garzik {
14307dc951aeSTejun Heo 	unsigned long pio_mask, mwdma_mask, udma_mask;
1431c6fd2807SJeff Garzik 
1432c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1433c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1434c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1435c6fd2807SJeff Garzik 		pio_mask <<= 3;
1436c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1437c6fd2807SJeff Garzik 	} else {
1438c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1439c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1440c6fd2807SJeff Garzik 		 * a mask.
1441c6fd2807SJeff Garzik 		 */
14427a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
144346767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
144446767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
144546767aebSAlan Cox 		else
144646767aebSAlan Cox 			pio_mask = 1;
1447c6fd2807SJeff Garzik 
1448c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1449c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1450c6fd2807SJeff Garzik 		 * process. However it's the speeds, not the modes, that
1451c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1452c6fd2807SJeff Garzik 		 * will get this right anyway
1453c6fd2807SJeff Garzik 		 */
1454c6fd2807SJeff Garzik 	}
1455c6fd2807SJeff Garzik 
1456c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1457c6fd2807SJeff Garzik 
1458b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1459b352e57dSAlan Cox 		/*
1460b352e57dSAlan Cox 		 *	Process compact flash extended modes
1461b352e57dSAlan Cox 		 */
1462b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1463b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1464b352e57dSAlan Cox 
1465b352e57dSAlan Cox 		if (pio)
1466b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1467b352e57dSAlan Cox 		if (pio > 1)
1468b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1469b352e57dSAlan Cox 		if (dma)
1470b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1471b352e57dSAlan Cox 		if (dma > 1)
1472b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1473b352e57dSAlan Cox 	}
1474b352e57dSAlan Cox 
1475c6fd2807SJeff Garzik 	udma_mask = 0;
1476c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1477c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1478c6fd2807SJeff Garzik 
1479c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1480c6fd2807SJeff Garzik }
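
/*
 * Editor's note: a small sketch with assumed values of the word-53/
 * word-64 path above.  A drive that sets bit 1 of word 53 and
 * advertises PIO3+4 in word 64 ends up with a five-bit PIO mask
 * covering modes 0-4.
 */
#if 0	/* illustration only, not built */
static unsigned long example_pio_mask(void)
{
	u16 id53 = 1 << 1;		/* word 64 is valid */
	u16 id64 = 0x0003;		/* PIO3 and PIO4 supported */
	unsigned long pio_mask = 0;	/* word-51 fallback omitted here */

	if (id53 & (1 << 1))
		pio_mask = ((id64 & 0x03) << 3) | 0x7;	/* 0x1f: PIO0..4 */
	return pio_mask;
}
#endif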
1481c6fd2807SJeff Garzik 
1482c6fd2807SJeff Garzik /**
1483442eacc3SJeff Garzik  *	ata_pio_queue_task - Queue port_task
1484c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1485c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
148665f27f38SDavid Howells  *	@data: data for @fn to use
1487c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1488c6fd2807SJeff Garzik  *
1489c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1490c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1491c6fd2807SJeff Garzik  *	user (low level driver)'s responsibility to make sure that only
1492c6fd2807SJeff Garzik  *	one task is active at any given time.
1493c6fd2807SJeff Garzik  *
1494c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1495442eacc3SJeff Garzik  *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
1496c6fd2807SJeff Garzik  *	synchronization.
1497c6fd2807SJeff Garzik  *
1498c6fd2807SJeff Garzik  *	LOCKING:
1499c6fd2807SJeff Garzik  *	Inherited from caller.
1500c6fd2807SJeff Garzik  */
1501442eacc3SJeff Garzik static void ata_pio_queue_task(struct ata_port *ap, void *data,
1502c6fd2807SJeff Garzik 			       unsigned long delay)
1503c6fd2807SJeff Garzik {
150465f27f38SDavid Howells 	ap->port_task_data = data;
1505c6fd2807SJeff Garzik 
150645a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
150745a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1508c6fd2807SJeff Garzik }
1509c6fd2807SJeff Garzik 
1510c6fd2807SJeff Garzik /**
1511c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1512c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1513c6fd2807SJeff Garzik  *
1514c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1515c6fd2807SJeff Garzik  *	be running or scheduled.
1516c6fd2807SJeff Garzik  *
1517c6fd2807SJeff Garzik  *	LOCKING:
1518c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1519c6fd2807SJeff Garzik  */
1520c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1521c6fd2807SJeff Garzik {
1522c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1523c6fd2807SJeff Garzik 
152445a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1525c6fd2807SJeff Garzik 
1526c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1527c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1528c6fd2807SJeff Garzik }
1529c6fd2807SJeff Garzik 
15307102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1531c6fd2807SJeff Garzik {
1532c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1533c6fd2807SJeff Garzik 
1534c6fd2807SJeff Garzik 	complete(waiting);
1535c6fd2807SJeff Garzik }
1536c6fd2807SJeff Garzik 
1537c6fd2807SJeff Garzik /**
15382432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1539c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1540c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1541c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1542c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
15435c1ad8b3SRandy Dunlap  *	@sgl: sg list for the data buffer of the command
15442432697bSTejun Heo  *	@n_elem: Number of sg entries
15452b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
1546c6fd2807SJeff Garzik  *
1547c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1548c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1549c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1550c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1551c6fd2807SJeff Garzik  *	clean up after timeout.
1552c6fd2807SJeff Garzik  *
1553c6fd2807SJeff Garzik  *	LOCKING:
1554c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1555c6fd2807SJeff Garzik  *
1556c6fd2807SJeff Garzik  *	RETURNS:
1557c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1558c6fd2807SJeff Garzik  */
15592432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1560c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
156187260216SJens Axboe 			      int dma_dir, struct scatterlist *sgl,
15622b789108STejun Heo 			      unsigned int n_elem, unsigned long timeout)
1563c6fd2807SJeff Garzik {
15649af5c9c9STejun Heo 	struct ata_link *link = dev->link;
15659af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1566c6fd2807SJeff Garzik 	u8 command = tf->command;
1567c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1568c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1569c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1570da917d69STejun Heo 	int preempted_nr_active_links;
1571c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1572c6fd2807SJeff Garzik 	unsigned long flags;
1573c6fd2807SJeff Garzik 	unsigned int err_mask;
1574c6fd2807SJeff Garzik 	int rc;
1575c6fd2807SJeff Garzik 
1576c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1577c6fd2807SJeff Garzik 
1578c6fd2807SJeff Garzik 	/* no internal command while frozen */
1579c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1580c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1581c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1582c6fd2807SJeff Garzik 	}
1583c6fd2807SJeff Garzik 
1584c6fd2807SJeff Garzik 	/* initialize internal qc */
1585c6fd2807SJeff Garzik 
1586c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1587c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1588c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1589c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1590c6fd2807SJeff Garzik 	 */
1591c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1592c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1593c6fd2807SJeff Garzik 	else
1594c6fd2807SJeff Garzik 		tag = 0;
1595c6fd2807SJeff Garzik 
1596c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1597c6fd2807SJeff Garzik 		BUG();
1598c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1599c6fd2807SJeff Garzik 
1600c6fd2807SJeff Garzik 	qc->tag = tag;
1601c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1602c6fd2807SJeff Garzik 	qc->ap = ap;
1603c6fd2807SJeff Garzik 	qc->dev = dev;
1604c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1605c6fd2807SJeff Garzik 
16069af5c9c9STejun Heo 	preempted_tag = link->active_tag;
16079af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1608c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1609da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
16109af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
16119af5c9c9STejun Heo 	link->sactive = 0;
1612c6fd2807SJeff Garzik 	ap->qc_active = 0;
1613da917d69STejun Heo 	ap->nr_active_links = 0;
1614c6fd2807SJeff Garzik 
1615c6fd2807SJeff Garzik 	/* prepare & issue qc */
1616c6fd2807SJeff Garzik 	qc->tf = *tf;
1617c6fd2807SJeff Garzik 	if (cdb)
1618c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1619c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1620c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1621c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
16222432697bSTejun Heo 		unsigned int i, buflen = 0;
162387260216SJens Axboe 		struct scatterlist *sg;
16242432697bSTejun Heo 
162587260216SJens Axboe 		for_each_sg(sgl, sg, n_elem, i)
162687260216SJens Axboe 			buflen += sg->length;
16272432697bSTejun Heo 
162887260216SJens Axboe 		ata_sg_init(qc, sgl, n_elem);
162949c80429SBrian King 		qc->nbytes = buflen;
1630c6fd2807SJeff Garzik 	}
1631c6fd2807SJeff Garzik 
1632c6fd2807SJeff Garzik 	qc->private_data = &wait;
1633c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1634c6fd2807SJeff Garzik 
1635c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1636c6fd2807SJeff Garzik 
1637c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1638c6fd2807SJeff Garzik 
16392b789108STejun Heo 	if (!timeout)
16402b789108STejun Heo 		timeout = ata_probe_timeout * 1000 / HZ;
16412b789108STejun Heo 
16422b789108STejun Heo 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1643c6fd2807SJeff Garzik 
1644c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1645c6fd2807SJeff Garzik 
1646c6fd2807SJeff Garzik 	if (!rc) {
1647c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1648c6fd2807SJeff Garzik 
1649c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1650c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1651c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1652c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1653c6fd2807SJeff Garzik 		 */
1654c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1655c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1656c6fd2807SJeff Garzik 
1657c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1658c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1659c6fd2807SJeff Garzik 			else
1660c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1661c6fd2807SJeff Garzik 
1662c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1663c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1664c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1665c6fd2807SJeff Garzik 		}
1666c6fd2807SJeff Garzik 
1667c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1668c6fd2807SJeff Garzik 	}
1669c6fd2807SJeff Garzik 
1670c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1671c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1672c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1673c6fd2807SJeff Garzik 
1674a51d644aSTejun Heo 	/* perform minimal error analysis */
1675a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1676a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1677a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1678a51d644aSTejun Heo 
1679a51d644aSTejun Heo 		if (!qc->err_mask)
1680c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1681a51d644aSTejun Heo 
1682a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1683a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1684c6fd2807SJeff Garzik 	}
1685c6fd2807SJeff Garzik 
1686c6fd2807SJeff Garzik 	/* finish up */
1687c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1688c6fd2807SJeff Garzik 
1689c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1690c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1691c6fd2807SJeff Garzik 
1692c6fd2807SJeff Garzik 	ata_qc_free(qc);
16939af5c9c9STejun Heo 	link->active_tag = preempted_tag;
16949af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1695c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1696da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1697c6fd2807SJeff Garzik 
1698c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1699c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1700c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1701c6fd2807SJeff Garzik 	 * port.
1702c6fd2807SJeff Garzik 	 *
1703c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1704c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1705c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1706c6fd2807SJeff Garzik 	 *
1707c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1708c6fd2807SJeff Garzik 	 */
1709c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1710c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1711c6fd2807SJeff Garzik 		ata_port_probe(ap);
1712c6fd2807SJeff Garzik 	}
1713c6fd2807SJeff Garzik 
1714c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1715c6fd2807SJeff Garzik 
1716c6fd2807SJeff Garzik 	return err_mask;
1717c6fd2807SJeff Garzik }
1718c6fd2807SJeff Garzik 
1719c6fd2807SJeff Garzik /**
172033480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
17212432697bSTejun Heo  *	@dev: Device to which the command is sent
17222432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
17232432697bSTejun Heo  *	@cdb: CDB for packet command
17242432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
17252432697bSTejun Heo  *	@buf: Data buffer of the command
17262432697bSTejun Heo  *	@buflen: Length of data buffer
17272b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
17282432697bSTejun Heo  *
17292432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
17302432697bSTejun Heo  *	buffer instead of sg list.
17312432697bSTejun Heo  *
17322432697bSTejun Heo  *	LOCKING:
17332432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
17342432697bSTejun Heo  *
17352432697bSTejun Heo  *	RETURNS:
17362432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
17372432697bSTejun Heo  */
17382432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
17392432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
17402b789108STejun Heo 			   int dma_dir, void *buf, unsigned int buflen,
17412b789108STejun Heo 			   unsigned long timeout)
17422432697bSTejun Heo {
174333480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
174433480a0eSTejun Heo 	unsigned int n_elem = 0;
17452432697bSTejun Heo 
174633480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
174733480a0eSTejun Heo 		WARN_ON(!buf);
17482432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
174933480a0eSTejun Heo 		psg = &sg;
175033480a0eSTejun Heo 		n_elem++;
175133480a0eSTejun Heo 	}
17522432697bSTejun Heo 
17532b789108STejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
17542b789108STejun Heo 				    timeout);
17552432697bSTejun Heo }
17562432697bSTejun Heo 
17572432697bSTejun Heo /**
1758c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1759c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1760c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1761c6fd2807SJeff Garzik  *
1762c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1763c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1764c6fd2807SJeff Garzik  *
1765c6fd2807SJeff Garzik  *	LOCKING:
1766c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1767c6fd2807SJeff Garzik  *
1768c6fd2807SJeff Garzik  *	RETURNS:
1769c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1770c6fd2807SJeff Garzik  */
1771c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1772c6fd2807SJeff Garzik {
1773c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1774c6fd2807SJeff Garzik 
1775c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1776c6fd2807SJeff Garzik 
1777c6fd2807SJeff Garzik 	tf.command = cmd;
1778c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1779c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1780c6fd2807SJeff Garzik 
17812b789108STejun Heo 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1782c6fd2807SJeff Garzik }
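
/*
 * Editor's note: a hedged usage sketch.  A caller wanting to issue a
 * bare no-data opcode, for instance spinning a drive down, could do
 * something like the fragment below; the opcode chosen here is only an
 * example and the helper is hypothetical.
 */
#if 0	/* illustration only, not built */
static void example_standby(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);

	if (err_mask)
		ata_dev_printk(dev, KERN_WARNING,
			       "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
			       err_mask);
}
#endif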
1783c6fd2807SJeff Garzik 
1784c6fd2807SJeff Garzik /**
1785c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1786c6fd2807SJeff Garzik  *	@adev: ATA device
1787c6fd2807SJeff Garzik  *
1788c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1789c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1790c6fd2807SJeff Garzik  */
1791c6fd2807SJeff Garzik 
1792c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1793c6fd2807SJeff Garzik {
1794432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check
1795432729f0SAlan Cox 	   as the caller should know this. */
17969af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1797c6fd2807SJeff Garzik 		return 0;
1798432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1799432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1800c6fd2807SJeff Garzik 		return 1;
1801432729f0SAlan Cox 	/* We turn it on when possible */
1802432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1803432729f0SAlan Cox 		return 1;
1804432729f0SAlan Cox 	return 0;
1805432729f0SAlan Cox }
1806c6fd2807SJeff Garzik 
1807432729f0SAlan Cox /**
1808432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1809432729f0SAlan Cox  *	@adev: ATA device
1810432729f0SAlan Cox  *
1811432729f0SAlan Cox  *	Compute the mask of PIO modes that can be used when IORDY is not
1812432729f0SAlan Cox  *	being employed.
1813432729f0SAlan Cox  */
1814432729f0SAlan Cox 
1815432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1816432729f0SAlan Cox {
1817c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1818c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1819432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1820c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1821c6fd2807SJeff Garzik 		if (pio) {
1822c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1823c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1824432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1825432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1826c6fd2807SJeff Garzik 		}
1827c6fd2807SJeff Garzik 	}
1828432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1829c6fd2807SJeff Garzik }
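
/*
 * Editor's note: a worked example of the cycle-time check above, using
 * hypothetical values.  A drive reporting 383 ns in ATA_ID_EIDE_PIO
 * (its minimum PIO cycle time without IORDY) is slower than the 240 ns
 * PIO2 cycle, so only PIO0-1 (3 << ATA_SHIFT_PIO) are offered without
 * IORDY; a drive reporting 180 ns gets PIO0-2 (7 << ATA_SHIFT_PIO).
 */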
1830c6fd2807SJeff Garzik 
1831c6fd2807SJeff Garzik /**
1832c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1833c6fd2807SJeff Garzik  *	@dev: target device
1834c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1835bff04647STejun Heo  *	@flags: ATA_READID_* flags
1836c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1837c6fd2807SJeff Garzik  *
1838c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1839c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1840c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1841c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1842c6fd2807SJeff Garzik  *
184350a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
184450a99018SAlan Cox  *	now we abort if we hit that case.
184550a99018SAlan Cox  *
1846c6fd2807SJeff Garzik  *	LOCKING:
1847c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1848c6fd2807SJeff Garzik  *
1849c6fd2807SJeff Garzik  *	RETURNS:
1850c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1851c6fd2807SJeff Garzik  */
1852c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1853bff04647STejun Heo 		    unsigned int flags, u16 *id)
1854c6fd2807SJeff Garzik {
18559af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1856c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1857c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1858c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1859c6fd2807SJeff Garzik 	const char *reason;
186054936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1861c6fd2807SJeff Garzik 	int rc;
1862c6fd2807SJeff Garzik 
1863c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
186444877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1865c6fd2807SJeff Garzik 
1866c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1867c6fd2807SJeff Garzik  retry:
1868c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1869c6fd2807SJeff Garzik 
1870c6fd2807SJeff Garzik 	switch (class) {
1871c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1872c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1873c6fd2807SJeff Garzik 		break;
1874c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1875c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1876c6fd2807SJeff Garzik 		break;
1877c6fd2807SJeff Garzik 	default:
1878c6fd2807SJeff Garzik 		rc = -ENODEV;
1879c6fd2807SJeff Garzik 		reason = "unsupported class";
1880c6fd2807SJeff Garzik 		goto err_out;
1881c6fd2807SJeff Garzik 	}
1882c6fd2807SJeff Garzik 
1883c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
188481afe893STejun Heo 
188581afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
188681afe893STejun Heo 	 * sure those are properly initialized.
188781afe893STejun Heo 	 */
188881afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
188981afe893STejun Heo 
189081afe893STejun Heo 	/* Device presence detection is unreliable on some
189181afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
189281afe893STejun Heo 	 */
189381afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1894c6fd2807SJeff Garzik 
1895c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
18962b789108STejun Heo 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1897c6fd2807SJeff Garzik 	if (err_mask) {
1898800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
189955a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
190044877b4eSTejun Heo 				ap->print_id, dev->devno);
190155a8e2c8STejun Heo 			return -ENOENT;
190255a8e2c8STejun Heo 		}
190355a8e2c8STejun Heo 
190454936f8bSTejun Heo 		/* Device or controller might have reported the wrong
190554936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
190654936f8bSTejun Heo 		 * the current one is aborted by the device.
190754936f8bSTejun Heo 		 */
190854936f8bSTejun Heo 		if (may_fallback &&
190954936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
191054936f8bSTejun Heo 			may_fallback = 0;
191154936f8bSTejun Heo 
191254936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
191354936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
191454936f8bSTejun Heo 			else
191554936f8bSTejun Heo 				class = ATA_DEV_ATA;
191654936f8bSTejun Heo 			goto retry;
191754936f8bSTejun Heo 		}
191854936f8bSTejun Heo 
1919c6fd2807SJeff Garzik 		rc = -EIO;
1920c6fd2807SJeff Garzik 		reason = "I/O error";
1921c6fd2807SJeff Garzik 		goto err_out;
1922c6fd2807SJeff Garzik 	}
1923c6fd2807SJeff Garzik 
192454936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
192554936f8bSTejun Heo 	 * successfully at least once.
192654936f8bSTejun Heo 	 */
192754936f8bSTejun Heo 	may_fallback = 0;
192854936f8bSTejun Heo 
1929c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1930c6fd2807SJeff Garzik 
1931c6fd2807SJeff Garzik 	/* sanity check */
1932c6fd2807SJeff Garzik 	rc = -EINVAL;
19336070068bSAlan Cox 	reason = "device reports invalid type";
19344a3381feSJeff Garzik 
19354a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
19364a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
19374a3381feSJeff Garzik 			goto err_out;
19384a3381feSJeff Garzik 	} else {
19394a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1940c6fd2807SJeff Garzik 			goto err_out;
1941c6fd2807SJeff Garzik 	}
1942c6fd2807SJeff Garzik 
1943169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1944169439c2SMark Lord 		tried_spinup = 1;
1945169439c2SMark Lord 		/*
1946169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1947169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1948169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1949169439c2SMark Lord 		 */
1950218f3d30SJeff Garzik 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1951fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
1952169439c2SMark Lord 			rc = -EIO;
1953169439c2SMark Lord 			reason = "SPINUP failed";
1954169439c2SMark Lord 			goto err_out;
1955169439c2SMark Lord 		}
1956169439c2SMark Lord 		/*
1957169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1958169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1959169439c2SMark Lord 		 */
1960169439c2SMark Lord 		if (id[2] == 0x37c8)
1961169439c2SMark Lord 			goto retry;
1962169439c2SMark Lord 	}
1963169439c2SMark Lord 
1964bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1965c6fd2807SJeff Garzik 		/*
1966c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1967c6fd2807SJeff Garzik 		 * SRST RESET
196850a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
196950a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1970c6fd2807SJeff Garzik 		 * anything else..
1971c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
197250a99018SAlan Cox 		 *
197350a99018SAlan Cox 		 * Note that ATA4 says LBA is mandatory so the second check
197450a99018SAlan Cox 		 * should never trigger.
1975c6fd2807SJeff Garzik 		 */
1976c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1977c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1978c6fd2807SJeff Garzik 			if (err_mask) {
1979c6fd2807SJeff Garzik 				rc = -EIO;
1980c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1981c6fd2807SJeff Garzik 				goto err_out;
1982c6fd2807SJeff Garzik 			}
1983c6fd2807SJeff Garzik 
1984c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1985c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1986c6fd2807SJeff Garzik 			 */
1987bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1988c6fd2807SJeff Garzik 			goto retry;
1989c6fd2807SJeff Garzik 		}
1990c6fd2807SJeff Garzik 	}
1991c6fd2807SJeff Garzik 
1992c6fd2807SJeff Garzik 	*p_class = class;
1993c6fd2807SJeff Garzik 
1994c6fd2807SJeff Garzik 	return 0;
1995c6fd2807SJeff Garzik 
1996c6fd2807SJeff Garzik  err_out:
1997c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1998c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1999c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2000c6fd2807SJeff Garzik 	return rc;
2001c6fd2807SJeff Garzik }
2002c6fd2807SJeff Garzik 
2003c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
2004c6fd2807SJeff Garzik {
20059af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20069af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2007c6fd2807SJeff Garzik }
2008c6fd2807SJeff Garzik 
2009c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
2010c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
2011c6fd2807SJeff Garzik {
20129af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
2013c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2014c6fd2807SJeff Garzik 
2015c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
2016c6fd2807SJeff Garzik 		desc[0] = '\0';
2017c6fd2807SJeff Garzik 		return;
2018c6fd2807SJeff Garzik 	}
201975683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
20206919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
20216919a0a6SAlan Cox 		return;
20226919a0a6SAlan Cox 	}
2023c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
2024cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2025c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
2026c6fd2807SJeff Garzik 	}
2027c6fd2807SJeff Garzik 
2028c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
2029c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2030c6fd2807SJeff Garzik 	else
2031c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2032c6fd2807SJeff Garzik }
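
/*
 * Editor's note: a small example with assumed values of the strings
 * built above.  A drive whose IDENTIFY queue-depth field reports 32
 * tags on a host limited to 31 outstanding commands is described as
 * "NCQ (depth 31/32)"; if the host depth is at least the device depth
 * only "NCQ (depth 32)" is printed, and devices carrying the
 * ATA_HORKAGE_NONCQ horkage get "NCQ (not used)".
 */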
2033c6fd2807SJeff Garzik 
2034c6fd2807SJeff Garzik /**
2035c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2036c6fd2807SJeff Garzik  *	@dev: Target device to configure
2037c6fd2807SJeff Garzik  *
2038c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
2039c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
2040c6fd2807SJeff Garzik  *
2041c6fd2807SJeff Garzik  *	LOCKING:
2042c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
2043c6fd2807SJeff Garzik  *
2044c6fd2807SJeff Garzik  *	RETURNS:
2045c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
2046c6fd2807SJeff Garzik  */
2047efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
2048c6fd2807SJeff Garzik {
20499af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20509af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
20516746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2052c6fd2807SJeff Garzik 	const u16 *id = dev->id;
20537dc951aeSTejun Heo 	unsigned long xfer_mask;
2054b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
20553f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
20563f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
2057c6fd2807SJeff Garzik 	int rc;
2058c6fd2807SJeff Garzik 
2059c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
206044877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
206144877b4eSTejun Heo 			       __FUNCTION__);
2062c6fd2807SJeff Garzik 		return 0;
2063c6fd2807SJeff Garzik 	}
2064c6fd2807SJeff Garzik 
2065c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
206644877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2067c6fd2807SJeff Garzik 
206875683fe7STejun Heo 	/* set horkage */
206975683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
207075683fe7STejun Heo 
20716746544cSTejun Heo 	/* let ACPI work its magic */
20726746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
20736746544cSTejun Heo 	if (rc)
20746746544cSTejun Heo 		return rc;
207508573a86SKristen Carlson Accardi 
207605027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
207705027adcSTejun Heo 	rc = ata_hpa_resize(dev);
207805027adcSTejun Heo 	if (rc)
207905027adcSTejun Heo 		return rc;
208005027adcSTejun Heo 
2081c6fd2807SJeff Garzik 	/* print device capabilities */
2082c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2083c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2084c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2085c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2086c6fd2807SJeff Garzik 			       __FUNCTION__,
2087c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
2088c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
2089c6fd2807SJeff Garzik 
2090c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
2091c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2092c6fd2807SJeff Garzik 	dev->max_sectors = 0;
2093c6fd2807SJeff Garzik 	dev->cdb_len = 0;
2094c6fd2807SJeff Garzik 	dev->n_sectors = 0;
2095c6fd2807SJeff Garzik 	dev->cylinders = 0;
2096c6fd2807SJeff Garzik 	dev->heads = 0;
2097c6fd2807SJeff Garzik 	dev->sectors = 0;
2098c6fd2807SJeff Garzik 
2099c6fd2807SJeff Garzik 	/*
2100c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
2101c6fd2807SJeff Garzik 	 */
2102c6fd2807SJeff Garzik 
2103c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
2104c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
2105c6fd2807SJeff Garzik 
2106c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2107c6fd2807SJeff Garzik 		ata_dump_id(id);
2108c6fd2807SJeff Garzik 
2109ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2110ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2111ef143d57SAlbert Lee 			sizeof(fwrevbuf));
2112ef143d57SAlbert Lee 
2113ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2114ef143d57SAlbert Lee 			sizeof(modelbuf));
2115ef143d57SAlbert Lee 
2116c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
2117c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
2118b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
2119b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
212044877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
212144877b4eSTejun Heo 					       "supports DRM functions and may "
212244877b4eSTejun Heo 					       "not be fully accessable.\n");
2123b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
2124ae8d4ee7SAlan Cox 		} else {
2125b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2126ae8d4ee7SAlan Cox 			/* Warn the user if the device has TPM extensions */
2127ae8d4ee7SAlan Cox 			if (ata_id_has_tpm(id))
2128ae8d4ee7SAlan Cox 				ata_dev_printk(dev, KERN_WARNING,
2129ae8d4ee7SAlan Cox 					       "supports DRM functions and may "
2130ae8d4ee7SAlan Cox 					       "not be fully accessable.\n");
2131ae8d4ee7SAlan Cox 		}
2132b352e57dSAlan Cox 
2133c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
2134c6fd2807SJeff Garzik 
21353f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
21363f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
21373f64f565SEric D. Mudama 
2138c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
2139c6fd2807SJeff Garzik 			const char *lba_desc;
2140c6fd2807SJeff Garzik 			char ncq_desc[20];
2141c6fd2807SJeff Garzik 
2142c6fd2807SJeff Garzik 			lba_desc = "LBA";
2143c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
2144c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
2145c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
2146c6fd2807SJeff Garzik 				lba_desc = "LBA48";
21476fc49adbSTejun Heo 
21486fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
21496fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
21506fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2151c6fd2807SJeff Garzik 			}
2152c6fd2807SJeff Garzik 
2153c6fd2807SJeff Garzik 			/* config NCQ */
2154c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2155c6fd2807SJeff Garzik 
2156c6fd2807SJeff Garzik 			/* print device info to dmesg */
21573f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
21583f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21593f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
21603f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
21613f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
21623f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21633f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
2164c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
21653f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
21663f64f565SEric D. Mudama 			}
2167c6fd2807SJeff Garzik 		} else {
2168c6fd2807SJeff Garzik 			/* CHS */
2169c6fd2807SJeff Garzik 
2170c6fd2807SJeff Garzik 			/* Default translation */
2171c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
2172c6fd2807SJeff Garzik 			dev->heads	= id[3];
2173c6fd2807SJeff Garzik 			dev->sectors	= id[6];
2174c6fd2807SJeff Garzik 
2175c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
2176c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
2177c6fd2807SJeff Garzik 				dev->cylinders = id[54];
2178c6fd2807SJeff Garzik 				dev->heads     = id[55];
2179c6fd2807SJeff Garzik 				dev->sectors   = id[56];
2180c6fd2807SJeff Garzik 			}
2181c6fd2807SJeff Garzik 
2182c6fd2807SJeff Garzik 			/* print device info to dmesg */
21833f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2184c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
21853f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
21863f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
21873f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
21883f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21893f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
21903f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
21913f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
21923f64f565SEric D. Mudama 					dev->heads, dev->sectors);
21933f64f565SEric D. Mudama 			}
2194c6fd2807SJeff Garzik 		}
2195c6fd2807SJeff Garzik 
2196c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2197c6fd2807SJeff Garzik 	}
2198c6fd2807SJeff Garzik 
2199c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2200c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2201854c73a2STejun Heo 		const char *cdb_intr_string = "";
2202854c73a2STejun Heo 		const char *atapi_an_string = "";
22037d77b247STejun Heo 		u32 sntf;
2204c6fd2807SJeff Garzik 
2205c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2206c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2207c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2208c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2209c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2210c6fd2807SJeff Garzik 			rc = -EINVAL;
2211c6fd2807SJeff Garzik 			goto err_out_nosup;
2212c6fd2807SJeff Garzik 		}
2213c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2214c6fd2807SJeff Garzik 
22157d77b247STejun Heo 		/* Enable ATAPI AN if both the host and device support
22167d77b247STejun Heo 		 * it.  If PMP is attached, SNTF is required
22177d77b247STejun Heo 		 * to enable ATAPI AN to discern between PHY status
22187d77b247STejun Heo 		 * changed notifications and ATAPI ANs.
22199f45cbd3SKristen Carlson Accardi 		 */
22207d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
22217d77b247STejun Heo 		    (!ap->nr_pmp_links ||
22227d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2223854c73a2STejun Heo 			unsigned int err_mask;
2224854c73a2STejun Heo 
22259f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2226218f3d30SJeff Garzik 			err_mask = ata_dev_set_feature(dev,
2227218f3d30SJeff Garzik 					SETFEATURES_SATA_ENABLE, SATA_AN);
2228854c73a2STejun Heo 			if (err_mask)
22299f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2230854c73a2STejun Heo 					"failed to enable ATAPI AN "
2231854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2232854c73a2STejun Heo 			else {
22339f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2234854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2235854c73a2STejun Heo 			}
22369f45cbd3SKristen Carlson Accardi 		}
22379f45cbd3SKristen Carlson Accardi 
2238c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2239c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2240c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2241c6fd2807SJeff Garzik 		}
2242c6fd2807SJeff Garzik 
2243c6fd2807SJeff Garzik 		/* print device info to dmesg */
2244c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2245ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2246854c73a2STejun Heo 				       "ATAPI: %s, %s, max %s%s%s\n",
2247ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2248c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2249854c73a2STejun Heo 				       cdb_intr_string, atapi_an_string);
2250c6fd2807SJeff Garzik 	}
2251c6fd2807SJeff Garzik 
2252914ed354STejun Heo 	/* determine max_sectors */
2253914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2254914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2255914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2256914ed354STejun Heo 
2257ca77329fSKristen Carlson Accardi 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2258ca77329fSKristen Carlson Accardi 		if (ata_id_has_hipm(dev->id))
2259ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_HIPM;
2260ca77329fSKristen Carlson Accardi 		if (ata_id_has_dipm(dev->id))
2261ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_DIPM;
2262ca77329fSKristen Carlson Accardi 	}
2263ca77329fSKristen Carlson Accardi 
2264c5038fc0SAlan Cox 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2265c5038fc0SAlan Cox 	   200 sectors */
2266c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2267c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2268c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2269c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2270c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2271c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2272c6fd2807SJeff Garzik 	}
2273c6fd2807SJeff Garzik 
2274f8d8e579STony Battersby 	if ((dev->class == ATA_DEV_ATAPI) &&
2275f442cd86SAlbert Lee 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2276f8d8e579STony Battersby 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2277f442cd86SAlbert Lee 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2278f442cd86SAlbert Lee 	}
2279f8d8e579STony Battersby 
228075683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
228103ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
228203ec52deSTejun Heo 					 dev->max_sectors);
228318d6e9d5SAlbert Lee 
2284ca77329fSKristen Carlson Accardi 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2285ca77329fSKristen Carlson Accardi 		dev->horkage |= ATA_HORKAGE_IPM;
2286ca77329fSKristen Carlson Accardi 
2287ca77329fSKristen Carlson Accardi 		/* reset link pm_policy for this port to no pm */
2288ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
2289ca77329fSKristen Carlson Accardi 	}
2290ca77329fSKristen Carlson Accardi 
2291c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2292cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2293c6fd2807SJeff Garzik 
2294c5038fc0SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2295c5038fc0SAlan Cox 		/* Let the user know. We don't want to disallow opens for
2296c5038fc0SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
2297c5038fc0SAlan Cox 		   idiot. Do this after the dev_config call as some controllers
2298c5038fc0SAlan Cox 		   with buggy firmware may want to avoid reporting false device
2299c5038fc0SAlan Cox 		   bugs */
2300c5038fc0SAlan Cox 
2301c5038fc0SAlan Cox 		if (print_info) {
2302c5038fc0SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
2303c5038fc0SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
2304c5038fc0SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
2305c5038fc0SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
2306c5038fc0SAlan Cox 		}
2307c5038fc0SAlan Cox 	}
2308c5038fc0SAlan Cox 
2309c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2310c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2311c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2312c6fd2807SJeff Garzik 	return 0;
2313c6fd2807SJeff Garzik 
2314c6fd2807SJeff Garzik err_out_nosup:
2315c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2316c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2317c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2318c6fd2807SJeff Garzik 	return rc;
2319c6fd2807SJeff Garzik }
2320c6fd2807SJeff Garzik 
2321c6fd2807SJeff Garzik /**
23222e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2323be0d18dfSAlan Cox  *	@ap: port
2324be0d18dfSAlan Cox  *
23252e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2326be0d18dfSAlan Cox  *	detection.
2327be0d18dfSAlan Cox  */
2328be0d18dfSAlan Cox 
2329be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2330be0d18dfSAlan Cox {
2331be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2332be0d18dfSAlan Cox }
2333be0d18dfSAlan Cox 
2334be0d18dfSAlan Cox /**
23352e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2336be0d18dfSAlan Cox  *	@ap: port
2337be0d18dfSAlan Cox  *
23382e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2339be0d18dfSAlan Cox  *	detection.
2340be0d18dfSAlan Cox  */
2341be0d18dfSAlan Cox 
2342be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2343be0d18dfSAlan Cox {
2344be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2345be0d18dfSAlan Cox }
2346be0d18dfSAlan Cox 
2347be0d18dfSAlan Cox /**
2348be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2349be0d18dfSAlan Cox  *	@ap: port
2350be0d18dfSAlan Cox  *
2351be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2352be0d18dfSAlan Cox  */
2353be0d18dfSAlan Cox 
2354be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2355be0d18dfSAlan Cox {
2356be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2357be0d18dfSAlan Cox }
2358be0d18dfSAlan Cox 
2359be0d18dfSAlan Cox /**
2360c88f90c3STejun Heo  *	ata_cable_ignore	-	return ignored PATA cable.
2361c88f90c3STejun Heo  *	@ap: port
2362c88f90c3STejun Heo  *
2363c88f90c3STejun Heo  *	Helper method for drivers which don't use cable type to limit
2364c88f90c3STejun Heo  *	transfer mode.
2365c88f90c3STejun Heo  */
2366c88f90c3STejun Heo int ata_cable_ignore(struct ata_port *ap)
2367c88f90c3STejun Heo {
2368c88f90c3STejun Heo 	return ATA_CBL_PATA_IGN;
2369c88f90c3STejun Heo }
2370c88f90c3STejun Heo 
2371c88f90c3STejun Heo /**
2372be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2373be0d18dfSAlan Cox  *	@ap: port
2374be0d18dfSAlan Cox  *
2375be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2376be0d18dfSAlan Cox  */
2377be0d18dfSAlan Cox 
2378be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2379be0d18dfSAlan Cox {
2380be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2381be0d18dfSAlan Cox }
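
/*
 * Illustrative sketch, not part of the original source: how a PATA
 * driver typically plugs one of the cable helpers above into its port
 * operations (ata_bus_probe() below calls ->cable_detect).
 * "pata_example_*" is a hypothetical driver and the initializer is
 * deliberately incomplete.
 */
static int pata_example_cable_detect(struct ata_port *ap)
{
	/* A driver with board-specific knowledge (a strap register, a
	 * DMI match, ...) would decide here; with no information at
	 * all, fall back to the generic helper.
	 */
	return ata_cable_unknown(ap);
}

static const struct ata_port_operations pata_example_port_ops = {
	/* hypothetical and incomplete - a real driver also fills in
	 * tf_load/exec_command/etc.  Drivers with hardwired cabling
	 * point this straight at ata_cable_40wire, ata_cable_80wire
	 * or ata_cable_ignore instead.
	 */
	.cable_detect	= pata_example_cable_detect,
};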
2382be0d18dfSAlan Cox 
2383be0d18dfSAlan Cox /**
2384c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2385c6fd2807SJeff Garzik  *	@ap: Bus to probe
2386c6fd2807SJeff Garzik  *
2387c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2388c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2389c6fd2807SJeff Garzik  *	the bus.
2390c6fd2807SJeff Garzik  *
2391c6fd2807SJeff Garzik  *	LOCKING:
2392c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2393c6fd2807SJeff Garzik  *
2394c6fd2807SJeff Garzik  *	RETURNS:
2395c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2396c6fd2807SJeff Garzik  */
2397c6fd2807SJeff Garzik 
2398c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2399c6fd2807SJeff Garzik {
2400c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2401c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2402f58229f8STejun Heo 	int rc;
2403c6fd2807SJeff Garzik 	struct ata_device *dev;
2404c6fd2807SJeff Garzik 
2405c6fd2807SJeff Garzik 	ata_port_probe(ap);
2406c6fd2807SJeff Garzik 
2407f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2408f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2409c6fd2807SJeff Garzik 
2410c6fd2807SJeff Garzik  retry:
2411cdeab114STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2412cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2413cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2414cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2415cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2416cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2417cdeab114STejun Heo 		 * bus as we may be talking too fast.
2418cdeab114STejun Heo 		 */
2419cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2420cdeab114STejun Heo 
2421cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2422cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2423cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2424cdeab114STejun Heo 		 * configuring devices.
2425cdeab114STejun Heo 		 */
2426cdeab114STejun Heo 		if (ap->ops->set_piomode)
2427cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2428cdeab114STejun Heo 	}
2429cdeab114STejun Heo 
2430c6fd2807SJeff Garzik 	/* reset and determine device classes */
2431c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2432c6fd2807SJeff Garzik 
2433f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2434c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2435c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2436c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2437c6fd2807SJeff Garzik 		else
2438c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2439c6fd2807SJeff Garzik 
2440c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2441c6fd2807SJeff Garzik 	}
2442c6fd2807SJeff Garzik 
2443c6fd2807SJeff Garzik 	ata_port_probe(ap);
2444c6fd2807SJeff Garzik 
2445f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2446f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2447f31f0cc2SJeff Garzik 	   the slave device */
2448f31f0cc2SJeff Garzik 
2449f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2450f58229f8STejun Heo 		if (tries[dev->devno])
2451f58229f8STejun Heo 			dev->class = classes[dev->devno];
2452c6fd2807SJeff Garzik 
2453c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2454c6fd2807SJeff Garzik 			continue;
2455c6fd2807SJeff Garzik 
2456bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2457bff04647STejun Heo 				     dev->id);
2458c6fd2807SJeff Garzik 		if (rc)
2459c6fd2807SJeff Garzik 			goto fail;
2460f31f0cc2SJeff Garzik 	}
2461f31f0cc2SJeff Garzik 
2462be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2463be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2464be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2465be0d18dfSAlan Cox 
2466614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2467614fe29bSAlan Cox 	   reported cable types and sensed types */
2468614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2469614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2470614fe29bSAlan Cox 			continue;
2471614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2472614fe29bSAlan Cox 		   end of the link the bridge is on, which is a problem */
2473614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2474614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2475614fe29bSAlan Cox 	}
2476614fe29bSAlan Cox 
2477f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2478f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2479f31f0cc2SJeff Garzik 
2480f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2481f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2482f31f0cc2SJeff Garzik 			continue;
2483c6fd2807SJeff Garzik 
24849af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2485efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
24869af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2487c6fd2807SJeff Garzik 		if (rc)
2488c6fd2807SJeff Garzik 			goto fail;
2489c6fd2807SJeff Garzik 	}
2490c6fd2807SJeff Garzik 
2491c6fd2807SJeff Garzik 	/* configure transfer mode */
24920260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
24934ae72a1eSTejun Heo 	if (rc)
2494c6fd2807SJeff Garzik 		goto fail;
2495c6fd2807SJeff Garzik 
2496f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2497f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2498c6fd2807SJeff Garzik 			return 0;
2499c6fd2807SJeff Garzik 
2500c6fd2807SJeff Garzik 	/* no device present, disable port */
2501c6fd2807SJeff Garzik 	ata_port_disable(ap);
2502c6fd2807SJeff Garzik 	return -ENODEV;
2503c6fd2807SJeff Garzik 
2504c6fd2807SJeff Garzik  fail:
25054ae72a1eSTejun Heo 	tries[dev->devno]--;
25064ae72a1eSTejun Heo 
2507c6fd2807SJeff Garzik 	switch (rc) {
2508c6fd2807SJeff Garzik 	case -EINVAL:
25094ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2510c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2511c6fd2807SJeff Garzik 		break;
25124ae72a1eSTejun Heo 
25134ae72a1eSTejun Heo 	case -ENODEV:
25144ae72a1eSTejun Heo 		/* give it just one more chance */
25154ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2516c6fd2807SJeff Garzik 	case -EIO:
25174ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
25184ae72a1eSTejun Heo 			/* This is the last chance, better to slow
25194ae72a1eSTejun Heo 			 * down than lose it.
25204ae72a1eSTejun Heo 			 */
2521936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
25224ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
25234ae72a1eSTejun Heo 		}
2524c6fd2807SJeff Garzik 	}
2525c6fd2807SJeff Garzik 
25264ae72a1eSTejun Heo 	if (!tries[dev->devno])
2527c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2528c6fd2807SJeff Garzik 
2529c6fd2807SJeff Garzik 	goto retry;
2530c6fd2807SJeff Garzik }
2531c6fd2807SJeff Garzik 
2532c6fd2807SJeff Garzik /**
2533c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2534c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2535c6fd2807SJeff Garzik  *
2536c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2537c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2538c6fd2807SJeff Garzik  *
2539cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2540c6fd2807SJeff Garzik  *	serialization.
2541c6fd2807SJeff Garzik  */
2542c6fd2807SJeff Garzik 
2543c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2544c6fd2807SJeff Garzik {
2545c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2546c6fd2807SJeff Garzik }
2547c6fd2807SJeff Garzik 
2548c6fd2807SJeff Garzik /**
2549c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2550936fd732STejun Heo  *	@link: SATA link to printk link status about
2551c6fd2807SJeff Garzik  *
2552c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2553c6fd2807SJeff Garzik  *
2554c6fd2807SJeff Garzik  *	LOCKING:
2555c6fd2807SJeff Garzik  *	None.
2556c6fd2807SJeff Garzik  */
2557936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2558c6fd2807SJeff Garzik {
2559c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2560c6fd2807SJeff Garzik 
2561936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2562c6fd2807SJeff Garzik 		return;
2563936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2564c6fd2807SJeff Garzik 
2565936fd732STejun Heo 	if (ata_link_online(link)) {
2566c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2567936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2568c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2569c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2570c6fd2807SJeff Garzik 	} else {
2571936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2572c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2573c6fd2807SJeff Garzik 				sstatus, scontrol);
2574c6fd2807SJeff Garzik 	}
2575c6fd2807SJeff Garzik }
2576c6fd2807SJeff Garzik 
2577c6fd2807SJeff Garzik /**
2578c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2579c6fd2807SJeff Garzik  *	@adev: device
2580c6fd2807SJeff Garzik  *
2581c6fd2807SJeff Garzik  *	Obtain the other device on the same cable; if none is
2582c6fd2807SJeff Garzik  *	present, NULL is returned.
2583c6fd2807SJeff Garzik  */
2584c6fd2807SJeff Garzik 
2585c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2586c6fd2807SJeff Garzik {
25879af5c9c9STejun Heo 	struct ata_link *link = adev->link;
25889af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2589c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2590c6fd2807SJeff Garzik 		return NULL;
2591c6fd2807SJeff Garzik 	return pair;
2592c6fd2807SJeff Garzik }
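
/*
 * Illustrative sketch, not part of the original source: typical use of
 * ata_dev_pair() in a PATA timing routine that must not program a mode
 * faster than the companion device on the same cable can tolerate.
 * "example_set_piomode" is a hypothetical ->set_piomode hook.
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);
	int pio = adev->pio_mode - XFER_PIO_0;

	/* if the other device on the cable is live, clamp to the
	 * slower of the two PIO modes before touching the chipset */
	if (pair && pair->pio_mode - XFER_PIO_0 < pio)
		pio = pair->pio_mode - XFER_PIO_0;

	/* ... program the controller's timing registers for "pio" ... */
}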
2593c6fd2807SJeff Garzik 
2594c6fd2807SJeff Garzik /**
2595c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2596c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2597c6fd2807SJeff Garzik  *
2598c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2599c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2600c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2601c6fd2807SJeff Garzik  *	on this port.
2602c6fd2807SJeff Garzik  *
2603cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2604c6fd2807SJeff Garzik  *	serialization.
2605c6fd2807SJeff Garzik  */
2606c6fd2807SJeff Garzik 
2607c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2608c6fd2807SJeff Garzik {
26099af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
26109af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2611c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2612c6fd2807SJeff Garzik }
2613c6fd2807SJeff Garzik 
2614c6fd2807SJeff Garzik /**
2615c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2616936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2617c6fd2807SJeff Garzik  *
2618936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2619c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2620c6fd2807SJeff Garzik  *	using sata_set_spd().
2621c6fd2807SJeff Garzik  *
2622c6fd2807SJeff Garzik  *	LOCKING:
2623c6fd2807SJeff Garzik  *	Inherited from caller.
2624c6fd2807SJeff Garzik  *
2625c6fd2807SJeff Garzik  *	RETURNS:
2626c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2627c6fd2807SJeff Garzik  */
2628936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2629c6fd2807SJeff Garzik {
2630c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2631c6fd2807SJeff Garzik 	int rc, highbit;
2632c6fd2807SJeff Garzik 
2633936fd732STejun Heo 	if (!sata_scr_valid(link))
2634008a7896STejun Heo 		return -EOPNOTSUPP;
2635008a7896STejun Heo 
2636008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2637936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2638008a7896STejun Heo 	 */
2639936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2640008a7896STejun Heo 	if (rc == 0)
2641008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2642008a7896STejun Heo 	else
2643936fd732STejun Heo 		spd = link->sata_spd;
2644c6fd2807SJeff Garzik 
2645936fd732STejun Heo 	mask = link->sata_spd_limit;
2646c6fd2807SJeff Garzik 	if (mask <= 1)
2647c6fd2807SJeff Garzik 		return -EINVAL;
2648008a7896STejun Heo 
2649008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2650c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2651c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2652c6fd2807SJeff Garzik 
2653008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2654008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2655008a7896STejun Heo 	 */
2656008a7896STejun Heo 	if (spd > 1)
2657008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2658008a7896STejun Heo 	else
2659008a7896STejun Heo 		mask &= 1;
2660008a7896STejun Heo 
2661008a7896STejun Heo 	/* were we already at the bottom? */
2662c6fd2807SJeff Garzik 	if (!mask)
2663c6fd2807SJeff Garzik 		return -EINVAL;
2664c6fd2807SJeff Garzik 
2665936fd732STejun Heo 	link->sata_spd_limit = mask;
2666c6fd2807SJeff Garzik 
2667936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2668c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2669c6fd2807SJeff Garzik 
2670c6fd2807SJeff Garzik 	return 0;
2671c6fd2807SJeff Garzik }
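
/*
 * Worked example, not part of the original source: with
 * link->sata_spd_limit == 0x3 (1.5 and 3.0 Gbps allowed) and the link
 * currently at 3.0 Gbps (spd == 2), the code above first drops the
 * highest bit (mask becomes 0x1), then keeps only speeds below the
 * current one ((1 << (2 - 1)) - 1 == 0x1), leaving 1.5 Gbps as the new
 * limit.  The change only takes effect once sata_set_spd() is applied,
 * typically via a subsequent hardreset.
 */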
2672c6fd2807SJeff Garzik 
2673936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2674c6fd2807SJeff Garzik {
26755270222fSTejun Heo 	struct ata_link *host_link = &link->ap->link;
26765270222fSTejun Heo 	u32 limit, target, spd;
2677c6fd2807SJeff Garzik 
26785270222fSTejun Heo 	limit = link->sata_spd_limit;
26795270222fSTejun Heo 
26805270222fSTejun Heo 	/* Don't configure downstream link faster than upstream link.
26815270222fSTejun Heo 	 * It doesn't speed up anything and some PMPs choke on such
26825270222fSTejun Heo 	 * configuration.
26835270222fSTejun Heo 	 */
26845270222fSTejun Heo 	if (!ata_is_host_link(link) && host_link->sata_spd)
26855270222fSTejun Heo 		limit &= (1 << host_link->sata_spd) - 1;
26865270222fSTejun Heo 
26875270222fSTejun Heo 	if (limit == UINT_MAX)
26885270222fSTejun Heo 		target = 0;
2689c6fd2807SJeff Garzik 	else
26905270222fSTejun Heo 		target = fls(limit);
2691c6fd2807SJeff Garzik 
2692c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
26935270222fSTejun Heo 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2694c6fd2807SJeff Garzik 
26955270222fSTejun Heo 	return spd != target;
2696c6fd2807SJeff Garzik }
2697c6fd2807SJeff Garzik 
2698c6fd2807SJeff Garzik /**
2699c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2700936fd732STejun Heo  *	@link: Link in question
2701c6fd2807SJeff Garzik  *
2702c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2703936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2704c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2705c6fd2807SJeff Garzik  *	configuration.
2706c6fd2807SJeff Garzik  *
2707c6fd2807SJeff Garzik  *	LOCKING:
2708c6fd2807SJeff Garzik  *	Inherited from caller.
2709c6fd2807SJeff Garzik  *
2710c6fd2807SJeff Garzik  *	RETURNS:
2711c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2712c6fd2807SJeff Garzik  */
2713936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2714c6fd2807SJeff Garzik {
2715c6fd2807SJeff Garzik 	u32 scontrol;
2716c6fd2807SJeff Garzik 
2717936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2718db64bcf3STejun Heo 		return 1;
2719c6fd2807SJeff Garzik 
2720936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2721c6fd2807SJeff Garzik }
2722c6fd2807SJeff Garzik 
2723c6fd2807SJeff Garzik /**
2724c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2725936fd732STejun Heo  *	@link: Link to set SATA spd for
2726c6fd2807SJeff Garzik  *
2727936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2728c6fd2807SJeff Garzik  *
2729c6fd2807SJeff Garzik  *	LOCKING:
2730c6fd2807SJeff Garzik  *	Inherited from caller.
2731c6fd2807SJeff Garzik  *
2732c6fd2807SJeff Garzik  *	RETURNS:
2733c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2734c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2735c6fd2807SJeff Garzik  */
2736936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2737c6fd2807SJeff Garzik {
2738c6fd2807SJeff Garzik 	u32 scontrol;
2739c6fd2807SJeff Garzik 	int rc;
2740c6fd2807SJeff Garzik 
2741936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2742c6fd2807SJeff Garzik 		return rc;
2743c6fd2807SJeff Garzik 
2744936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2745c6fd2807SJeff Garzik 		return 0;
2746c6fd2807SJeff Garzik 
2747936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2748c6fd2807SJeff Garzik 		return rc;
2749c6fd2807SJeff Garzik 
2750c6fd2807SJeff Garzik 	return 1;
2751c6fd2807SJeff Garzik }
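
/*
 * Illustrative sketch, not part of the original source: how reset code
 * is expected to combine the helpers above.  "example_prepare_hardreset"
 * is a hypothetical helper; it writes the current limit to SControl and
 * uses the return value to learn whether the PHY must renegotiate.
 */
static int example_prepare_hardreset(struct ata_link *link)
{
	int rc;

	rc = sata_set_spd(link);	/* apply link->sata_spd_limit */
	if (rc < 0)
		return rc;		/* SCR registers inaccessible */

	if (rc == 1) {
		/* SControl changed: a full COMRESET is needed for the
		 * device to come back up at the new, lower speed. */
	}
	return 0;
}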
2752c6fd2807SJeff Garzik 
2753c6fd2807SJeff Garzik /*
2754c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2755c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2756c6fd2807SJeff Garzik  */
2757c6fd2807SJeff Garzik /*
2758b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2759c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2760b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2761b352e57dSAlan Cox  *
2762b352e57dSAlan Cox  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2763c6fd2807SJeff Garzik  */
2764c6fd2807SJeff Garzik 
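/*
 * Column order below follows the fields read back in ata_timing_quantize()
 * (and, by assumption, the struct ata_timing layout in <linux/libata.h>):
 * mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma - all
 * values in nanoseconds.
 */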
2765c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
276670cd071eSTejun Heo /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
276770cd071eSTejun Heo 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
276870cd071eSTejun Heo 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
276970cd071eSTejun Heo 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
277070cd071eSTejun Heo 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
277170cd071eSTejun Heo 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
277270cd071eSTejun Heo 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
277370cd071eSTejun Heo 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2774c6fd2807SJeff Garzik 
277570cd071eSTejun Heo 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
277670cd071eSTejun Heo 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
277770cd071eSTejun Heo 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2778c6fd2807SJeff Garzik 
277970cd071eSTejun Heo 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
278070cd071eSTejun Heo 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
278170cd071eSTejun Heo 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2782b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
278370cd071eSTejun Heo 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2784c6fd2807SJeff Garzik 
2785c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
278670cd071eSTejun Heo 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
278770cd071eSTejun Heo 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
278870cd071eSTejun Heo 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
278970cd071eSTejun Heo 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
279070cd071eSTejun Heo 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
279170cd071eSTejun Heo 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
279270cd071eSTejun Heo 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2793c6fd2807SJeff Garzik 
2794c6fd2807SJeff Garzik 	{ 0xFF }
2795c6fd2807SJeff Garzik };
2796c6fd2807SJeff Garzik 
2797c6fd2807SJeff Garzik #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2798c6fd2807SJeff Garzik #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2799c6fd2807SJeff Garzik 
2800c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2801c6fd2807SJeff Garzik {
2802c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2803c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2804c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2805c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2806c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2807c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2808c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2809c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2810c6fd2807SJeff Garzik }
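
/*
 * Worked example, not part of the original source: by the usual driver
 * convention T and UT are clock periods in picoseconds (a 33 MHz PATA
 * clock gives T == 30000), while the table values are nanoseconds,
 * hence the "* 1000".  For the XFER_PIO_4 entry, active == 70ns
 * quantizes to ENOUGH(70000, 30000) == 3 clocks, recover == 25ns to
 * 1 clock and cycle == 120ns to 4 clocks; EZ() simply skips fields
 * that are zero in the table.
 */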
2811c6fd2807SJeff Garzik 
2812c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2813c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2814c6fd2807SJeff Garzik {
2815c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2816c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2817c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2818c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2819c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2820c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2821c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2822c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2823c6fd2807SJeff Garzik }
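
/*
 * Note, not part of the original source: ata_timing_merge() takes the
 * slower (larger) value field by field.  ata_timing_compute() below uses
 * it to fold a drive's own reported minimum cycle times (the
 * ATA_ID_EIDE_* identify words) into the standard table entries before
 * quantizing, and again to keep DMA timings no faster than the PIO
 * timings still used for commands like IDENTIFY.
 */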
2824c6fd2807SJeff Garzik 
28256357357cSTejun Heo const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2826c6fd2807SJeff Garzik {
282770cd071eSTejun Heo 	const struct ata_timing *t = ata_timing;
2828c6fd2807SJeff Garzik 
282970cd071eSTejun Heo 	while (xfer_mode > t->mode)
283070cd071eSTejun Heo 		t++;
283170cd071eSTejun Heo 
283270cd071eSTejun Heo 	if (xfer_mode == t->mode)
2833c6fd2807SJeff Garzik 		return t;
283470cd071eSTejun Heo 	return NULL;
2835c6fd2807SJeff Garzik }
2836c6fd2807SJeff Garzik 
2837c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2838c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2839c6fd2807SJeff Garzik {
2840c6fd2807SJeff Garzik 	const struct ata_timing *s;
2841c6fd2807SJeff Garzik 	struct ata_timing p;
2842c6fd2807SJeff Garzik 
2843c6fd2807SJeff Garzik 	/*
2844c6fd2807SJeff Garzik 	 * Find the mode.
2845c6fd2807SJeff Garzik 	 */
2846c6fd2807SJeff Garzik 
2847c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2848c6fd2807SJeff Garzik 		return -EINVAL;
2849c6fd2807SJeff Garzik 
2850c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2851c6fd2807SJeff Garzik 
2852c6fd2807SJeff Garzik 	/*
2853c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2854c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2855c6fd2807SJeff Garzik 	 */
2856c6fd2807SJeff Garzik 
2857c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2858c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2859c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2860c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2861c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2862c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2863c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2864c6fd2807SJeff Garzik 		}
2865c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2866c6fd2807SJeff Garzik 	}
2867c6fd2807SJeff Garzik 
2868c6fd2807SJeff Garzik 	/*
2869c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2870c6fd2807SJeff Garzik 	 */
2871c6fd2807SJeff Garzik 
2872c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2873c6fd2807SJeff Garzik 
2874c6fd2807SJeff Garzik 	/*
2875c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2876c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2877c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2878c6fd2807SJeff Garzik 	 */
2879c6fd2807SJeff Garzik 
2880fd3367afSAlan 	if (speed > XFER_PIO_6) {
2881c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2882c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2883c6fd2807SJeff Garzik 	}
2884c6fd2807SJeff Garzik 
2885c6fd2807SJeff Garzik 	/*
2886c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2887c6fd2807SJeff Garzik 	 */
2888c6fd2807SJeff Garzik 
2889c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2890c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2891c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2892c6fd2807SJeff Garzik 	}
2893c6fd2807SJeff Garzik 
2894c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2895c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2896c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2897c6fd2807SJeff Garzik 	}
28984f701d1eSAlan Cox 
28994f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
29004f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
29014f701d1eSAlan Cox 	   if so we must correct this */
29024f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
29034f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2904c6fd2807SJeff Garzik 
2905c6fd2807SJeff Garzik 	return 0;
2906c6fd2807SJeff Garzik }
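
/*
 * Illustrative sketch, not part of the original source: the usual call
 * pattern from a controller driver's timing hook.  The clock figure and
 * the register programming are placeholders for a hypothetical chip.
 */
static void example_program_pio(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	/* 33 MHz command clock -> 30000 ps period; the same clock is
	 * reused for UDMA in this sketch */
	const int T = 30000, UT = 30000;

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT))
		return;		/* unknown mode - nothing sane to program */

	/* t.setup, t.active, t.recover, ... now hold clock counts; a
	 * real driver packs them into its chip-specific registers. */
}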
2907c6fd2807SJeff Garzik 
2908c6fd2807SJeff Garzik /**
2909a0f79b92STejun Heo  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2910a0f79b92STejun Heo  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2911a0f79b92STejun Heo  *	@cycle: cycle duration in ns
2912a0f79b92STejun Heo  *
2913a0f79b92STejun Heo  *	Return matching xfer mode for @cycle.  The returned mode is of
2914a0f79b92STejun Heo  *	the transfer type specified by @xfer_shift.  If @cycle is too
2915a0f79b92STejun Heo  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
2916a0f79b92STejun Heo  *	than the fastest known mode, the fasted mode is returned.
2917a0f79b92STejun Heo  *	than the fastest known mode, the fastest mode is returned.
2918a0f79b92STejun Heo  *	LOCKING:
2919a0f79b92STejun Heo  *	None.
2920a0f79b92STejun Heo  *
2921a0f79b92STejun Heo  *	RETURNS:
2922a0f79b92STejun Heo  *	Matching xfer_mode, 0xff if no match found.
2923a0f79b92STejun Heo  */
2924a0f79b92STejun Heo u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
2925a0f79b92STejun Heo {
2926a0f79b92STejun Heo 	u8 base_mode = 0xff, last_mode = 0xff;
2927a0f79b92STejun Heo 	const struct ata_xfer_ent *ent;
2928a0f79b92STejun Heo 	const struct ata_timing *t;
2929a0f79b92STejun Heo 
2930a0f79b92STejun Heo 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
2931a0f79b92STejun Heo 		if (ent->shift == xfer_shift)
2932a0f79b92STejun Heo 			base_mode = ent->base;
2933a0f79b92STejun Heo 
2934a0f79b92STejun Heo 	for (t = ata_timing_find_mode(base_mode);
2935a0f79b92STejun Heo 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
2936a0f79b92STejun Heo 		unsigned short this_cycle;
2937a0f79b92STejun Heo 
2938a0f79b92STejun Heo 		switch (xfer_shift) {
2939a0f79b92STejun Heo 		case ATA_SHIFT_PIO:
2940a0f79b92STejun Heo 		case ATA_SHIFT_MWDMA:
2941a0f79b92STejun Heo 			this_cycle = t->cycle;
2942a0f79b92STejun Heo 			break;
2943a0f79b92STejun Heo 		case ATA_SHIFT_UDMA:
2944a0f79b92STejun Heo 			this_cycle = t->udma;
2945a0f79b92STejun Heo 			break;
2946a0f79b92STejun Heo 		default:
2947a0f79b92STejun Heo 			return 0xff;
2948a0f79b92STejun Heo 		}
2949a0f79b92STejun Heo 
2950a0f79b92STejun Heo 		if (cycle > this_cycle)
2951a0f79b92STejun Heo 			break;
2952a0f79b92STejun Heo 
2953a0f79b92STejun Heo 		last_mode = t->mode;
2954a0f79b92STejun Heo 	}
2955a0f79b92STejun Heo 
2956a0f79b92STejun Heo 	return last_mode;
2957a0f79b92STejun Heo }
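
/*
 * Worked example, not part of the original source:
 * ata_timing_cycle2mode(ATA_SHIFT_UDMA, 100) returns XFER_UDMA_0, the
 * fastest UDMA mode whose nominal cycle (120ns) is still no shorter
 * than the 100ns the caller can sustain; UDMA1 already needs 80ns, so
 * the scan stops there.  A 150ns request is slower than every UDMA
 * entry and yields 0xff, while a 10ns request runs off the fast end
 * and yields XFER_UDMA_6.
 */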
2958a0f79b92STejun Heo 
2959a0f79b92STejun Heo /**
2960c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2961c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2962458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2963c6fd2807SJeff Garzik  *
2964c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2965c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2966c6fd2807SJeff Garzik  *	will apply the limit.
2967c6fd2807SJeff Garzik  *
2968c6fd2807SJeff Garzik  *	LOCKING:
2969c6fd2807SJeff Garzik  *	Inherited from caller.
2970c6fd2807SJeff Garzik  *
2971c6fd2807SJeff Garzik  *	RETURNS:
2972c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2973c6fd2807SJeff Garzik  */
2974458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2975c6fd2807SJeff Garzik {
2976458337dbSTejun Heo 	char buf[32];
29777dc951aeSTejun Heo 	unsigned long orig_mask, xfer_mask;
29787dc951aeSTejun Heo 	unsigned long pio_mask, mwdma_mask, udma_mask;
2979458337dbSTejun Heo 	int quiet, highbit;
2980c6fd2807SJeff Garzik 
2981458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2982458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2983458337dbSTejun Heo 
2984458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2985458337dbSTejun Heo 						  dev->mwdma_mask,
2986c6fd2807SJeff Garzik 						  dev->udma_mask);
2987458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2988c6fd2807SJeff Garzik 
2989458337dbSTejun Heo 	switch (sel) {
2990458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2991458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2992458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2993458337dbSTejun Heo 		break;
2994458337dbSTejun Heo 
2995458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2996458337dbSTejun Heo 		if (udma_mask) {
2997458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2998458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2999458337dbSTejun Heo 			if (!udma_mask)
3000458337dbSTejun Heo 				return -ENOENT;
3001458337dbSTejun Heo 		} else if (mwdma_mask) {
3002458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
3003458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
3004458337dbSTejun Heo 			if (!mwdma_mask)
3005458337dbSTejun Heo 				return -ENOENT;
3006458337dbSTejun Heo 		}
3007458337dbSTejun Heo 		break;
3008458337dbSTejun Heo 
3009458337dbSTejun Heo 	case ATA_DNXFER_40C:
3010458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
3011458337dbSTejun Heo 		break;
3012458337dbSTejun Heo 
3013458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
3014458337dbSTejun Heo 		pio_mask &= 1;
3015458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
3016458337dbSTejun Heo 		mwdma_mask = 0;
3017458337dbSTejun Heo 		udma_mask = 0;
3018458337dbSTejun Heo 		break;
3019458337dbSTejun Heo 
3020458337dbSTejun Heo 	default:
3021458337dbSTejun Heo 		BUG();
3022458337dbSTejun Heo 	}
3023458337dbSTejun Heo 
3024458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3025458337dbSTejun Heo 
3026458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3027458337dbSTejun Heo 		return -ENOENT;
3028458337dbSTejun Heo 
3029458337dbSTejun Heo 	if (!quiet) {
3030458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3031458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
3032458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
3033458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3034458337dbSTejun Heo 		else
3035458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
3036458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
3037458337dbSTejun Heo 
3038458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
3039458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
3040458337dbSTejun Heo 	}
3041c6fd2807SJeff Garzik 
3042c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3043c6fd2807SJeff Garzik 			    &dev->udma_mask);
3044c6fd2807SJeff Garzik 
3045c6fd2807SJeff Garzik 	return 0;
3046c6fd2807SJeff Garzik }
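
/*
 * Illustrative sketch, not part of the original source: how callers such
 * as the probe and error-handling paths step a misbehaving device down
 * (compare the ATA_DNXFER_PIO use in ata_bus_probe() above).
 * "example_speed_down" is a hypothetical helper.
 */
static void example_speed_down(struct ata_device *dev)
{
	/* first try shaving one DMA mode (UDMA6 -> UDMA5 -> ... -> MWDMA0);
	 * ATA_DNXFER_QUIET suppresses the "limiting speed" message */
	if (ata_down_xfermask_limit(dev, ATA_DNXFER_DMA | ATA_DNXFER_QUIET) == 0)
		return;

	/* nothing left to shave off the DMA side - force PIO only */
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO);
}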
3047c6fd2807SJeff Garzik 
3048c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
3049c6fd2807SJeff Garzik {
30509af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3051c6fd2807SJeff Garzik 	unsigned int err_mask;
3052c6fd2807SJeff Garzik 	int rc;
3053c6fd2807SJeff Garzik 
3054c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
3055c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3056c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
3057c6fd2807SJeff Garzik 
3058c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
30592dcb407eSJeff Garzik 
306011750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
306111750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
306211750a40SAlan 		err_mask &= ~AC_ERR_DEV;
30632dcb407eSJeff Garzik 
30640bc2a79aSAlan Cox 	/* Some very old devices and some bad newer ones fail any kind of
30650bc2a79aSAlan Cox 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
30660bc2a79aSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
30670bc2a79aSAlan Cox 			dev->pio_mode <= XFER_PIO_2)
30680bc2a79aSAlan Cox 		err_mask &= ~AC_ERR_DEV;
30692dcb407eSJeff Garzik 
30703acaf94bSAlan Cox 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
30713acaf94bSAlan Cox 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
30723acaf94bSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
30733acaf94bSAlan Cox 	    dev->dma_mode == XFER_MW_DMA_0 &&
30743acaf94bSAlan Cox 	    (dev->id[63] >> 8) & 1)
30753acaf94bSAlan Cox 		err_mask &= ~AC_ERR_DEV;
30763acaf94bSAlan Cox 
3077c6fd2807SJeff Garzik 	if (err_mask) {
3078c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3079c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
3080c6fd2807SJeff Garzik 		return -EIO;
3081c6fd2807SJeff Garzik 	}
3082c6fd2807SJeff Garzik 
3083baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3084422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3085baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3086c6fd2807SJeff Garzik 	if (rc)
3087c6fd2807SJeff Garzik 		return rc;
3088c6fd2807SJeff Garzik 
3089c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3090c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
3091c6fd2807SJeff Garzik 
3092c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3093c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
3094c6fd2807SJeff Garzik 	return 0;
3095c6fd2807SJeff Garzik }
3096c6fd2807SJeff Garzik 
3097c6fd2807SJeff Garzik /**
309804351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
30990260731fSTejun Heo  *	@link: link on which timings will be programmed
3100c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
3101c6fd2807SJeff Garzik  *
310204351821SAlan  *	Standard implementation of the function used to tune and set
310304351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
310404351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
3105c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
3106c6fd2807SJeff Garzik  *
3107c6fd2807SJeff Garzik  *	LOCKING:
3108c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3109c6fd2807SJeff Garzik  *
3110c6fd2807SJeff Garzik  *	RETURNS:
3111c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3112c6fd2807SJeff Garzik  */
311304351821SAlan 
31140260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3115c6fd2807SJeff Garzik {
31160260731fSTejun Heo 	struct ata_port *ap = link->ap;
3117c6fd2807SJeff Garzik 	struct ata_device *dev;
3118f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
3119c6fd2807SJeff Garzik 
3120c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
3121f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
31227dc951aeSTejun Heo 		unsigned long pio_mask, dma_mask;
3123b3a70601SAlan Cox 		unsigned int mode_mask;
3124c6fd2807SJeff Garzik 
3125c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3126c6fd2807SJeff Garzik 			continue;
3127c6fd2807SJeff Garzik 
3128b3a70601SAlan Cox 		mode_mask = ATA_DMA_MASK_ATA;
3129b3a70601SAlan Cox 		if (dev->class == ATA_DEV_ATAPI)
3130b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_ATAPI;
3131b3a70601SAlan Cox 		else if (ata_id_is_cfa(dev->id))
3132b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_CFA;
3133b3a70601SAlan Cox 
3134c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
3135c6fd2807SJeff Garzik 
3136c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3137c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3138b3a70601SAlan Cox 
3139b3a70601SAlan Cox 		if (libata_dma_mask & mode_mask)
3140b3a70601SAlan Cox 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3141b3a70601SAlan Cox 		else
3142b3a70601SAlan Cox 			dma_mask = 0;
3143b3a70601SAlan Cox 
3144c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3145c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3146c6fd2807SJeff Garzik 
3147c6fd2807SJeff Garzik 		found = 1;
314870cd071eSTejun Heo 		if (dev->dma_mode != 0xff)
3149c6fd2807SJeff Garzik 			used_dma = 1;
3150c6fd2807SJeff Garzik 	}
3151c6fd2807SJeff Garzik 	if (!found)
3152c6fd2807SJeff Garzik 		goto out;
3153c6fd2807SJeff Garzik 
3154c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
3155f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3156c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3157c6fd2807SJeff Garzik 			continue;
3158c6fd2807SJeff Garzik 
315970cd071eSTejun Heo 		if (dev->pio_mode == 0xff) {
3160c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3161c6fd2807SJeff Garzik 			rc = -EINVAL;
3162c6fd2807SJeff Garzik 			goto out;
3163c6fd2807SJeff Garzik 		}
3164c6fd2807SJeff Garzik 
3165c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
3166c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
3167c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
3168c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
3169c6fd2807SJeff Garzik 	}
3170c6fd2807SJeff Garzik 
3171c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
3172f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
317370cd071eSTejun Heo 		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3174c6fd2807SJeff Garzik 			continue;
3175c6fd2807SJeff Garzik 
3176c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
3177c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3178c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
3179c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
3180c6fd2807SJeff Garzik 	}
3181c6fd2807SJeff Garzik 
3182c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
3183f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
318418d90debSAlan 		/* don't update suspended devices' xfer mode */
31859666f400STejun Heo 		if (!ata_dev_enabled(dev))
3186c6fd2807SJeff Garzik 			continue;
3187c6fd2807SJeff Garzik 
3188c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
3189c6fd2807SJeff Garzik 		if (rc)
3190c6fd2807SJeff Garzik 			goto out;
3191c6fd2807SJeff Garzik 	}
3192c6fd2807SJeff Garzik 
3193c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
3194c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
3195c6fd2807SJeff Garzik 	 */
3196cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3197032af1ceSAlan 		ap->host->simplex_claimed = ap;
3198c6fd2807SJeff Garzik 
3199c6fd2807SJeff Garzik  out:
3200c6fd2807SJeff Garzik 	if (rc)
3201c6fd2807SJeff Garzik 		*r_failed_dev = dev;
3202c6fd2807SJeff Garzik 	return rc;
3203c6fd2807SJeff Garzik }
3204c6fd2807SJeff Garzik 
3205c6fd2807SJeff Garzik /**
3206c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
3207c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
3208c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
3209c6fd2807SJeff Garzik  *
3210c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
3211c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3212c6fd2807SJeff Garzik  *	other threads.
3213c6fd2807SJeff Garzik  *
3214c6fd2807SJeff Garzik  *	LOCKING:
3215cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3216c6fd2807SJeff Garzik  */
3217c6fd2807SJeff Garzik 
3218c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3219c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3220c6fd2807SJeff Garzik {
3221c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3222c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3223c6fd2807SJeff Garzik }
3224c6fd2807SJeff Garzik 
3225c6fd2807SJeff Garzik /**
3226c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3227c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3228c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3229c6fd2807SJeff Garzik  *	@tmout: overall timeout
3230c6fd2807SJeff Garzik  *
3231c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3232c6fd2807SJeff Garzik  *	or a timeout occurs.
3233c6fd2807SJeff Garzik  *
3234d1adc1bbSTejun Heo  *	LOCKING:
3235d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3236d1adc1bbSTejun Heo  *
3237d1adc1bbSTejun Heo  *	RETURNS:
3238d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3239c6fd2807SJeff Garzik  */
3240d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3241c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3242c6fd2807SJeff Garzik {
3243c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3244c6fd2807SJeff Garzik 	u8 status;
3245c6fd2807SJeff Garzik 
3246c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3247c6fd2807SJeff Garzik 	timer_start = jiffies;
3248c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3249d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3250d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3251c6fd2807SJeff Garzik 		msleep(50);
3252c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3253c6fd2807SJeff Garzik 	}
3254c6fd2807SJeff Garzik 
3255d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3256c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
325735aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
325835aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3259c6fd2807SJeff Garzik 
3260c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3261d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3262d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3263c6fd2807SJeff Garzik 		msleep(50);
3264c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3265c6fd2807SJeff Garzik 	}
3266c6fd2807SJeff Garzik 
3267d1adc1bbSTejun Heo 	if (status == 0xff)
3268d1adc1bbSTejun Heo 		return -ENODEV;
3269d1adc1bbSTejun Heo 
3270c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3271c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
327235aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
327335aa7a43SJeff Garzik 				tmout / HZ, status);
3274d1adc1bbSTejun Heo 		return -EBUSY;
3275c6fd2807SJeff Garzik 	}
3276c6fd2807SJeff Garzik 
3277c6fd2807SJeff Garzik 	return 0;
3278c6fd2807SJeff Garzik }
3279c6fd2807SJeff Garzik 
3280d4b2bab4STejun Heo /**
328188ff6eafSTejun Heo  *	ata_wait_after_reset - wait before checking status after reset
328288ff6eafSTejun Heo  *	@ap: port containing status register to be polled
328388ff6eafSTejun Heo  *	@deadline: deadline jiffies for the operation
328488ff6eafSTejun Heo  *
328588ff6eafSTejun Heo  *	After reset, we need to pause a while before reading status.
328688ff6eafSTejun Heo  *	Also, certain combinations of controller and device report 0xff
328788ff6eafSTejun Heo  *	for some duration (e.g. until SATA PHY is up and running)
328888ff6eafSTejun Heo  *	which is interpreted as empty port in ATA world.  This
328988ff6eafSTejun Heo  *	function also waits for such devices to get out of 0xff
329088ff6eafSTejun Heo  *	status.
329188ff6eafSTejun Heo  *
329288ff6eafSTejun Heo  *	LOCKING:
329388ff6eafSTejun Heo  *	Kernel thread context (may sleep).
329488ff6eafSTejun Heo  */
329588ff6eafSTejun Heo void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
329688ff6eafSTejun Heo {
329788ff6eafSTejun Heo 	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
329888ff6eafSTejun Heo 
329988ff6eafSTejun Heo 	if (time_before(until, deadline))
330088ff6eafSTejun Heo 		deadline = until;
330188ff6eafSTejun Heo 
330288ff6eafSTejun Heo 	/* Spec mandates ">= 2ms" before checking status.  We wait
330388ff6eafSTejun Heo 	 * 150ms, because that was the magic delay used for ATAPI
330488ff6eafSTejun Heo 	 * devices in Hale Landis's ATADRVR, for the period of time
330588ff6eafSTejun Heo 	 * between when the ATA command register is written, and then
330688ff6eafSTejun Heo 	 * status is checked.  Because waiting for "a while" before
330788ff6eafSTejun Heo 	 * checking status is fine, post SRST, we perform this magic
330888ff6eafSTejun Heo 	 * delay here as well.
330988ff6eafSTejun Heo 	 *
331088ff6eafSTejun Heo 	 * Old drivers/ide uses the 2ms rule and then waits for ready.
331188ff6eafSTejun Heo 	 */
331288ff6eafSTejun Heo 	msleep(150);
331388ff6eafSTejun Heo 
331488ff6eafSTejun Heo 	/* Wait for 0xff to clear.  Some SATA devices take a long time
331588ff6eafSTejun Heo 	 * to clear 0xff after reset.  For example, HHD424020F7SV00
331688ff6eafSTejun Heo 	 * iVDR needs >= 800ms while Quantum GoVault needs even more
331788ff6eafSTejun Heo 	 * than that.
33181974e201STejun Heo 	 *
33191974e201STejun Heo 	 * Note that some PATA controllers (pata_ali) explode if
33201974e201STejun Heo 	 * status register is read more than once when there's no
33211974e201STejun Heo 	 * device attached.
332288ff6eafSTejun Heo 	 */
33231974e201STejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
332488ff6eafSTejun Heo 		while (1) {
332588ff6eafSTejun Heo 			u8 status = ata_chk_status(ap);
332688ff6eafSTejun Heo 
332788ff6eafSTejun Heo 			if (status != 0xff || time_after(jiffies, deadline))
332888ff6eafSTejun Heo 				return;
332988ff6eafSTejun Heo 
333088ff6eafSTejun Heo 			msleep(50);
333188ff6eafSTejun Heo 		}
333288ff6eafSTejun Heo 	}
33331974e201STejun Heo }
333488ff6eafSTejun Heo 
333588ff6eafSTejun Heo /**
3336d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3337d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3338d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3339d4b2bab4STejun Heo  *
3340d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3341d4b2bab4STejun Heo  *	occurs.
3342d4b2bab4STejun Heo  *
3343d4b2bab4STejun Heo  *	LOCKING:
3344d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3345d4b2bab4STejun Heo  *
3346d4b2bab4STejun Heo  *	RETURNS:
3347d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3348d4b2bab4STejun Heo  */
3349d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3350d4b2bab4STejun Heo {
3351d4b2bab4STejun Heo 	unsigned long start = jiffies;
3352d4b2bab4STejun Heo 	int warned = 0;
3353d4b2bab4STejun Heo 
3354d4b2bab4STejun Heo 	while (1) {
3355d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3356d4b2bab4STejun Heo 		unsigned long now = jiffies;
3357d4b2bab4STejun Heo 
3358d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3359d4b2bab4STejun Heo 			return 0;
3360936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3361d4b2bab4STejun Heo 			return -ENODEV;
3362d4b2bab4STejun Heo 		if (time_after(now, deadline))
3363d4b2bab4STejun Heo 			return -EBUSY;
3364d4b2bab4STejun Heo 
3365d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3366d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3367d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3368d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3369d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3370d4b2bab4STejun Heo 			warned = 1;
3371d4b2bab4STejun Heo 		}
3372d4b2bab4STejun Heo 
3373d4b2bab4STejun Heo 		msleep(50);
3374d4b2bab4STejun Heo 	}
3375d4b2bab4STejun Heo }
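
/*
 * Illustrative sketch, not part of the original source: the intended
 * ordering of the two wait helpers after a driver-specific reset.
 * "example_postreset_wait" is hypothetical; the bus softreset path
 * below does essentially the same via ata_bus_post_reset().
 */
static int example_postreset_wait(struct ata_port *ap, unsigned long deadline)
{
	/* give the device/PHY time to present meaningful status first */
	ata_wait_after_reset(ap, deadline);

	/* then poll for !BSY until the deadline expires */
	return ata_wait_ready(ap, deadline);
}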
3376d4b2bab4STejun Heo 
3377d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3378d4b2bab4STejun Heo 			      unsigned long deadline)
3379c6fd2807SJeff Garzik {
3380c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3381c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3382c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
33839b89391cSTejun Heo 	int rc, ret = 0;
3384c6fd2807SJeff Garzik 
3385c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3386c6fd2807SJeff Garzik 	 * BSY bit to clear
3387c6fd2807SJeff Garzik 	 */
3388d4b2bab4STejun Heo 	if (dev0) {
3389d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
33909b89391cSTejun Heo 		if (rc) {
33919b89391cSTejun Heo 			if (rc != -ENODEV)
3392d4b2bab4STejun Heo 				return rc;
33939b89391cSTejun Heo 			ret = rc;
33949b89391cSTejun Heo 		}
3395d4b2bab4STejun Heo 	}
3396c6fd2807SJeff Garzik 
3397e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3398e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3399c6fd2807SJeff Garzik 	 */
3400e141d999STejun Heo 	if (dev1) {
3401e141d999STejun Heo 		int i;
3402c6fd2807SJeff Garzik 
3403c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3404e141d999STejun Heo 
3405e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3406e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3407e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3408e141d999STejun Heo 		 */
3409e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3410e141d999STejun Heo 			u8 nsect, lbal;
3411e141d999STejun Heo 
34120d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
34130d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3414c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3415c6fd2807SJeff Garzik 				break;
3416c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3417c6fd2807SJeff Garzik 		}
3418e141d999STejun Heo 
3419d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
34209b89391cSTejun Heo 		if (rc) {
34219b89391cSTejun Heo 			if (rc != -ENODEV)
3422d4b2bab4STejun Heo 				return rc;
34239b89391cSTejun Heo 			ret = rc;
34249b89391cSTejun Heo 		}
3425d4b2bab4STejun Heo 	}
3426c6fd2807SJeff Garzik 
3427c6fd2807SJeff Garzik 	/* is all this really necessary? */
3428c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3429c6fd2807SJeff Garzik 	if (dev1)
3430c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3431c6fd2807SJeff Garzik 	if (dev0)
3432c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3433d4b2bab4STejun Heo 
34349b89391cSTejun Heo 	return ret;
3435c6fd2807SJeff Garzik }
3436c6fd2807SJeff Garzik 
3437d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3438d4b2bab4STejun Heo 			     unsigned long deadline)
3439c6fd2807SJeff Garzik {
3440c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3441c6fd2807SJeff Garzik 
344244877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3443c6fd2807SJeff Garzik 
3444c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
34450d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3446c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
34470d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3448c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
34490d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3450c6fd2807SJeff Garzik 
345188ff6eafSTejun Heo 	/* wait a while before checking status */
345288ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
3453c6fd2807SJeff Garzik 
3454c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3455c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3456c6fd2807SJeff Garzik 	 * pulldown resistor.
3457c6fd2807SJeff Garzik 	 */
3458150981b0SAlan Cox 	if (ata_chk_status(ap) == 0xFF)
34599b89391cSTejun Heo 		return -ENODEV;
3460c6fd2807SJeff Garzik 
3461d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3462c6fd2807SJeff Garzik }
3463c6fd2807SJeff Garzik 
3464c6fd2807SJeff Garzik /**
3465c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3466c6fd2807SJeff Garzik  *	@ap: port to reset
3467c6fd2807SJeff Garzik  *
3468c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3469c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3470c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3471c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3472c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3473c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3474c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3475c6fd2807SJeff Garzik  *
3476c6fd2807SJeff Garzik  *	LOCKING:
3477c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3478cca3974eSJeff Garzik  *	Obtains host lock.
3479c6fd2807SJeff Garzik  *
3480c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3481c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3482c6fd2807SJeff Garzik  */
3483c6fd2807SJeff Garzik 
3484c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3485c6fd2807SJeff Garzik {
34869af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3487c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3488c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3489c6fd2807SJeff Garzik 	u8 err;
3490c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
34919b89391cSTejun Heo 	int rc;
3492c6fd2807SJeff Garzik 
349344877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3494c6fd2807SJeff Garzik 
3495c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3496c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3497c6fd2807SJeff Garzik 		dev0 = 1;
3498c6fd2807SJeff Garzik 	else {
3499c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3500c6fd2807SJeff Garzik 		if (slave_possible)
3501c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3502c6fd2807SJeff Garzik 	}
3503c6fd2807SJeff Garzik 
3504c6fd2807SJeff Garzik 	if (dev0)
3505c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3506c6fd2807SJeff Garzik 	if (dev1)
3507c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3508c6fd2807SJeff Garzik 
3509c6fd2807SJeff Garzik 	/* select device 0 again */
3510c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3511c6fd2807SJeff Garzik 
3512c6fd2807SJeff Garzik 	/* issue bus reset */
35139b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
35149b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
35159b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3516c6fd2807SJeff Garzik 			goto err_out;
35179b89391cSTejun Heo 	}
3518c6fd2807SJeff Garzik 
3519c6fd2807SJeff Garzik 	/*
3520c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3521c6fd2807SJeff Garzik 	 */
35223f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3523c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
35243f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3525c6fd2807SJeff Garzik 
3526c6fd2807SJeff Garzik 	/* is double-select really necessary? */
35279af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3528c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
35299af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3530c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3531c6fd2807SJeff Garzik 
3532c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
35339af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
35349af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3535c6fd2807SJeff Garzik 		goto err_out;
3536c6fd2807SJeff Garzik 
3537c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3538c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
35390d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3540c6fd2807SJeff Garzik 	}
3541c6fd2807SJeff Garzik 
3542c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3543c6fd2807SJeff Garzik 	return;
3544c6fd2807SJeff Garzik 
3545c6fd2807SJeff Garzik err_out:
3546c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3547ac8869d5SJeff Garzik 	ata_port_disable(ap);
3548c6fd2807SJeff Garzik 
3549c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3550c6fd2807SJeff Garzik }
3551c6fd2807SJeff Garzik 
3552c6fd2807SJeff Garzik /**
3553936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3554936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3555c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3556d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3557c6fd2807SJeff Garzik  *
3558936fd732STejun Heo  *	Make sure SStatus of @link reaches stable state, determined by
3559c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3560c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3561d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3562d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3563c6fd2807SJeff Garzik  *	until timeout and then returns 0 if DET is stable at 1.
3564c6fd2807SJeff Garzik  *
3565d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3566d4b2bab4STejun Heo  *	two is used.
3567d4b2bab4STejun Heo  *
3568c6fd2807SJeff Garzik  *	LOCKING:
3569c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3570c6fd2807SJeff Garzik  *
3571c6fd2807SJeff Garzik  *	RETURNS:
3572c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3573c6fd2807SJeff Garzik  */
3574936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3575d4b2bab4STejun Heo 		       unsigned long deadline)
3576c6fd2807SJeff Garzik {
3577c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3578d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3579d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3580c6fd2807SJeff Garzik 	u32 last, cur;
3581c6fd2807SJeff Garzik 	int rc;
3582c6fd2807SJeff Garzik 
3583d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3584d4b2bab4STejun Heo 	if (time_before(t, deadline))
3585d4b2bab4STejun Heo 		deadline = t;
3586d4b2bab4STejun Heo 
3587936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3588c6fd2807SJeff Garzik 		return rc;
3589c6fd2807SJeff Garzik 	cur &= 0xf;
3590c6fd2807SJeff Garzik 
3591c6fd2807SJeff Garzik 	last = cur;
3592c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3593c6fd2807SJeff Garzik 
3594c6fd2807SJeff Garzik 	while (1) {
3595c6fd2807SJeff Garzik 		msleep(interval_msec);
3596936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3597c6fd2807SJeff Garzik 			return rc;
3598c6fd2807SJeff Garzik 		cur &= 0xf;
3599c6fd2807SJeff Garzik 
3600c6fd2807SJeff Garzik 		/* DET stable? */
3601c6fd2807SJeff Garzik 		if (cur == last) {
3602d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3603c6fd2807SJeff Garzik 				continue;
3604c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3605c6fd2807SJeff Garzik 				return 0;
3606c6fd2807SJeff Garzik 			continue;
3607c6fd2807SJeff Garzik 		}
3608c6fd2807SJeff Garzik 
3609c6fd2807SJeff Garzik 		/* unstable, start over */
3610c6fd2807SJeff Garzik 		last = cur;
3611c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3612c6fd2807SJeff Garzik 
3613f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3614f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3615f1545154STejun Heo 		 */
3616d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3617f1545154STejun Heo 			return -EPIPE;
3618c6fd2807SJeff Garzik 	}
3619c6fd2807SJeff Garzik }
3620c6fd2807SJeff Garzik 
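/*
 * Illustrative sketch only: the @params triplet is { interval, duration,
 * timeout } in milliseconds.  The values and the helper name below are
 * hypothetical; real callers normally pass one of the sata_deb_timing_*
 * tables from <linux/libata.h>.
 */
static inline int example_debounce_link(struct ata_link *link)
{
	/* poll every 5ms, require 100ms of stability, give up after 2s */
	static const unsigned long example_deb_timing[] = { 5, 100, 2000 };

	return sata_link_debounce(link, example_deb_timing,
				  jiffies + msecs_to_jiffies(2000));
}
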
3621c6fd2807SJeff Garzik /**
3622936fd732STejun Heo  *	sata_link_resume - resume SATA link
3623936fd732STejun Heo  *	@link: ATA link to resume SATA
3624c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3625d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3626c6fd2807SJeff Garzik  *
3627936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3628c6fd2807SJeff Garzik  *
3629c6fd2807SJeff Garzik  *	LOCKING:
3630c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3631c6fd2807SJeff Garzik  *
3632c6fd2807SJeff Garzik  *	RETURNS:
3633c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3634c6fd2807SJeff Garzik  */
3635936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3636d4b2bab4STejun Heo 		     unsigned long deadline)
3637c6fd2807SJeff Garzik {
3638c6fd2807SJeff Garzik 	u32 scontrol;
3639c6fd2807SJeff Garzik 	int rc;
3640c6fd2807SJeff Garzik 
3641936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3642c6fd2807SJeff Garzik 		return rc;
3643c6fd2807SJeff Garzik 
3644c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3645c6fd2807SJeff Garzik 
3646936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3647c6fd2807SJeff Garzik 		return rc;
3648c6fd2807SJeff Garzik 
3649c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3650c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3651c6fd2807SJeff Garzik 	 */
3652c6fd2807SJeff Garzik 	msleep(200);
3653c6fd2807SJeff Garzik 
3654936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3655c6fd2807SJeff Garzik }
3656c6fd2807SJeff Garzik 
3657c6fd2807SJeff Garzik /**
3658c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3659cc0680a5STejun Heo  *	@link: ATA link to be reset
3660d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3661c6fd2807SJeff Garzik  *
3662cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  Failure from
3663b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give
3664b8cffc6aSTejun Heo  *	up on that port, so prereset should be best-effort.  It does
3665b8cffc6aSTejun Heo  *	its best to prepare for the reset sequence, but if things go
3666b8cffc6aSTejun Heo  *	wrong it should just whine, not fail.
3667c6fd2807SJeff Garzik  *
3668c6fd2807SJeff Garzik  *	LOCKING:
3669c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3670c6fd2807SJeff Garzik  *
3671c6fd2807SJeff Garzik  *	RETURNS:
3672c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3673c6fd2807SJeff Garzik  */
3674cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3675c6fd2807SJeff Garzik {
3676cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3677936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3678c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3679c6fd2807SJeff Garzik 	int rc;
3680c6fd2807SJeff Garzik 
368131daabdaSTejun Heo 	/* handle link resume */
3682c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
36830c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3684c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3685c6fd2807SJeff Garzik 
3686633273a3STejun Heo 	/* Some PMPs don't work with only SRST, force hardreset if PMP
3687633273a3STejun Heo 	 * is supported.
3688633273a3STejun Heo 	 */
3689633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP)
3690633273a3STejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
3691633273a3STejun Heo 
3692c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3693c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3694c6fd2807SJeff Garzik 		return 0;
3695c6fd2807SJeff Garzik 
3696936fd732STejun Heo 	/* if SATA, resume link */
3697a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3698936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3699b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3700b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3701cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3702c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3703c6fd2807SJeff Garzik 	}
3704c6fd2807SJeff Garzik 
3705c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3706c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3707c6fd2807SJeff Garzik 	 */
37080c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3709b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
37106dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3711cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3712b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3713b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3714b8cffc6aSTejun Heo 		}
3715b8cffc6aSTejun Heo 	}
3716c6fd2807SJeff Garzik 
3717c6fd2807SJeff Garzik 	return 0;
3718c6fd2807SJeff Garzik }
3719c6fd2807SJeff Garzik 
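/*
 * Illustrative sketch only: an LLDD-specific ->prereset typically does its
 * controller-specific preparation and then falls back to ata_std_prereset().
 * The function below is hypothetical and not part of this file.
 */
static inline int example_drv_prereset(struct ata_link *link,
				       unsigned long deadline)
{
	/* controller-specific preparation (clock gating, quirks, ...) here */

	return ata_std_prereset(link, deadline);
}
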
3720c6fd2807SJeff Garzik /**
3721c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3722cc0680a5STejun Heo  *	@link: ATA link to reset
3723c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3724d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3725c6fd2807SJeff Garzik  *
3726c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3727c6fd2807SJeff Garzik  *
3728c6fd2807SJeff Garzik  *	LOCKING:
3729c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3730c6fd2807SJeff Garzik  *
3731c6fd2807SJeff Garzik  *	RETURNS:
3732c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3733c6fd2807SJeff Garzik  */
3734cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3735d4b2bab4STejun Heo 		      unsigned long deadline)
3736c6fd2807SJeff Garzik {
3737cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3738c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3739d4b2bab4STejun Heo 	unsigned int devmask = 0;
3740d4b2bab4STejun Heo 	int rc;
3741c6fd2807SJeff Garzik 	u8 err;
3742c6fd2807SJeff Garzik 
3743c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3744c6fd2807SJeff Garzik 
3745936fd732STejun Heo 	if (ata_link_offline(link)) {
3746c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3747c6fd2807SJeff Garzik 		goto out;
3748c6fd2807SJeff Garzik 	}
3749c6fd2807SJeff Garzik 
3750c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3751c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3752c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3753c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3754c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3755c6fd2807SJeff Garzik 
3756c6fd2807SJeff Garzik 	/* select device 0 again */
3757c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3758c6fd2807SJeff Garzik 
3759c6fd2807SJeff Garzik 	/* issue bus reset */
3760c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3761d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
37629b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
3763936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3764cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3765d4b2bab4STejun Heo 		return rc;
3766c6fd2807SJeff Garzik 	}
3767c6fd2807SJeff Garzik 
3768c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
37693f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
37703f19859eSTejun Heo 					  devmask & (1 << 0), &err);
3771c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
37723f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
37733f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3774c6fd2807SJeff Garzik 
3775c6fd2807SJeff Garzik  out:
3776c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3777c6fd2807SJeff Garzik 	return 0;
3778c6fd2807SJeff Garzik }
3779c6fd2807SJeff Garzik 
3780c6fd2807SJeff Garzik /**
3781cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
3782cc0680a5STejun Heo  *	@link: link to reset
3783b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3784d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3785c6fd2807SJeff Garzik  *
3786cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
3787c6fd2807SJeff Garzik  *
3788c6fd2807SJeff Garzik  *	LOCKING:
3789c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3790c6fd2807SJeff Garzik  *
3791c6fd2807SJeff Garzik  *	RETURNS:
3792c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3793c6fd2807SJeff Garzik  */
3794cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3795d4b2bab4STejun Heo 			unsigned long deadline)
3796c6fd2807SJeff Garzik {
3797c6fd2807SJeff Garzik 	u32 scontrol;
3798c6fd2807SJeff Garzik 	int rc;
3799c6fd2807SJeff Garzik 
3800c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3801c6fd2807SJeff Garzik 
3802936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
3803c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3804c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3805c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3806c6fd2807SJeff Garzik 		 * and Sil3124.
3807c6fd2807SJeff Garzik 		 */
3808936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3809b6103f6dSTejun Heo 			goto out;
3810c6fd2807SJeff Garzik 
3811cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3812c6fd2807SJeff Garzik 
3813936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3814b6103f6dSTejun Heo 			goto out;
3815c6fd2807SJeff Garzik 
3816936fd732STejun Heo 		sata_set_spd(link);
3817c6fd2807SJeff Garzik 	}
3818c6fd2807SJeff Garzik 
3819c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3820936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3821b6103f6dSTejun Heo 		goto out;
3822c6fd2807SJeff Garzik 
3823c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3824c6fd2807SJeff Garzik 
3825936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3826b6103f6dSTejun Heo 		goto out;
3827c6fd2807SJeff Garzik 
3828c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3829c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3830c6fd2807SJeff Garzik 	 */
3831c6fd2807SJeff Garzik 	msleep(1);
3832c6fd2807SJeff Garzik 
3833936fd732STejun Heo 	/* bring link back */
3834936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
3835b6103f6dSTejun Heo  out:
3836b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3837b6103f6dSTejun Heo 	return rc;
3838b6103f6dSTejun Heo }
3839b6103f6dSTejun Heo 
3840b6103f6dSTejun Heo /**
3841b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3842cc0680a5STejun Heo  *	@link: link to reset
3843b6103f6dSTejun Heo  *	@class: resulting class of attached device
3844d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3845b6103f6dSTejun Heo  *
3846b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3847b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3848b6103f6dSTejun Heo  *
3849b6103f6dSTejun Heo  *	LOCKING:
3850b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3851b6103f6dSTejun Heo  *
3852b6103f6dSTejun Heo  *	RETURNS:
3853b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3854b6103f6dSTejun Heo  */
3855cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3856d4b2bab4STejun Heo 		       unsigned long deadline)
3857b6103f6dSTejun Heo {
3858cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3859936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3860b6103f6dSTejun Heo 	int rc;
3861b6103f6dSTejun Heo 
3862b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3863b6103f6dSTejun Heo 
3864b6103f6dSTejun Heo 	/* do hardreset */
3865cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
3866b6103f6dSTejun Heo 	if (rc) {
3867cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3868b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3869b6103f6dSTejun Heo 		return rc;
3870b6103f6dSTejun Heo 	}
3871c6fd2807SJeff Garzik 
3872c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3873936fd732STejun Heo 	if (ata_link_offline(link)) {
3874c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3875c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3876c6fd2807SJeff Garzik 		return 0;
3877c6fd2807SJeff Garzik 	}
3878c6fd2807SJeff Garzik 
387988ff6eafSTejun Heo 	/* wait a while before checking status */
388088ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
388134fee227STejun Heo 
3882633273a3STejun Heo 	/* If PMP is supported, we have to do follow-up SRST.  Note
3883633273a3STejun Heo 	 * that some PMPs don't send D2H Reg FIS after hardreset at
3884633273a3STejun Heo 	 * all if the first port is empty.  Wait for it just for a
3885633273a3STejun Heo 	 * second and request follow-up SRST.
3886633273a3STejun Heo 	 */
3887633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP) {
3888633273a3STejun Heo 		ata_wait_ready(ap, jiffies + HZ);
3889633273a3STejun Heo 		return -EAGAIN;
3890633273a3STejun Heo 	}
3891633273a3STejun Heo 
3892d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
38939b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
38949b89391cSTejun Heo 	if (rc) {
3895cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3896d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3897d4b2bab4STejun Heo 		return rc;
3898c6fd2807SJeff Garzik 	}
3899c6fd2807SJeff Garzik 
3900c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3901c6fd2807SJeff Garzik 
39023f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
3903c6fd2807SJeff Garzik 
3904c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3905c6fd2807SJeff Garzik 	return 0;
3906c6fd2807SJeff Garzik }
3907c6fd2807SJeff Garzik 
3908c6fd2807SJeff Garzik /**
3909c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3910cc0680a5STejun Heo  *	@link: the target ata_link
3911c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3912c6fd2807SJeff Garzik  *
3913c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3914c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3915c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3916c6fd2807SJeff Garzik  *
3917c6fd2807SJeff Garzik  *	LOCKING:
3918c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3919c6fd2807SJeff Garzik  */
3920cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3921c6fd2807SJeff Garzik {
3922cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3923c6fd2807SJeff Garzik 	u32 serror;
3924c6fd2807SJeff Garzik 
3925c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3926c6fd2807SJeff Garzik 
3927c6fd2807SJeff Garzik 	/* print link status */
3928936fd732STejun Heo 	sata_print_link_status(link);
3929c6fd2807SJeff Garzik 
3930c6fd2807SJeff Garzik 	/* clear SError */
3931936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3932936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
3933f7fe7ad4STejun Heo 	link->eh_info.serror = 0;
3934c6fd2807SJeff Garzik 
3935c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3936c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3937c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3938c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3939c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3940c6fd2807SJeff Garzik 
3941c6fd2807SJeff Garzik 	/* bail out if no device is present */
3942c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3943c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3944c6fd2807SJeff Garzik 		return;
3945c6fd2807SJeff Garzik 	}
3946c6fd2807SJeff Garzik 
3947c6fd2807SJeff Garzik 	/* set up device control */
39480d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
39490d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3950c6fd2807SJeff Garzik 
3951c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3952c6fd2807SJeff Garzik }
3953c6fd2807SJeff Garzik 
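/*
 * Illustrative sketch only: how an LLDD ->error_handler commonly wires the
 * standard prereset/softreset/hardreset/postreset callbacks together.  This
 * assumes the ata_do_eh() helper declared in <linux/libata.h>; the function
 * below is hypothetical and not part of this file.
 */
static inline void example_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}
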
3954c6fd2807SJeff Garzik /**
3955c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3956c6fd2807SJeff Garzik  *	@dev: device to compare against
3957c6fd2807SJeff Garzik  *	@new_class: class of the new device
3958c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3959c6fd2807SJeff Garzik  *
3960c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3961c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3962c6fd2807SJeff Garzik  *	@new_id.
3963c6fd2807SJeff Garzik  *
3964c6fd2807SJeff Garzik  *	LOCKING:
3965c6fd2807SJeff Garzik  *	None.
3966c6fd2807SJeff Garzik  *
3967c6fd2807SJeff Garzik  *	RETURNS:
3968c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3969c6fd2807SJeff Garzik  */
3970c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3971c6fd2807SJeff Garzik 			       const u16 *new_id)
3972c6fd2807SJeff Garzik {
3973c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3974a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3975a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3976c6fd2807SJeff Garzik 
3977c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3978c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3979c6fd2807SJeff Garzik 			       dev->class, new_class);
3980c6fd2807SJeff Garzik 		return 0;
3981c6fd2807SJeff Garzik 	}
3982c6fd2807SJeff Garzik 
3983a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3984a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3985a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3986a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3987c6fd2807SJeff Garzik 
3988c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3989c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3990c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3991c6fd2807SJeff Garzik 		return 0;
3992c6fd2807SJeff Garzik 	}
3993c6fd2807SJeff Garzik 
3994c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3995c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3996c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3997c6fd2807SJeff Garzik 		return 0;
3998c6fd2807SJeff Garzik 	}
3999c6fd2807SJeff Garzik 
4000c6fd2807SJeff Garzik 	return 1;
4001c6fd2807SJeff Garzik }
4002c6fd2807SJeff Garzik 
4003c6fd2807SJeff Garzik /**
4004fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
40053fae450cSHenrik Kretzschmar  *	@dev: target ATA device
4006bff04647STejun Heo  *	@readid_flags: read ID flags
4007c6fd2807SJeff Garzik  *
4008c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
4009c6fd2807SJeff Garzik  *	the port.
4010c6fd2807SJeff Garzik  *
4011c6fd2807SJeff Garzik  *	LOCKING:
4012c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4013c6fd2807SJeff Garzik  *
4014c6fd2807SJeff Garzik  *	RETURNS:
4015c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
4016c6fd2807SJeff Garzik  */
4017fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4018c6fd2807SJeff Garzik {
4019c6fd2807SJeff Garzik 	unsigned int class = dev->class;
40209af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
4021c6fd2807SJeff Garzik 	int rc;
4022c6fd2807SJeff Garzik 
4023c6fd2807SJeff Garzik 	/* read ID data */
4024bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4025c6fd2807SJeff Garzik 	if (rc)
4026fe30911bSTejun Heo 		return rc;
4027c6fd2807SJeff Garzik 
4028c6fd2807SJeff Garzik 	/* is the device still there? */
4029fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
4030fe30911bSTejun Heo 		return -ENODEV;
4031c6fd2807SJeff Garzik 
4032c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4033fe30911bSTejun Heo 	return 0;
4034fe30911bSTejun Heo }
4035fe30911bSTejun Heo 
4036fe30911bSTejun Heo /**
4037fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
4038fe30911bSTejun Heo  *	@dev: device to revalidate
4039422c9daaSTejun Heo  *	@new_class: new class code
4040fe30911bSTejun Heo  *	@readid_flags: read ID flags
4041fe30911bSTejun Heo  *
4042fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4043fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
4044fe30911bSTejun Heo  *
4045fe30911bSTejun Heo  *	LOCKING:
4046fe30911bSTejun Heo  *	Kernel thread context (may sleep)
4047fe30911bSTejun Heo  *
4048fe30911bSTejun Heo  *	RETURNS:
4049fe30911bSTejun Heo  *	0 on success, negative errno otherwise
4050fe30911bSTejun Heo  */
4051422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4052422c9daaSTejun Heo 		       unsigned int readid_flags)
4053fe30911bSTejun Heo {
40546ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
4055fe30911bSTejun Heo 	int rc;
4056fe30911bSTejun Heo 
4057fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
4058fe30911bSTejun Heo 		return -ENODEV;
4059fe30911bSTejun Heo 
4060422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4061422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
4062422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4063422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4064422c9daaSTejun Heo 			       dev->class, new_class);
4065422c9daaSTejun Heo 		rc = -ENODEV;
4066422c9daaSTejun Heo 		goto fail;
4067422c9daaSTejun Heo 	}
4068422c9daaSTejun Heo 
4069fe30911bSTejun Heo 	/* re-read ID */
4070fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
4071fe30911bSTejun Heo 	if (rc)
4072fe30911bSTejun Heo 		goto fail;
4073c6fd2807SJeff Garzik 
4074c6fd2807SJeff Garzik 	/* configure device according to the new ID */
4075efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
40766ddcd3b0STejun Heo 	if (rc)
40776ddcd3b0STejun Heo 		goto fail;
40786ddcd3b0STejun Heo 
40796ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
4080b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4081b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
40826ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
40836ddcd3b0STejun Heo 			       "%llu != %llu\n",
40846ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
40856ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
40868270bec4STejun Heo 
40878270bec4STejun Heo 		/* restore original n_sectors */
40888270bec4STejun Heo 		dev->n_sectors = n_sectors;
40898270bec4STejun Heo 
40906ddcd3b0STejun Heo 		rc = -ENODEV;
40916ddcd3b0STejun Heo 		goto fail;
40926ddcd3b0STejun Heo 	}
40936ddcd3b0STejun Heo 
4094c6fd2807SJeff Garzik 	return 0;
4095c6fd2807SJeff Garzik 
4096c6fd2807SJeff Garzik  fail:
4097c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4098c6fd2807SJeff Garzik 	return rc;
4099c6fd2807SJeff Garzik }
4100c6fd2807SJeff Garzik 
41016919a0a6SAlan Cox struct ata_blacklist_entry {
41026919a0a6SAlan Cox 	const char *model_num;
41036919a0a6SAlan Cox 	const char *model_rev;
41046919a0a6SAlan Cox 	unsigned long horkage;
41056919a0a6SAlan Cox };
41066919a0a6SAlan Cox 
41076919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
41086919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
41096919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
41106919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
41116919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
41126919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
41136919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
41146919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
41156919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
41166919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
41176919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
41186919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
41196919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
41206919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
41216919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
41226919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
41236919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
41246919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
41256919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
41266919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
41276919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
41286919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
41296919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
41306919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
41316919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
41326919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
41336919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
41346919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
41356919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
41366919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
41376919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
413839f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
41393af9a77aSTejun Heo 	/* Odd clown on sil3726/4726 PMPs */
41403af9a77aSTejun Heo 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
41413af9a77aSTejun Heo 						ATA_HORKAGE_SKIP_PM },
41426919a0a6SAlan Cox 
414318d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
414440a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
414518d6e9d5SAlbert Lee 
41466919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
41476919a0a6SAlan Cox 
41486919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
41496919a0a6SAlan Cox 	/* NCQ is slow */
41506919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4151459ad688STejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
415209125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
415309125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
41547acfaf30SPaul Rolland 	/* NCQ is broken */
4155539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
41560e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
41570b0a43e0SDavid Milburn 	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
41580b0a43e0SDavid Milburn 	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
4159da6f0ec2SPaolo Ornati 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4160e41bd3e8STejun Heo 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4161539cc7c7SJeff Garzik 
416236e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
416336e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
416436e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
416536e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
416636e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
41676919a0a6SAlan Cox 
416816c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
416916c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
417016c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
417116c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
417216c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
41736919a0a6SAlan Cox 
417493328e11SAlan Cox 	/* Devices which report 1 sector over size HPA */
417593328e11SAlan Cox 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
417693328e11SAlan Cox 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
417793328e11SAlan Cox 
41786bbfd53dSAlan Cox 	/* Devices which get the IVB wrong */
41796bbfd53dSAlan Cox 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
41806bbfd53dSAlan Cox 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4181e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4182e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4183e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
41846bbfd53dSAlan Cox 
41856919a0a6SAlan Cox 	/* End Marker */
41866919a0a6SAlan Cox 	{ }
4187c6fd2807SJeff Garzik };
4188c6fd2807SJeff Garzik 
4189741b7763SAdrian Bunk static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4190539cc7c7SJeff Garzik {
4191539cc7c7SJeff Garzik 	const char *p;
4192539cc7c7SJeff Garzik 	int len;
4193539cc7c7SJeff Garzik 
4194539cc7c7SJeff Garzik 	/*
4195539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
4196539cc7c7SJeff Garzik 	 */
4197539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
4198539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
4199539cc7c7SJeff Garzik 		len = p - patt;
4200317b50b8SAndrew Paprocki 	else {
4201539cc7c7SJeff Garzik 		len = strlen(name);
4202317b50b8SAndrew Paprocki 		if (!len) {
4203317b50b8SAndrew Paprocki 			if (!*patt)
4204317b50b8SAndrew Paprocki 				return 0;
4205317b50b8SAndrew Paprocki 			return -1;
4206317b50b8SAndrew Paprocki 		}
4207317b50b8SAndrew Paprocki 	}
4208539cc7c7SJeff Garzik 
4209539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
4210539cc7c7SJeff Garzik }
4211539cc7c7SJeff Garzik 
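/*
 * Illustrative sketch only: the matcher above treats a trailing '*' as
 * "match this prefix", otherwise it compares against the full @name.
 * The helper below is hypothetical and exists only to show the two cases.
 */
static inline void example_pattern_cmp(void)
{
	/* trailing wildcard: only the "Maxtor " prefix is compared -> match */
	WARN_ON(strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') != 0);

	/* no wildcard: full-length compare -> mismatch */
	WARN_ON(strn_pattern_cmp("ST380817AS", "ST3160023AS", '*') == 0);
}
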
421275683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4213c6fd2807SJeff Garzik {
42148bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
42158bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
42166919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4217c6fd2807SJeff Garzik 
42188bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
42198bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4220c6fd2807SJeff Garzik 
42216919a0a6SAlan Cox 	while (ad->model_num) {
4222539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
42236919a0a6SAlan Cox 			if (ad->model_rev == NULL)
42246919a0a6SAlan Cox 				return ad->horkage;
4225539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
42266919a0a6SAlan Cox 				return ad->horkage;
4227c6fd2807SJeff Garzik 		}
42286919a0a6SAlan Cox 		ad++;
4229c6fd2807SJeff Garzik 	}
4230c6fd2807SJeff Garzik 	return 0;
4231c6fd2807SJeff Garzik }
4232c6fd2807SJeff Garzik 
42336919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
42346919a0a6SAlan Cox {
42356919a0a6SAlan Cox 	/* We don't support polling DMA.
42366919a0a6SAlan Cox 	 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
42376919a0a6SAlan Cox 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
42386919a0a6SAlan Cox 	 */
42399af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
42406919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
42416919a0a6SAlan Cox 		return 1;
424275683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
42436919a0a6SAlan Cox }
42446919a0a6SAlan Cox 
4245c6fd2807SJeff Garzik /**
42466bbfd53dSAlan Cox  *	ata_is_40wire		-	check drive side detection
42476bbfd53dSAlan Cox  *	@dev: device
42486bbfd53dSAlan Cox  *
42496bbfd53dSAlan Cox  *	Perform drive side detection decoding, allowing for device vendors
42506bbfd53dSAlan Cox  *	who can't follow the documentation.
42516bbfd53dSAlan Cox  */
42526bbfd53dSAlan Cox 
42536bbfd53dSAlan Cox static int ata_is_40wire(struct ata_device *dev)
42546bbfd53dSAlan Cox {
42556bbfd53dSAlan Cox 	if (dev->horkage & ATA_HORKAGE_IVB)
42566bbfd53dSAlan Cox 		return ata_drive_40wire_relaxed(dev->id);
42576bbfd53dSAlan Cox 	return ata_drive_40wire(dev->id);
42586bbfd53dSAlan Cox }
42596bbfd53dSAlan Cox 
42606bbfd53dSAlan Cox /**
4261c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
4262c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
4263c6fd2807SJeff Garzik  *
4264c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
4265c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
4266c6fd2807SJeff Garzik  *	known limits including host controller limits, device
4267c6fd2807SJeff Garzik  *	blacklist, etc...
4268c6fd2807SJeff Garzik  *
4269c6fd2807SJeff Garzik  *	LOCKING:
4270c6fd2807SJeff Garzik  *	None.
4271c6fd2807SJeff Garzik  */
4272c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
4273c6fd2807SJeff Garzik {
42749af5c9c9STejun Heo 	struct ata_link *link = dev->link;
42759af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
4276cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
4277c6fd2807SJeff Garzik 	unsigned long xfer_mask;
4278c6fd2807SJeff Garzik 
4279c6fd2807SJeff Garzik 	/* controller modes available */
4280c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4281c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
4282c6fd2807SJeff Garzik 
42838343f889SRobert Hancock 	/* drive modes available */
4284c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4285c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
4286c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
4287c6fd2807SJeff Garzik 
4288b352e57dSAlan Cox 	/*
4289b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4290b352e57dSAlan Cox 	 *	cable
4291b352e57dSAlan Cox 	 */
4292b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
4293b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
4294b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4295b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
4296b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4297b352e57dSAlan Cox 	}
4298b352e57dSAlan Cox 
4299c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
4300c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4301c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4302c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4303c6fd2807SJeff Garzik 	}
4304c6fd2807SJeff Garzik 
430514d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
430614d66ab7SPetr Vandrovec 	    host->simplex_claimed && host->simplex_claimed != ap) {
4307c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4308c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4309c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4310c6fd2807SJeff Garzik 	}
4311c6fd2807SJeff Garzik 
4312e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4313e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4314e424675fSJeff Garzik 
4315c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4316a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4317c6fd2807SJeff Garzik 
43188343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
43198343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
43208343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
43218343f889SRobert Hancock 	 * solely limited by the cable.
43228343f889SRobert Hancock 	 * Cables reported host side as unknown or 80 wire are checked
43238343f889SRobert Hancock 	 * drive side as well.  Cases where we know a 40 wire cable is
43248343f889SRobert Hancock 	 * safely used at 80 wire speeds are not checked here.
43258343f889SRobert Hancock 	 */
43268343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
43278343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
43288343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
43296bbfd53dSAlan Cox 		    (ata_is_40wire(dev) &&
43308343f889SRobert Hancock 		    (ap->cbl == ATA_CBL_PATA_UNK ||
43318343f889SRobert Hancock 		     ap->cbl == ATA_CBL_PATA80))) {
43328343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
43338343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
43348343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
43358343f889SRobert Hancock 		}
43368343f889SRobert Hancock 
4337c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4338c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4339c6fd2807SJeff Garzik }
4340c6fd2807SJeff Garzik 
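/*
 * Illustrative sketch only: the combined xfer_mask used above packs the
 * PIO, MWDMA and UDMA mode masks into one word at ATA_SHIFT_{PIO,MWDMA,UDMA},
 * so dropping a whole capability is a single mask operation, as in the DMA
 * blacklist handling.  The helper below is hypothetical.
 */
static inline unsigned long example_strip_udma(struct ata_port *ap)
{
	unsigned long xfer_mask = ata_pack_xfermask(ap->pio_mask,
						    ap->mwdma_mask,
						    ap->udma_mask);

	/* drop every UDMA mode, keep PIO and MWDMA untouched */
	return xfer_mask & ~ATA_MASK_UDMA;
}
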
4341c6fd2807SJeff Garzik /**
4342c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4343c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4344c6fd2807SJeff Garzik  *
4345c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4346c6fd2807SJeff Garzik  *	on port @ap.
4347c6fd2807SJeff Garzik  *
4348c6fd2807SJeff Garzik  *	LOCKING:
4349c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4350c6fd2807SJeff Garzik  *
4351c6fd2807SJeff Garzik  *	RETURNS:
4352c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4353c6fd2807SJeff Garzik  */
4354c6fd2807SJeff Garzik 
4355c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4356c6fd2807SJeff Garzik {
4357c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4358c6fd2807SJeff Garzik 	unsigned int err_mask;
4359c6fd2807SJeff Garzik 
4360c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4361c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4362c6fd2807SJeff Garzik 
4363464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4364464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4365464cf177STejun Heo 	 */
4366c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4367c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4368c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4369464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4370c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4371b9f8ab2dSAlan Cox 	/* If we are using IORDY we must send the mode setting command */
4372b9f8ab2dSAlan Cox 	if (ata_pio_need_iordy(dev))
4373c6fd2807SJeff Garzik 		tf.nsect = dev->xfer_mode;
4374b9f8ab2dSAlan Cox 	/* If the device has IORDY and the controller does not - turn it off */
4375b9f8ab2dSAlan Cox  	else if (ata_id_has_iordy(dev->id))
4376b9f8ab2dSAlan Cox 		tf.nsect = 0x01;
4377b9f8ab2dSAlan Cox 	else /* In the ancient relic department - skip all of this */
4378b9f8ab2dSAlan Cox 		return 0;
4379c6fd2807SJeff Garzik 
43802b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4381c6fd2807SJeff Garzik 
4382c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4383c6fd2807SJeff Garzik 	return err_mask;
4384c6fd2807SJeff Garzik }
4385c6fd2807SJeff Garzik /**
4386218f3d30SJeff Garzik  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
43879f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
43889f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
4389218f3d30SJeff Garzik  *	@feature: The feature to set, passed in the sector count field
43909f45cbd3SKristen Carlson Accardi  *
43919f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4392218f3d30SJeff Garzik  *	on port @ap, with the sector count set to @feature.
43939f45cbd3SKristen Carlson Accardi  *
43949f45cbd3SKristen Carlson Accardi  *	LOCKING:
43959f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
43969f45cbd3SKristen Carlson Accardi  *
43979f45cbd3SKristen Carlson Accardi  *	RETURNS:
43989f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
43999f45cbd3SKristen Carlson Accardi  */
4400218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4401218f3d30SJeff Garzik 					u8 feature)
44029f45cbd3SKristen Carlson Accardi {
44039f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
44049f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
44059f45cbd3SKristen Carlson Accardi 
44069f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
44079f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
44089f45cbd3SKristen Carlson Accardi 
44099f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
44109f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
44119f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
44129f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
44139f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
4414218f3d30SJeff Garzik 	tf.nsect = feature;
44159f45cbd3SKristen Carlson Accardi 
44162b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
44179f45cbd3SKristen Carlson Accardi 
44189f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
44199f45cbd3SKristen Carlson Accardi 	return err_mask;
44209f45cbd3SKristen Carlson Accardi }
44219f45cbd3SKristen Carlson Accardi 
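/*
 * Illustrative sketch only: enabling a single SATA feature with the helper
 * above.  Assumes the SETFEATURES_SATA_ENABLE and SATA_AN constants from
 * <linux/ata.h>; the wrapper name is hypothetical.
 */
static inline unsigned int example_enable_async_notify(struct ata_device *dev)
{
	/* subcommand goes in the feature field, the feature id in nsect */
	return ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
}
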
44229f45cbd3SKristen Carlson Accardi /**
4423c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4424c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4425c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4426c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4427c6fd2807SJeff Garzik  *
4428c6fd2807SJeff Garzik  *	LOCKING:
4429c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4430c6fd2807SJeff Garzik  *
4431c6fd2807SJeff Garzik  *	RETURNS:
4432c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4433c6fd2807SJeff Garzik  */
4434c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4435c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4436c6fd2807SJeff Garzik {
4437c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4438c6fd2807SJeff Garzik 	unsigned int err_mask;
4439c6fd2807SJeff Garzik 
4440c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4441c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4442c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4443c6fd2807SJeff Garzik 
4444c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4445c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
4446c6fd2807SJeff Garzik 
4447c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4448c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4449c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4450c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4451c6fd2807SJeff Garzik 	tf.nsect = sectors;
4452c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4453c6fd2807SJeff Garzik 
44542b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
445518b2466cSAlan Cox 	/* A clean abort indicates an original or just-out-of-spec drive,
445618b2466cSAlan Cox 	   and we should continue, since we issue the setup based on the
445718b2466cSAlan Cox 	   drive's reported working geometry */
445818b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
445918b2466cSAlan Cox 		err_mask = 0;
4460c6fd2807SJeff Garzik 
4461c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4462c6fd2807SJeff Garzik 	return err_mask;
4463c6fd2807SJeff Garzik }
4464c6fd2807SJeff Garzik 
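/*
 * Illustrative sketch only: the CHS geometry fed to INIT DEV PARAMS is
 * normally taken straight from the IDENTIFY data (word 3 = default heads,
 * word 6 = default sectors per track).  The helper below is hypothetical.
 */
static inline unsigned int example_init_chs(struct ata_device *dev,
					    const u16 *id)
{
	u16 heads   = id[3];	/* default number of heads */
	u16 sectors = id[6];	/* default sectors per track */

	/* rejected with AC_ERR_INVALID unless 1-16 heads / 1-255 sectors */
	return ata_dev_init_params(dev, heads, sectors);
}
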
4465c6fd2807SJeff Garzik /**
4466c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4467c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4468c6fd2807SJeff Garzik  *
4469c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4470c6fd2807SJeff Garzik  *
4471c6fd2807SJeff Garzik  *	LOCKING:
4472cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4473c6fd2807SJeff Garzik  */
447470e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4475c6fd2807SJeff Garzik {
4476c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4477ff2aeb1eSTejun Heo 	struct scatterlist *sg = qc->sg;
4478c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4479c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4480c6fd2807SJeff Garzik 
4481c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4482c6fd2807SJeff Garzik 
4483ff2aeb1eSTejun Heo 	VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem);
4484c6fd2807SJeff Garzik 
4485c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
4486c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4487c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4488c6fd2807SJeff Garzik 	 */
4489c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4490c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4491c6fd2807SJeff Garzik 
4492ff2aeb1eSTejun Heo 	if (qc->mapped_n_elem)
4493ff2aeb1eSTejun Heo 		dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
4494c6fd2807SJeff Garzik 	/* restore last sg */
4495ff2aeb1eSTejun Heo 	if (qc->last_sg)
4496ff2aeb1eSTejun Heo 		*qc->last_sg = qc->saved_last_sg;
4497c6fd2807SJeff Garzik 	if (pad_buf) {
4498ff2aeb1eSTejun Heo 		struct scatterlist *psg = &qc->extra_sg[1];
449945711f1aSJens Axboe 		void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4500c6fd2807SJeff Garzik 		memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4501c6fd2807SJeff Garzik 		kunmap_atomic(addr, KM_IRQ0);
4502c6fd2807SJeff Garzik 	}
4503c6fd2807SJeff Garzik 
4504c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4505ff2aeb1eSTejun Heo 	qc->sg = NULL;
4506c6fd2807SJeff Garzik }
4507c6fd2807SJeff Garzik 
4508c6fd2807SJeff Garzik /**
4509c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4510c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4511c6fd2807SJeff Garzik  *
4512c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4513c6fd2807SJeff Garzik  *	associated with the current disk command.
4514c6fd2807SJeff Garzik  *
4515c6fd2807SJeff Garzik  *	LOCKING:
4516cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4517c6fd2807SJeff Garzik  *
4518c6fd2807SJeff Garzik  */
4519c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4520c6fd2807SJeff Garzik {
4521c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4522c6fd2807SJeff Garzik 	struct scatterlist *sg;
4523ff2aeb1eSTejun Heo 	unsigned int si, pi;
4524c6fd2807SJeff Garzik 
4525ff2aeb1eSTejun Heo 	pi = 0;
4526ff2aeb1eSTejun Heo 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
4527c6fd2807SJeff Garzik 		u32 addr, offset;
4528c6fd2807SJeff Garzik 		u32 sg_len, len;
4529c6fd2807SJeff Garzik 
4530c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4531c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4532c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4533c6fd2807SJeff Garzik 		 */
4534c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4535c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4536c6fd2807SJeff Garzik 
4537c6fd2807SJeff Garzik 		while (sg_len) {
4538c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4539c6fd2807SJeff Garzik 			len = sg_len;
4540c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4541c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4542c6fd2807SJeff Garzik 
4543ff2aeb1eSTejun Heo 			ap->prd[pi].addr = cpu_to_le32(addr);
4544ff2aeb1eSTejun Heo 			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4545ff2aeb1eSTejun Heo 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4546c6fd2807SJeff Garzik 
4547ff2aeb1eSTejun Heo 			pi++;
4548c6fd2807SJeff Garzik 			sg_len -= len;
4549c6fd2807SJeff Garzik 			addr += len;
4550c6fd2807SJeff Garzik 		}
4551c6fd2807SJeff Garzik 	}
4552c6fd2807SJeff Garzik 
4553ff2aeb1eSTejun Heo 	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4554c6fd2807SJeff Garzik }
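/*
 * Worked example of the 64K-boundary handling above (illustrative numbers
 * only): an sg element with DMA address 0xFFF0 and length 0x2000 straddles
 * a 64K boundary, so it is emitted as two PRD entries:
 *
 *	PRD[0] = (0xFFF0,  0x0010)	chunk up to the boundary
 *	PRD[1] = (0x10000, 0x1FF0)	remainder
 *
 * The last PRD entry written gets ATA_PRD_EOT OR'd into its flags_len.
 */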
4555b9a4197eSTejun Heo 
4556c6fd2807SJeff Garzik /**
4557d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4558d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4559d26fc955SAlan Cox  *
4560d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4561d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4562d26fc955SAlan Cox  *	so that we avoid writing any length 64K records for
4563d26fc955SAlan Cox  *	so that we avoid writing any zero-length (meaning 64K) records for
4564d26fc955SAlan Cox  *
4565d26fc955SAlan Cox  *	LOCKING:
4566d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4567d26fc955SAlan Cox  *
4568d26fc955SAlan Cox  */
4569d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4570d26fc955SAlan Cox {
4571d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4572d26fc955SAlan Cox 	struct scatterlist *sg;
4573ff2aeb1eSTejun Heo 	unsigned int si, pi;
4574d26fc955SAlan Cox 
4575ff2aeb1eSTejun Heo 	pi = 0;
4576ff2aeb1eSTejun Heo 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
4577d26fc955SAlan Cox 		u32 addr, offset;
4578d26fc955SAlan Cox 		u32 sg_len, len, blen;
4579d26fc955SAlan Cox 
4580d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4581d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4582d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4583d26fc955SAlan Cox 		 */
4584d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4585d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4586d26fc955SAlan Cox 
4587d26fc955SAlan Cox 		while (sg_len) {
4588d26fc955SAlan Cox 			offset = addr & 0xffff;
4589d26fc955SAlan Cox 			len = sg_len;
4590d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4591d26fc955SAlan Cox 				len = 0x10000 - offset;
4592d26fc955SAlan Cox 
4593d26fc955SAlan Cox 			blen = len & 0xffff;
4594ff2aeb1eSTejun Heo 			ap->prd[pi].addr = cpu_to_le32(addr);
4595d26fc955SAlan Cox 			if (blen == 0) {
4596d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4597d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4598ff2aeb1eSTejun Heo 				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
4599d26fc955SAlan Cox 				blen = 0x8000;
4600ff2aeb1eSTejun Heo 				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
4601d26fc955SAlan Cox 			}
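			/* After a split, pi indexes the second half and blen
			 * is 0x8000, so the assignment below gives that
			 * second 32K entry its length as well. */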
4602ff2aeb1eSTejun Heo 			ap->prd[pi].flags_len = cpu_to_le32(blen);
4603ff2aeb1eSTejun Heo 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4604d26fc955SAlan Cox 
4605ff2aeb1eSTejun Heo 			pi++;
4606d26fc955SAlan Cox 			sg_len -= len;
4607d26fc955SAlan Cox 			addr += len;
4608d26fc955SAlan Cox 		}
4609d26fc955SAlan Cox 	}
4610d26fc955SAlan Cox 
4611ff2aeb1eSTejun Heo 	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4612d26fc955SAlan Cox }
4613d26fc955SAlan Cox 
4614d26fc955SAlan Cox /**
4615c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4616c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4617c6fd2807SJeff Garzik  *
4618c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4619c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4620c6fd2807SJeff Garzik  *	supplied PACKET command.
4621c6fd2807SJeff Garzik  *
4622c6fd2807SJeff Garzik  *	LOCKING:
4623cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4624c6fd2807SJeff Garzik  *
4625c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4626c6fd2807SJeff Garzik  *               nonzero otherwise
4627c6fd2807SJeff Garzik  */
4628c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4629c6fd2807SJeff Garzik {
4630c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4631c6fd2807SJeff Garzik 
4632b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4633b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4634b9a4197eSTejun Heo 	 */
4635b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
46366f23a31dSAlbert Lee 		return 1;
46376f23a31dSAlbert Lee 
4638c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4639b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4640c6fd2807SJeff Garzik 
4641b9a4197eSTejun Heo 	return 0;
4642c6fd2807SJeff Garzik }
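/*
 * Illustrative note on the check above: a request with qc->nbytes == 510
 * (510 & 15 == 14) is refused DMA outright and falls back to PIO, while a
 * 2048-byte request may use DMA unless the LLD's ->check_atapi_dma() hook
 * objects for controller-specific reasons.
 */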
4643b9a4197eSTejun Heo 
4644c6fd2807SJeff Garzik /**
4645140b5e59STejun Heo  *	atapi_qc_may_overflow - Check whether data transfer may overflow
4646140b5e59STejun Heo  *	@qc: ATA command in question
4647140b5e59STejun Heo  *
4648140b5e59STejun Heo  *	ATAPI commands which transfer variable length data to host
4649140b5e59STejun Heo  *	might overflow due to application error or hardware bug.  This
4650140b5e59STejun Heo  *	function checks whether overflow should be drained and ignored
4651140b5e59STejun Heo  *	for @qc.
4652140b5e59STejun Heo  *
4653140b5e59STejun Heo  *	LOCKING:
4654140b5e59STejun Heo  *	None.
4655140b5e59STejun Heo  *
4656140b5e59STejun Heo  *	RETURNS:
4657140b5e59STejun Heo  *	1 if @qc may overflow; otherwise, 0.
4658140b5e59STejun Heo  */
4659140b5e59STejun Heo static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4660140b5e59STejun Heo {
46610dc36888STejun Heo 	if (qc->tf.protocol != ATAPI_PROT_PIO &&
46620dc36888STejun Heo 	    qc->tf.protocol != ATAPI_PROT_DMA)
4663140b5e59STejun Heo 		return 0;
4664140b5e59STejun Heo 
4665140b5e59STejun Heo 	if (qc->tf.flags & ATA_TFLAG_WRITE)
4666140b5e59STejun Heo 		return 0;
4667140b5e59STejun Heo 
4668140b5e59STejun Heo 	switch (qc->cdb[0]) {
4669140b5e59STejun Heo 	case READ_10:
4670140b5e59STejun Heo 	case READ_12:
4671140b5e59STejun Heo 	case WRITE_10:
4672140b5e59STejun Heo 	case WRITE_12:
4673140b5e59STejun Heo 	case GPCMD_READ_CD:
4674140b5e59STejun Heo 	case GPCMD_READ_CD_MSF:
4675140b5e59STejun Heo 		return 0;
4676140b5e59STejun Heo 	}
4677140b5e59STejun Heo 
4678140b5e59STejun Heo 	return 1;
4679140b5e59STejun Heo }
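/*
 * Illustrative note: variable-length replies such as INQUIRY or MODE SENSE
 * may legitimately carry more data than the request anticipated, so this
 * returns 1 for them and the excess is drained and ignored.  For the
 * fixed-block commands listed in the switch above (READ/WRITE 10/12,
 * READ CD) the lengths must match exactly, so overflow is treated as an
 * error.
 */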
4680140b5e59STejun Heo 
4681140b5e59STejun Heo /**
468231cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
468331cc23b3STejun Heo  *	@qc: ATA command in question
468431cc23b3STejun Heo  *
468531cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
468631cc23b3STejun Heo  *	not.  As upper layer only knows the queue depth, we are
468731cc23b3STejun Heo  *	responsible for maintaining exclusion.  This function checks
468831cc23b3STejun Heo  *	whether a new command @qc can be issued.
468931cc23b3STejun Heo  *
469031cc23b3STejun Heo  *	LOCKING:
469131cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
469231cc23b3STejun Heo  *
469331cc23b3STejun Heo  *	RETURNS:
469431cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
469531cc23b3STejun Heo  */
469631cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
469731cc23b3STejun Heo {
469831cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
469931cc23b3STejun Heo 
470031cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
470131cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
470231cc23b3STejun Heo 			return 0;
470331cc23b3STejun Heo 	} else {
470431cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
470531cc23b3STejun Heo 			return 0;
470631cc23b3STejun Heo 	}
470731cc23b3STejun Heo 
470831cc23b3STejun Heo 	return ATA_DEFER_LINK;
470931cc23b3STejun Heo }
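/*
 * Illustrative note: the rule above means, for example, that a non-NCQ
 * command arriving while NCQ commands are in flight (link->sactive != 0)
 * is deferred, as is an NCQ command arriving while a non-NCQ command owns
 * link->active_tag.  NCQ-capable LLDs commonly wire this helper in
 * directly, along the lines of
 *
 *	.qc_defer	= ata_std_qc_defer,
 *
 * in their struct ata_port_operations.
 */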
471031cc23b3STejun Heo 
471131cc23b3STejun Heo /**
4712c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4713c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4714c6fd2807SJeff Garzik  *
4715c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4716c6fd2807SJeff Garzik  *
4717c6fd2807SJeff Garzik  *	LOCKING:
4718cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4719c6fd2807SJeff Garzik  */
4720c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4721c6fd2807SJeff Garzik {
4722c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4723c6fd2807SJeff Garzik 		return;
4724c6fd2807SJeff Garzik 
4725c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4726c6fd2807SJeff Garzik }
4727c6fd2807SJeff Garzik 
4728d26fc955SAlan Cox /**
4729d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4730d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4731d26fc955SAlan Cox  *
4732d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4733d26fc955SAlan Cox  *
4734d26fc955SAlan Cox  *	LOCKING:
4735d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4736d26fc955SAlan Cox  */
4737d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4738d26fc955SAlan Cox {
4739d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4740d26fc955SAlan Cox 		return;
4741d26fc955SAlan Cox 
4742d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4743d26fc955SAlan Cox }
4744d26fc955SAlan Cox 
4745c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4746c6fd2807SJeff Garzik 
4747c6fd2807SJeff Garzik /**
4748c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4749c6fd2807SJeff Garzik  *	@qc: Command to be associated
4750c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4751c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4752c6fd2807SJeff Garzik  *
4753c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4754c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4755c6fd2807SJeff Garzik  *	elements.
4756c6fd2807SJeff Garzik  *
4757c6fd2807SJeff Garzik  *	LOCKING:
4758cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4759c6fd2807SJeff Garzik  */
4760c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4761c6fd2807SJeff Garzik 		 unsigned int n_elem)
4762c6fd2807SJeff Garzik {
4763ff2aeb1eSTejun Heo 	qc->sg = sg;
4764c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4765ff2aeb1eSTejun Heo 	qc->cursg = qc->sg;
4766ff2aeb1eSTejun Heo }
4767ff2aeb1eSTejun Heo 
4768ff2aeb1eSTejun Heo static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
47690bcc65adSTejun Heo 				       unsigned int *n_elem_extra,
47700bcc65adSTejun Heo 				       unsigned int *nbytes_extra)
4771ff2aeb1eSTejun Heo {
4772ff2aeb1eSTejun Heo 	struct ata_port *ap = qc->ap;
4773ff2aeb1eSTejun Heo 	unsigned int n_elem = qc->n_elem;
4774ff2aeb1eSTejun Heo 	struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
4775ff2aeb1eSTejun Heo 
4776ff2aeb1eSTejun Heo 	*n_elem_extra = 0;
47770bcc65adSTejun Heo 	*nbytes_extra = 0;
4778ff2aeb1eSTejun Heo 
4779ff2aeb1eSTejun Heo 	/* needs padding? */
4780ff2aeb1eSTejun Heo 	qc->pad_len = qc->nbytes & 3;
4781ff2aeb1eSTejun Heo 
4782ff2aeb1eSTejun Heo 	if (likely(!qc->pad_len))
4783ff2aeb1eSTejun Heo 		return n_elem;
4784ff2aeb1eSTejun Heo 
4785ff2aeb1eSTejun Heo 	/* locate last sg and save it */
4786ff2aeb1eSTejun Heo 	lsg = sg_last(qc->sg, n_elem);
4787ff2aeb1eSTejun Heo 	qc->last_sg = lsg;
4788ff2aeb1eSTejun Heo 	qc->saved_last_sg = *lsg;
4789ff2aeb1eSTejun Heo 
4790ff2aeb1eSTejun Heo 	sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
4791ff2aeb1eSTejun Heo 
4792ff2aeb1eSTejun Heo 	if (qc->pad_len) {
4793ff2aeb1eSTejun Heo 		struct scatterlist *psg = &qc->extra_sg[1];
4794ff2aeb1eSTejun Heo 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4795ff2aeb1eSTejun Heo 		unsigned int offset;
4796ff2aeb1eSTejun Heo 
4797ff2aeb1eSTejun Heo 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4798ff2aeb1eSTejun Heo 
4799ff2aeb1eSTejun Heo 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4800ff2aeb1eSTejun Heo 
4801ff2aeb1eSTejun Heo 		/* psg->page/offset are used to copy to-be-written
4802ff2aeb1eSTejun Heo 		 * data in this function or read data in ata_sg_clean.
4803ff2aeb1eSTejun Heo 		 */
4804ff2aeb1eSTejun Heo 		offset = lsg->offset + lsg->length - qc->pad_len;
4805ff2aeb1eSTejun Heo 		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4806ff2aeb1eSTejun Heo 			    qc->pad_len, offset_in_page(offset));
4807ff2aeb1eSTejun Heo 
4808ff2aeb1eSTejun Heo 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4809ff2aeb1eSTejun Heo 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4810ff2aeb1eSTejun Heo 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4811ff2aeb1eSTejun Heo 			kunmap_atomic(addr, KM_IRQ0);
4812ff2aeb1eSTejun Heo 		}
4813ff2aeb1eSTejun Heo 
4814ff2aeb1eSTejun Heo 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4815ff2aeb1eSTejun Heo 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4816ff2aeb1eSTejun Heo 
4817ff2aeb1eSTejun Heo 		/* Trim the last sg entry and chain the original and
4818ff2aeb1eSTejun Heo 		 * padding sg lists.
4819ff2aeb1eSTejun Heo 		 *
4820ff2aeb1eSTejun Heo 		 * Because chaining consumes one sg entry, one extra
4821ff2aeb1eSTejun Heo 		 * sg entry is allocated and the last sg entry is
4822ff2aeb1eSTejun Heo 		 * copied to it if the length isn't zero after the padded
4823ff2aeb1eSTejun Heo 		 * amount is removed.
4824ff2aeb1eSTejun Heo 		 *
4825ff2aeb1eSTejun Heo 		 * If the last sg entry is completely replaced by
4826ff2aeb1eSTejun Heo 		 * padding sg entry, the first sg entry is skipped
4827ff2aeb1eSTejun Heo 		 * while chaining.
4828ff2aeb1eSTejun Heo 		 */
4829ff2aeb1eSTejun Heo 		lsg->length -= qc->pad_len;
4830ff2aeb1eSTejun Heo 		if (lsg->length) {
4831ff2aeb1eSTejun Heo 			copy_lsg = &qc->extra_sg[0];
4832ff2aeb1eSTejun Heo 			tsg = &qc->extra_sg[0];
4833ff2aeb1eSTejun Heo 		} else {
4834ff2aeb1eSTejun Heo 			n_elem--;
4835ff2aeb1eSTejun Heo 			tsg = &qc->extra_sg[1];
4836ff2aeb1eSTejun Heo 		}
4837ff2aeb1eSTejun Heo 
4838ff2aeb1eSTejun Heo 		esg = &qc->extra_sg[1];
4839ff2aeb1eSTejun Heo 
4840ff2aeb1eSTejun Heo 		(*n_elem_extra)++;
48410bcc65adSTejun Heo 		(*nbytes_extra) += 4 - qc->pad_len;
4842ff2aeb1eSTejun Heo 	}
4843ff2aeb1eSTejun Heo 
4844ff2aeb1eSTejun Heo 	if (copy_lsg)
4845ff2aeb1eSTejun Heo 		sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
4846ff2aeb1eSTejun Heo 
4847ff2aeb1eSTejun Heo 	sg_chain(lsg, 1, tsg);
4848ff2aeb1eSTejun Heo 	sg_mark_end(esg);
4849ff2aeb1eSTejun Heo 
4850ff2aeb1eSTejun Heo 	/* sglist can't start with chaining sg entry, fast forward */
4851ff2aeb1eSTejun Heo 	if (qc->sg == lsg) {
4852ff2aeb1eSTejun Heo 		qc->sg = tsg;
4853ff2aeb1eSTejun Heo 		qc->cursg = tsg;
4854ff2aeb1eSTejun Heo 	}
4855ff2aeb1eSTejun Heo 
4856ff2aeb1eSTejun Heo 	return n_elem;
4857c6fd2807SJeff Garzik }
4858c6fd2807SJeff Garzik 
4859c6fd2807SJeff Garzik /**
4860c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4861c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4862c6fd2807SJeff Garzik  *
4863c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4864c6fd2807SJeff Garzik  *
4865c6fd2807SJeff Garzik  *	LOCKING:
4866cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4867c6fd2807SJeff Garzik  *
4868c6fd2807SJeff Garzik  *	RETURNS:
4869c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4870c6fd2807SJeff Garzik  *
4871c6fd2807SJeff Garzik  */
4872c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4873c6fd2807SJeff Garzik {
4874c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
48750bcc65adSTejun Heo 	unsigned int n_elem, n_elem_extra, nbytes_extra;
4876c6fd2807SJeff Garzik 
487744877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4878c6fd2807SJeff Garzik 
48790bcc65adSTejun Heo 	n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra);
4880c6fd2807SJeff Garzik 
4881ff2aeb1eSTejun Heo 	if (n_elem) {
4882ff2aeb1eSTejun Heo 		n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
4883c6fd2807SJeff Garzik 		if (n_elem < 1) {
4884c6fd2807SJeff Garzik 			/* restore last sg */
4885ff2aeb1eSTejun Heo 			if (qc->last_sg)
4886ff2aeb1eSTejun Heo 				*qc->last_sg = qc->saved_last_sg;
4887c6fd2807SJeff Garzik 			return -1;
4888c6fd2807SJeff Garzik 		}
4889c6fd2807SJeff Garzik 		DPRINTK("%d sg elements mapped\n", n_elem);
4890ff2aeb1eSTejun Heo 	}
4891c6fd2807SJeff Garzik 
4892ff2aeb1eSTejun Heo 	qc->n_elem = qc->mapped_n_elem = n_elem;
4893ff2aeb1eSTejun Heo 	qc->n_elem += n_elem_extra;
48940bcc65adSTejun Heo 	qc->nbytes += nbytes_extra;
4895f92a2636STejun Heo 	qc->flags |= ATA_QCFLAG_DMAMAP;
4896c6fd2807SJeff Garzik 
4897c6fd2807SJeff Garzik 	return 0;
4898c6fd2807SJeff Garzik }
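/*
 * Note on the bookkeeping above: qc->mapped_n_elem counts only the entries
 * that were actually DMA-mapped here, while qc->n_elem also includes the
 * extra padding entry (if any) produced by ata_sg_setup_extra();
 * ata_sg_clean() relies on mapped_n_elem when unmapping.
 */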
4899c6fd2807SJeff Garzik 
4900c6fd2807SJeff Garzik /**
4901c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4902c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4903c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4904c6fd2807SJeff Garzik  *
4905c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4906c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4907c6fd2807SJeff Garzik  *	vice-versa.
4908c6fd2807SJeff Garzik  *
4909c6fd2807SJeff Garzik  *	LOCKING:
4910c6fd2807SJeff Garzik  *	Inherited from caller.
4911c6fd2807SJeff Garzik  */
4912c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4913c6fd2807SJeff Garzik {
4914c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4915c6fd2807SJeff Garzik 	unsigned int i;
4916c6fd2807SJeff Garzik 
4917c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4918c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4919c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4920c6fd2807SJeff Garzik }
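/*
 * Typical use (a sketch mirroring how IDENTIFY data is handled elsewhere in
 * libata): the raw IDENTIFY DEVICE page arrives as little-endian 16-bit
 * words and is converted in place before being parsed:
 *
 *	u16 id[ATA_ID_WORDS];
 *
 *	(fill id[] from the device by PIO or DMA, then)
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */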
4921c6fd2807SJeff Garzik 
4922c6fd2807SJeff Garzik /**
49230d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
492455dba312STejun Heo  *	@dev: device to target
4925c6fd2807SJeff Garzik  *	@buf: data buffer
4926c6fd2807SJeff Garzik  *	@buflen: buffer length
49270affa456SLinus Nilsson  *	@rw: read/write
4928c6fd2807SJeff Garzik  *
4929c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4930c6fd2807SJeff Garzik  *
4931c6fd2807SJeff Garzik  *	LOCKING:
4932c6fd2807SJeff Garzik  *	Inherited from caller.
493355dba312STejun Heo  *
493455dba312STejun Heo  *	RETURNS:
493555dba312STejun Heo  *	Bytes consumed.
4936c6fd2807SJeff Garzik  */
493755dba312STejun Heo unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
493855dba312STejun Heo 			   unsigned int buflen, int rw)
4939c6fd2807SJeff Garzik {
494055dba312STejun Heo 	struct ata_port *ap = dev->link->ap;
494155dba312STejun Heo 	void __iomem *data_addr = ap->ioaddr.data_addr;
4942c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4943c6fd2807SJeff Garzik 
4944c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
494555dba312STejun Heo 	if (rw == READ)
494655dba312STejun Heo 		ioread16_rep(data_addr, buf, words);
4947c6fd2807SJeff Garzik 	else
494855dba312STejun Heo 		iowrite16_rep(data_addr, buf, words);
4949c6fd2807SJeff Garzik 
4950c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4951c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
49524ca4e439SAl Viro 		__le16 align_buf[1] = { 0 };
4953c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4954c6fd2807SJeff Garzik 
495555dba312STejun Heo 		if (rw == READ) {
495655dba312STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(data_addr));
4957c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
495855dba312STejun Heo 		} else {
495955dba312STejun Heo 			memcpy(align_buf, trailing_buf, 1);
496055dba312STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
4961c6fd2807SJeff Garzik 		}
496255dba312STejun Heo 		words++;
4963c6fd2807SJeff Garzik 	}
496455dba312STejun Heo 
496555dba312STejun Heo 	return words << 1;
4966c6fd2807SJeff Garzik }
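/*
 * Worked example: a 7-byte PIO read is carried out as three 16-bit reads
 * via ioread16_rep() plus one extra ioread16() for the trailing byte, and
 * the function reports 8 bytes consumed, i.e. the count is rounded up to a
 * whole number of words.
 */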
4967c6fd2807SJeff Garzik 
4968c6fd2807SJeff Garzik /**
49690d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
497055dba312STejun Heo  *	@dev: device to target
4971c6fd2807SJeff Garzik  *	@buf: data buffer
4972c6fd2807SJeff Garzik  *	@buflen: buffer length
49730affa456SLinus Nilsson  *	@rw: read/write
4974c6fd2807SJeff Garzik  *
4975c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4976c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4977c6fd2807SJeff Garzik  *
4978c6fd2807SJeff Garzik  *	LOCKING:
4979c6fd2807SJeff Garzik  *	Inherited from caller.
498055dba312STejun Heo  *
498155dba312STejun Heo  *	RETURNS:
498255dba312STejun Heo  *	Bytes consumed.
4983c6fd2807SJeff Garzik  */
498455dba312STejun Heo unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
498555dba312STejun Heo 				 unsigned int buflen, int rw)
4986c6fd2807SJeff Garzik {
4987c6fd2807SJeff Garzik 	unsigned long flags;
498855dba312STejun Heo 	unsigned int consumed;
498955dba312STejun Heo 
4990c6fd2807SJeff Garzik 	local_irq_save(flags);
499155dba312STejun Heo 	consumed = ata_data_xfer(dev, buf, buflen, rw);
4992c6fd2807SJeff Garzik 	local_irq_restore(flags);
499355dba312STejun Heo 
499455dba312STejun Heo 	return consumed;
4995c6fd2807SJeff Garzik }
4996c6fd2807SJeff Garzik 
4997c6fd2807SJeff Garzik 
4998c6fd2807SJeff Garzik /**
49995a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
5000c6fd2807SJeff Garzik  *	@qc: Command on going
5001c6fd2807SJeff Garzik  *
50025a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
5003c6fd2807SJeff Garzik  *
5004c6fd2807SJeff Garzik  *	LOCKING:
5005c6fd2807SJeff Garzik  *	Inherited from caller.
5006c6fd2807SJeff Garzik  */
5007c6fd2807SJeff Garzik 
5008c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
5009c6fd2807SJeff Garzik {
5010c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5011c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5012c6fd2807SJeff Garzik 	struct page *page;
5013c6fd2807SJeff Garzik 	unsigned int offset;
5014c6fd2807SJeff Garzik 	unsigned char *buf;
5015c6fd2807SJeff Garzik 
50165a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
5017c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5018c6fd2807SJeff Garzik 
501945711f1aSJens Axboe 	page = sg_page(qc->cursg);
502087260216SJens Axboe 	offset = qc->cursg->offset + qc->cursg_ofs;
5021c6fd2807SJeff Garzik 
5022c6fd2807SJeff Garzik 	/* get the current page and offset */
5023c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5024c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5025c6fd2807SJeff Garzik 
5026c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5027c6fd2807SJeff Garzik 
5028c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5029c6fd2807SJeff Garzik 		unsigned long flags;
5030c6fd2807SJeff Garzik 
5031c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
5032c6fd2807SJeff Garzik 		local_irq_save(flags);
5033c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5034c6fd2807SJeff Garzik 
5035c6fd2807SJeff Garzik 		/* do the actual data transfer */
50365a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5037c6fd2807SJeff Garzik 
5038c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5039c6fd2807SJeff Garzik 		local_irq_restore(flags);
5040c6fd2807SJeff Garzik 	} else {
5041c6fd2807SJeff Garzik 		buf = page_address(page);
50425a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5043c6fd2807SJeff Garzik 	}
5044c6fd2807SJeff Garzik 
50455a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
50465a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
5047c6fd2807SJeff Garzik 
504887260216SJens Axboe 	if (qc->cursg_ofs == qc->cursg->length) {
504987260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5050c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5051c6fd2807SJeff Garzik 	}
5052c6fd2807SJeff Garzik }
5053c6fd2807SJeff Garzik 
5054c6fd2807SJeff Garzik /**
50555a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
5056c6fd2807SJeff Garzik  *	@qc: Command on going
5057c6fd2807SJeff Garzik  *
50585a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
5059c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
5060c6fd2807SJeff Garzik  *
5061c6fd2807SJeff Garzik  *	LOCKING:
5062c6fd2807SJeff Garzik  *	Inherited from caller.
5063c6fd2807SJeff Garzik  */
5064c6fd2807SJeff Garzik 
5065c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
5066c6fd2807SJeff Garzik {
5067c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
5068c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
5069c6fd2807SJeff Garzik 		unsigned int nsect;
5070c6fd2807SJeff Garzik 
5071c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
5072c6fd2807SJeff Garzik 
50735a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5074726f0785STejun Heo 			    qc->dev->multi_count);
5075c6fd2807SJeff Garzik 		while (nsect--)
5076c6fd2807SJeff Garzik 			ata_pio_sector(qc);
5077c6fd2807SJeff Garzik 	} else
5078c6fd2807SJeff Garzik 		ata_pio_sector(qc);
50794cc980b3SAlbert Lee 
50804cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
5081c6fd2807SJeff Garzik }
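/*
 * Worked example: for a READ MULTIPLE command with qc->dev->multi_count
 * == 16 and 20 sectors (20 * qc->sect_size bytes) still outstanding, a
 * single DRQ block moves min(20, 16) == 16 sectors; the remaining 4 are
 * transferred on the next DRQ interrupt.
 */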
5082c6fd2807SJeff Garzik 
5083c6fd2807SJeff Garzik /**
5084c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
5085c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
5086c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
5087c6fd2807SJeff Garzik  *
5088c6fd2807SJeff Garzik  *	When device has indicated its readiness to accept
5089c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
5090c6fd2807SJeff Garzik  *
5091c6fd2807SJeff Garzik  *	LOCKING:
5092c6fd2807SJeff Garzik  *	caller.
5093c6fd2807SJeff Garzik  */
5094c6fd2807SJeff Garzik 
5095c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5096c6fd2807SJeff Garzik {
5097c6fd2807SJeff Garzik 	/* send SCSI cdb */
5098c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
5099c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
5100c6fd2807SJeff Garzik 
5101c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5102c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
5103c6fd2807SJeff Garzik 
5104c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
51050dc36888STejun Heo 	case ATAPI_PROT_PIO:
5106c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
5107c6fd2807SJeff Garzik 		break;
51080dc36888STejun Heo 	case ATAPI_PROT_NODATA:
5109c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5110c6fd2807SJeff Garzik 		break;
51110dc36888STejun Heo 	case ATAPI_PROT_DMA:
5112c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5113c6fd2807SJeff Garzik 		/* initiate bmdma */
5114c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
5115c6fd2807SJeff Garzik 		break;
5116c6fd2807SJeff Garzik 	}
5117c6fd2807SJeff Garzik }
5118c6fd2807SJeff Garzik 
5119c6fd2807SJeff Garzik /**
5120c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
5121c6fd2807SJeff Garzik  *	@qc: Command on going
5122c6fd2807SJeff Garzik  *	@bytes: number of bytes
5123c6fd2807SJeff Garzik  *
5124c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5125c6fd2807SJeff Garzik  *
5126c6fd2807SJeff Garzik  *	LOCKING:
5127c6fd2807SJeff Garzik  *	Inherited from caller.
5128c6fd2807SJeff Garzik  *
5129c6fd2807SJeff Garzik  */
5130140b5e59STejun Heo static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5131c6fd2807SJeff Garzik {
5132c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5133c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5134140b5e59STejun Heo 	struct ata_eh_info *ehi = &qc->dev->link->eh_info;
5135140b5e59STejun Heo 	struct scatterlist *sg;
5136c6fd2807SJeff Garzik 	struct page *page;
5137c6fd2807SJeff Garzik 	unsigned char *buf;
5138c6fd2807SJeff Garzik 	unsigned int offset, count;
5139c6fd2807SJeff Garzik 
5140c6fd2807SJeff Garzik next_sg:
5141140b5e59STejun Heo 	sg = qc->cursg;
5142140b5e59STejun Heo 	if (unlikely(!sg)) {
5143c6fd2807SJeff Garzik 		/*
5144c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
5145c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
5146c6fd2807SJeff Garzik 		 * and fulfill the length specified in the byte count register,
5147c6fd2807SJeff Garzik 		 *    - for the read case, discard trailing data from the device
5148c6fd2807SJeff Garzik 		 *    - for the write case, pad with zero data to the device
5149c6fd2807SJeff Garzik 		 */
5150c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
5151c6fd2807SJeff Garzik 		unsigned int i;
5152c6fd2807SJeff Garzik 
5153140b5e59STejun Heo 		if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5154140b5e59STejun Heo 			ata_ehi_push_desc(ehi, "too much trailing data "
5155140b5e59STejun Heo 					  "buf=%u cur=%u bytes=%u",
5156140b5e59STejun Heo 					  qc->nbytes, qc->curbytes, bytes);
5157140b5e59STejun Heo 			return -1;
5158c6fd2807SJeff Garzik 		}
5159c6fd2807SJeff Garzik 
5160140b5e59STejun Heo 		 /* overflow is expected for misc ATAPI commands */
5161140b5e59STejun Heo 		if (bytes && !atapi_qc_may_overflow(qc))
5162140b5e59STejun Heo 			ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5163140b5e59STejun Heo 				       "trailing data (cdb=%02x nbytes=%u)\n",
5164140b5e59STejun Heo 				       bytes, qc->cdb[0], qc->nbytes);
5165140b5e59STejun Heo 
5166140b5e59STejun Heo 		for (i = 0; i < (bytes + 1) / 2; i++)
5167140b5e59STejun Heo 			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5168140b5e59STejun Heo 
5169140b5e59STejun Heo 		qc->curbytes += bytes;
5170140b5e59STejun Heo 
5171140b5e59STejun Heo 		return 0;
5172140b5e59STejun Heo 	}
5173c6fd2807SJeff Garzik 
517445711f1aSJens Axboe 	page = sg_page(sg);
5175c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
5176c6fd2807SJeff Garzik 
5177c6fd2807SJeff Garzik 	/* get the current page and offset */
5178c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5179c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5180c6fd2807SJeff Garzik 
5181c6fd2807SJeff Garzik 	/* don't overrun current sg */
5182c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
5183c6fd2807SJeff Garzik 
5184c6fd2807SJeff Garzik 	/* don't cross page boundaries */
5185c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
5186c6fd2807SJeff Garzik 
5187c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5188c6fd2807SJeff Garzik 
5189c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5190c6fd2807SJeff Garzik 		unsigned long flags;
5191c6fd2807SJeff Garzik 
5192c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
5193c6fd2807SJeff Garzik 		local_irq_save(flags);
5194c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5195c6fd2807SJeff Garzik 
5196c6fd2807SJeff Garzik 		/* do the actual data transfer */
5197c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5198c6fd2807SJeff Garzik 
5199c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5200c6fd2807SJeff Garzik 		local_irq_restore(flags);
5201c6fd2807SJeff Garzik 	} else {
5202c6fd2807SJeff Garzik 		buf = page_address(page);
5203c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5204c6fd2807SJeff Garzik 	}
5205c6fd2807SJeff Garzik 
5206c6fd2807SJeff Garzik 	bytes -= count;
5207140b5e59STejun Heo 	if ((count & 1) && bytes)
5208140b5e59STejun Heo 		bytes--;
5209c6fd2807SJeff Garzik 	qc->curbytes += count;
5210c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
5211c6fd2807SJeff Garzik 
5212c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
521387260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5214c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5215c6fd2807SJeff Garzik 	}
5216c6fd2807SJeff Garzik 
5217c6fd2807SJeff Garzik 	if (bytes)
5218c6fd2807SJeff Garzik 		goto next_sg;
5219140b5e59STejun Heo 
5220140b5e59STejun Heo 	return 0;
5221c6fd2807SJeff Garzik }
5222c6fd2807SJeff Garzik 
5223c6fd2807SJeff Garzik /**
5224c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
5225c6fd2807SJeff Garzik  *	@qc: Command on going
5226c6fd2807SJeff Garzik  *
5227c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5228c6fd2807SJeff Garzik  *
5229c6fd2807SJeff Garzik  *	LOCKING:
5230c6fd2807SJeff Garzik  *	Inherited from caller.
5231c6fd2807SJeff Garzik  */
5232c6fd2807SJeff Garzik 
5233c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5234c6fd2807SJeff Garzik {
5235c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5236c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
5237c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
5238c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5239c6fd2807SJeff Garzik 
5240c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
5241c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
5242c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
5243c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
5244c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
5245c6fd2807SJeff Garzik 	 */
5246c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
5247c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
5248c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
5249c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
5250c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
5251c6fd2807SJeff Garzik 
5252c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
52530106372dSAlbert Lee 	if (unlikely(ireason & (1 << 0)))
5254c6fd2807SJeff Garzik 		goto err_out;
5255c6fd2807SJeff Garzik 
5256c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
5257c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
52580106372dSAlbert Lee 	if (unlikely(do_write != i_write))
52590106372dSAlbert Lee 		goto err_out;
52600106372dSAlbert Lee 
52610106372dSAlbert Lee 	if (unlikely(!bytes))
5262c6fd2807SJeff Garzik 		goto err_out;
5263c6fd2807SJeff Garzik 
526444877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5265c6fd2807SJeff Garzik 
5266140b5e59STejun Heo 	if (__atapi_pio_bytes(qc, bytes))
5267140b5e59STejun Heo 		goto err_out;
52684cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
5269c6fd2807SJeff Garzik 
5270c6fd2807SJeff Garzik 	return;
5271c6fd2807SJeff Garzik 
5272c6fd2807SJeff Garzik err_out:
5273c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5274c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
5275c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
5276c6fd2807SJeff Garzik }
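/*
 * Illustrative decode of the registers read above: the interrupt reason is
 * returned in the sector count register (bit 0 CoD must be 0 for a data
 * phase, bit 1 I/O gives the direction) and the byte count in LBA mid/high,
 * so e.g. lbam == 0x00, lbah == 0x02 means the device wants to move
 * (0x02 << 8) | 0x00 == 512 bytes in this DRQ phase.
 */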
5277c6fd2807SJeff Garzik 
5278c6fd2807SJeff Garzik /**
5279c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5280c6fd2807SJeff Garzik  *	@ap: the target ata_port
5281c6fd2807SJeff Garzik  *	@qc: qc on going
5282c6fd2807SJeff Garzik  *
5283c6fd2807SJeff Garzik  *	RETURNS:
5284c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
5285c6fd2807SJeff Garzik  */
5286c6fd2807SJeff Garzik 
5287c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5288c6fd2807SJeff Garzik {
5289c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
5290c6fd2807SJeff Garzik 		return 1;
5291c6fd2807SJeff Garzik 
5292c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
5293c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
5294c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
5295c6fd2807SJeff Garzik 			return 1;
5296c6fd2807SJeff Garzik 
5297405e66b3STejun Heo 		if (ata_is_atapi(qc->tf.protocol) &&
5298c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5299c6fd2807SJeff Garzik 			return 1;
5300c6fd2807SJeff Garzik 	}
5301c6fd2807SJeff Garzik 
5302c6fd2807SJeff Garzik 	return 0;
5303c6fd2807SJeff Garzik }
5304c6fd2807SJeff Garzik 
5305c6fd2807SJeff Garzik /**
5306c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5307c6fd2807SJeff Garzik  *	@qc: Command to complete
5308c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5309c6fd2807SJeff Garzik  *
5310c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5311c6fd2807SJeff Garzik  *
5312c6fd2807SJeff Garzik  *	LOCKING:
5313cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5314c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5315c6fd2807SJeff Garzik  */
5316c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5317c6fd2807SJeff Garzik {
5318c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5319c6fd2807SJeff Garzik 	unsigned long flags;
5320c6fd2807SJeff Garzik 
5321c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5322c6fd2807SJeff Garzik 		if (in_wq) {
5323c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5324c6fd2807SJeff Garzik 
5325cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5326cca3974eSJeff Garzik 			 * released.
5327c6fd2807SJeff Garzik 			 */
5328c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5329c6fd2807SJeff Garzik 			if (qc) {
5330c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
533183625006SAkira Iguchi 					ap->ops->irq_on(ap);
5332c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5333c6fd2807SJeff Garzik 				} else
5334c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5335c6fd2807SJeff Garzik 			}
5336c6fd2807SJeff Garzik 
5337c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5338c6fd2807SJeff Garzik 		} else {
5339c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5340c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5341c6fd2807SJeff Garzik 			else
5342c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5343c6fd2807SJeff Garzik 		}
5344c6fd2807SJeff Garzik 	} else {
5345c6fd2807SJeff Garzik 		if (in_wq) {
5346c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
534783625006SAkira Iguchi 			ap->ops->irq_on(ap);
5348c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5349c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5350c6fd2807SJeff Garzik 		} else
5351c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5352c6fd2807SJeff Garzik 	}
5353c6fd2807SJeff Garzik }
5354c6fd2807SJeff Garzik 
5355c6fd2807SJeff Garzik /**
5356c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5357c6fd2807SJeff Garzik  *	@ap: the target ata_port
5358c6fd2807SJeff Garzik  *	@qc: qc on going
5359c6fd2807SJeff Garzik  *	@status: current device status
5360c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5361c6fd2807SJeff Garzik  *
5362c6fd2807SJeff Garzik  *	RETURNS:
5363c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5364c6fd2807SJeff Garzik  */
5365c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5366c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5367c6fd2807SJeff Garzik {
5368c6fd2807SJeff Garzik 	unsigned long flags = 0;
5369c6fd2807SJeff Garzik 	int poll_next;
5370c6fd2807SJeff Garzik 
5371c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5372c6fd2807SJeff Garzik 
5373c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5374c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5375c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5376c6fd2807SJeff Garzik 	 */
5377c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5378c6fd2807SJeff Garzik 
5379c6fd2807SJeff Garzik fsm_start:
5380c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
538144877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5382c6fd2807SJeff Garzik 
5383c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5384c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5385c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5386c6fd2807SJeff Garzik 
5387c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5388c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5389c6fd2807SJeff Garzik 		 * takes over after sending the data.
5390c6fd2807SJeff Garzik 		 */
5391c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5392c6fd2807SJeff Garzik 
5393c6fd2807SJeff Garzik 		/* check device status */
5394c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5395c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5396c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5397c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5398c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5399c6fd2807SJeff Garzik 			else
5400c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5401c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5402c6fd2807SJeff Garzik 
5403c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5404c6fd2807SJeff Garzik 			goto fsm_start;
5405c6fd2807SJeff Garzik 		}
5406c6fd2807SJeff Garzik 
5407c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5408c6fd2807SJeff Garzik 		 * when it finds something wrong.
5409c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5410c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5411c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5412c6fd2807SJeff Garzik 		 */
5413c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
54142d3b8eeaSAlbert Lee 			/* Some ATAPI tape drives forget to clear the ERR bit
54152d3b8eeaSAlbert Lee 			 * when doing the next command (mostly request sense).
54162d3b8eeaSAlbert Lee 			 * We ignore ERR here to workaround and proceed sending
54172d3b8eeaSAlbert Lee 			 * the CDB.
54182d3b8eeaSAlbert Lee 			 */
54192d3b8eeaSAlbert Lee 			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
54202d3b8eeaSAlbert Lee 				ata_port_printk(ap, KERN_WARNING,
54212d3b8eeaSAlbert Lee 						"DRQ=1 with device error, "
54222d3b8eeaSAlbert Lee 						"dev_stat 0x%X\n", status);
5423c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5424c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5425c6fd2807SJeff Garzik 				goto fsm_start;
5426c6fd2807SJeff Garzik 			}
54272d3b8eeaSAlbert Lee 		}
5428c6fd2807SJeff Garzik 
5429c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5430c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5431c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5432c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5433c6fd2807SJeff Garzik 		 */
5434c6fd2807SJeff Garzik 		if (in_wq)
5435c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5436c6fd2807SJeff Garzik 
5437c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5438c6fd2807SJeff Garzik 			/* PIO data out protocol.
5439c6fd2807SJeff Garzik 			 * send first data block.
5440c6fd2807SJeff Garzik 			 */
5441c6fd2807SJeff Garzik 
5442c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5443c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5444c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5445c6fd2807SJeff Garzik 			 */
5446c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5447c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5448c6fd2807SJeff Garzik 		} else
5449c6fd2807SJeff Garzik 			/* send CDB */
5450c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5451c6fd2807SJeff Garzik 
5452c6fd2807SJeff Garzik 		if (in_wq)
5453c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5454c6fd2807SJeff Garzik 
5455c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5456c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5457c6fd2807SJeff Garzik 		 */
5458c6fd2807SJeff Garzik 		break;
5459c6fd2807SJeff Garzik 
5460c6fd2807SJeff Garzik 	case HSM_ST:
5461c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
54620dc36888STejun Heo 		if (qc->tf.protocol == ATAPI_PROT_PIO) {
5463c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5464c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5465c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5466c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5467c6fd2807SJeff Garzik 				 */
5468c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5469c6fd2807SJeff Garzik 				goto fsm_start;
5470c6fd2807SJeff Garzik 			}
5471c6fd2807SJeff Garzik 
5472c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5473c6fd2807SJeff Garzik 			 * when it finds something wrong.
5474c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5475c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5476c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5477c6fd2807SJeff Garzik 			 */
5478c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
547944877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
548044877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
548144877b4eSTejun Heo 						status);
5482c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5483c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5484c6fd2807SJeff Garzik 				goto fsm_start;
5485c6fd2807SJeff Garzik 			}
5486c6fd2807SJeff Garzik 
5487c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5488c6fd2807SJeff Garzik 
5489c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5490c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5491c6fd2807SJeff Garzik 				goto fsm_start;
5492c6fd2807SJeff Garzik 
5493c6fd2807SJeff Garzik 		} else {
5494c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5495c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5496c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5497c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5498c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5499c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5500c6fd2807SJeff Garzik 				else
550155a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
550255a8e2c8STejun Heo 					 * Phantom devices also trigger this
550355a8e2c8STejun Heo 					 * condition.  Mark hint.
550455a8e2c8STejun Heo 					 */
550555a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
550655a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5507c6fd2807SJeff Garzik 
5508c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5509c6fd2807SJeff Garzik 				goto fsm_start;
5510c6fd2807SJeff Garzik 			}
5511c6fd2807SJeff Garzik 
5512c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5513c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
5514c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5515c6fd2807SJeff Garzik 			 * block of junk data before changing the
5516c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5517c6fd2807SJeff Garzik 			 *
5518c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5519c6fd2807SJeff Garzik 			 * sense since the data block has been
5520c6fd2807SJeff Garzik 			 * transferred to the device.
5521c6fd2807SJeff Garzik 			 */
5522c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5523c6fd2807SJeff Garzik 				/* data might be corrupted */
5524c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5525c6fd2807SJeff Garzik 
5526c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5527c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5528c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5529c6fd2807SJeff Garzik 				}
5530c6fd2807SJeff Garzik 
5531c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5532c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5533c6fd2807SJeff Garzik 
5534c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5535c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5536c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5537c6fd2807SJeff Garzik 				 */
5538c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5539c6fd2807SJeff Garzik 				goto fsm_start;
5540c6fd2807SJeff Garzik 			}
5541c6fd2807SJeff Garzik 
5542c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5543c6fd2807SJeff Garzik 
5544c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5545c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5546c6fd2807SJeff Garzik 				/* all data read */
5547c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5548c6fd2807SJeff Garzik 				goto fsm_start;
5549c6fd2807SJeff Garzik 			}
5550c6fd2807SJeff Garzik 		}
5551c6fd2807SJeff Garzik 
5552c6fd2807SJeff Garzik 		poll_next = 1;
5553c6fd2807SJeff Garzik 		break;
5554c6fd2807SJeff Garzik 
5555c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5556c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5557c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5558c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5559c6fd2807SJeff Garzik 			goto fsm_start;
5560c6fd2807SJeff Garzik 		}
5561c6fd2807SJeff Garzik 
5562c6fd2807SJeff Garzik 		/* no more data to transfer */
5563c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
556444877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5565c6fd2807SJeff Garzik 
5566c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5567c6fd2807SJeff Garzik 
5568c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5569c6fd2807SJeff Garzik 
5570c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5571c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5572c6fd2807SJeff Garzik 
5573c6fd2807SJeff Garzik 		poll_next = 0;
5574c6fd2807SJeff Garzik 		break;
5575c6fd2807SJeff Garzik 
5576c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5577c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5578c6fd2807SJeff Garzik 		 * know what's wrong and recover
5579c6fd2807SJeff Garzik 		 */
5580c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5581c6fd2807SJeff Garzik 
5582c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5583c6fd2807SJeff Garzik 
5584c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5585c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5586c6fd2807SJeff Garzik 
5587c6fd2807SJeff Garzik 		poll_next = 0;
5588c6fd2807SJeff Garzik 		break;
5589c6fd2807SJeff Garzik 	default:
5590c6fd2807SJeff Garzik 		poll_next = 0;
5591c6fd2807SJeff Garzik 		BUG();
5592c6fd2807SJeff Garzik 	}
5593c6fd2807SJeff Garzik 
5594c6fd2807SJeff Garzik 	return poll_next;
5595c6fd2807SJeff Garzik }
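/*
 * Rough summary of the state transitions driven above:
 *
 *	HSM_ST_FIRST -> HSM_ST       PIO-out first block / ATAPI PIO CDB sent
 *	HSM_ST_FIRST -> HSM_ST_LAST  ATAPI non-data or DMA CDB sent
 *	HSM_ST       -> HSM_ST_LAST  final data block transferred
 *	any state    -> HSM_ST_ERR   device error or HSM violation
 *	HSM_ST_LAST / HSM_ST_ERR -> HSM_ST_IDLE  qc completed (EH on error)
 */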
5596c6fd2807SJeff Garzik 
559765f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5598c6fd2807SJeff Garzik {
559965f27f38SDavid Howells 	struct ata_port *ap =
560065f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
560165f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5602c6fd2807SJeff Garzik 	u8 status;
5603c6fd2807SJeff Garzik 	int poll_next;
5604c6fd2807SJeff Garzik 
5605c6fd2807SJeff Garzik fsm_start:
5606c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5607c6fd2807SJeff Garzik 
5608c6fd2807SJeff Garzik 	/*
5609c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5610c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5611c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5612c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5613c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5614c6fd2807SJeff Garzik 	 */
5615c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5616c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5617c6fd2807SJeff Garzik 		msleep(2);
5618c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5619c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5620442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
5621c6fd2807SJeff Garzik 			return;
5622c6fd2807SJeff Garzik 		}
5623c6fd2807SJeff Garzik 	}
5624c6fd2807SJeff Garzik 
5625c6fd2807SJeff Garzik 	/* move the HSM */
5626c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5627c6fd2807SJeff Garzik 
5628c6fd2807SJeff Garzik 	/* another command or interrupt handler
5629c6fd2807SJeff Garzik 	 * may be running at this point.
5630c6fd2807SJeff Garzik 	 */
5631c6fd2807SJeff Garzik 	if (poll_next)
5632c6fd2807SJeff Garzik 		goto fsm_start;
5633c6fd2807SJeff Garzik }
5634c6fd2807SJeff Garzik 
5635c6fd2807SJeff Garzik /**
5636c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5637c6fd2807SJeff Garzik  *	@ap: Port associated with device @dev
5638c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5639c6fd2807SJeff Garzik  *
5640c6fd2807SJeff Garzik  *	LOCKING:
5641c6fd2807SJeff Garzik  *	None.
5642c6fd2807SJeff Garzik  */
5643c6fd2807SJeff Garzik 
5644c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5645c6fd2807SJeff Garzik {
5646c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5647c6fd2807SJeff Garzik 	unsigned int i;
5648c6fd2807SJeff Garzik 
5649c6fd2807SJeff Garzik 	/* no command while frozen */
5650c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5651c6fd2807SJeff Garzik 		return NULL;
5652c6fd2807SJeff Garzik 
5653c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5654c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5655c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5656c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5657c6fd2807SJeff Garzik 			break;
5658c6fd2807SJeff Garzik 		}
5659c6fd2807SJeff Garzik 
5660c6fd2807SJeff Garzik 	if (qc)
5661c6fd2807SJeff Garzik 		qc->tag = i;
5662c6fd2807SJeff Garzik 
5663c6fd2807SJeff Garzik 	return qc;
5664c6fd2807SJeff Garzik }
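/*
 * Note: tags 0..ATA_MAX_QUEUE-2 are handed out here on a first-free basis
 * via test_and_set_bit(); the last tag (ATA_TAG_INTERNAL) is never
 * allocated from this path and stays reserved for ata_exec_internal().
 */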
5665c6fd2807SJeff Garzik 
5666c6fd2807SJeff Garzik /**
5667c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5668c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5669c6fd2807SJeff Garzik  *
5670c6fd2807SJeff Garzik  *	LOCKING:
5671c6fd2807SJeff Garzik  *	None.
5672c6fd2807SJeff Garzik  */
5673c6fd2807SJeff Garzik 
5674c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5675c6fd2807SJeff Garzik {
56769af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5677c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5678c6fd2807SJeff Garzik 
5679c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5680c6fd2807SJeff Garzik 	if (qc) {
5681c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5682c6fd2807SJeff Garzik 		qc->ap = ap;
5683c6fd2807SJeff Garzik 		qc->dev = dev;
5684c6fd2807SJeff Garzik 
5685c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5686c6fd2807SJeff Garzik 	}
5687c6fd2807SJeff Garzik 
5688c6fd2807SJeff Garzik 	return qc;
5689c6fd2807SJeff Garzik }
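
/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate/fill/issue pattern built on ata_qc_new_init() and
 * ata_qc_issue() below.  The function name and the choice of a simple
 * non-data command are assumptions for illustration only; the caller
 * is expected to hold the host lock as documented for ata_qc_issue().
 */
static void example_issue_nodata(struct ata_device *dev, u8 command,
				 void (*done)(struct ata_queued_cmd *))
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);	/* NULL while frozen or no free tag */
	if (!qc)
		return;

	ata_tf_init(dev, &qc->tf);
	qc->tf.command = command;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	qc->complete_fn = done;		/* invoked from __ata_qc_complete() */

	ata_qc_issue(qc);
}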
5690c6fd2807SJeff Garzik 
5691c6fd2807SJeff Garzik /**
5692c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5693c6fd2807SJeff Garzik  *	@qc: Command to complete
5694c6fd2807SJeff Garzik  *
5695c6fd2807SJeff Garzik  *	Designed to free an unused ata_queued_cmd object
5696c6fd2807SJeff Garzik  *	in case something prevents using it.
5697c6fd2807SJeff Garzik  *
5698c6fd2807SJeff Garzik  *	LOCKING:
5699cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5700c6fd2807SJeff Garzik  */
5701c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5702c6fd2807SJeff Garzik {
5703c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5704c6fd2807SJeff Garzik 	unsigned int tag;
5705c6fd2807SJeff Garzik 
5706c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5707c6fd2807SJeff Garzik 
5708c6fd2807SJeff Garzik 	qc->flags = 0;
5709c6fd2807SJeff Garzik 	tag = qc->tag;
5710c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5711c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5712c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5713c6fd2807SJeff Garzik 	}
5714c6fd2807SJeff Garzik }
5715c6fd2807SJeff Garzik 
5716c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5717c6fd2807SJeff Garzik {
5718c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
57199af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5720c6fd2807SJeff Garzik 
5721c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5722c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5723c6fd2807SJeff Garzik 
5724c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5725c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5726c6fd2807SJeff Garzik 
5727c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5728da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
57299af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5730da917d69STejun Heo 		if (!link->sactive)
5731da917d69STejun Heo 			ap->nr_active_links--;
5732da917d69STejun Heo 	} else {
57339af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5734da917d69STejun Heo 		ap->nr_active_links--;
5735da917d69STejun Heo 	}
5736da917d69STejun Heo 
5737da917d69STejun Heo 	/* clear exclusive status */
5738da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5739da917d69STejun Heo 		     ap->excl_link == link))
5740da917d69STejun Heo 		ap->excl_link = NULL;
5741c6fd2807SJeff Garzik 
5742c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5743c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5744c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5745c6fd2807SJeff Garzik 	 */
5746c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5747c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5748c6fd2807SJeff Garzik 
5749c6fd2807SJeff Garzik 	/* call completion callback */
5750c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5751c6fd2807SJeff Garzik }
5752c6fd2807SJeff Garzik 
575339599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
575439599a53STejun Heo {
575539599a53STejun Heo 	struct ata_port *ap = qc->ap;
575639599a53STejun Heo 
575739599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
57584742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
575939599a53STejun Heo }
576039599a53STejun Heo 
576100115e0fSTejun Heo static void ata_verify_xfer(struct ata_queued_cmd *qc)
576200115e0fSTejun Heo {
576300115e0fSTejun Heo 	struct ata_device *dev = qc->dev;
576400115e0fSTejun Heo 
576500115e0fSTejun Heo 	if (ata_tag_internal(qc->tag))
576600115e0fSTejun Heo 		return;
576700115e0fSTejun Heo 
576800115e0fSTejun Heo 	if (ata_is_nodata(qc->tf.protocol))
576900115e0fSTejun Heo 		return;
577000115e0fSTejun Heo 
577100115e0fSTejun Heo 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
577200115e0fSTejun Heo 		return;
577300115e0fSTejun Heo 
577400115e0fSTejun Heo 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
577500115e0fSTejun Heo }
577600115e0fSTejun Heo 
5777c6fd2807SJeff Garzik /**
5778c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5779c6fd2807SJeff Garzik  *	@qc: Command to complete
5781c6fd2807SJeff Garzik  *
5782c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5783c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5784c6fd2807SJeff Garzik  *
5785c6fd2807SJeff Garzik  *	LOCKING:
5786cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5787c6fd2807SJeff Garzik  */
5788c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5789c6fd2807SJeff Garzik {
5790c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5791c6fd2807SJeff Garzik 
5792c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5793c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5794c6fd2807SJeff Garzik 	 *
5795c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5796c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5797c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5798c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5799c6fd2807SJeff Garzik 	 *
5800c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5801c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5802c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5803c6fd2807SJeff Garzik 	 * taken care of.
5804c6fd2807SJeff Garzik 	 */
5805c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
58064dbfa39bSTejun Heo 		struct ata_device *dev = qc->dev;
58074dbfa39bSTejun Heo 		struct ata_eh_info *ehi = &dev->link->eh_info;
58084dbfa39bSTejun Heo 
5809c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5810c6fd2807SJeff Garzik 
5811c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5812c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5813c6fd2807SJeff Garzik 
5814c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5815c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5816c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
581739599a53STejun Heo 				fill_result_tf(qc);
5818c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5819c6fd2807SJeff Garzik 				return;
5820c6fd2807SJeff Garzik 			}
5821c6fd2807SJeff Garzik 		}
5822c6fd2807SJeff Garzik 
5823c6fd2807SJeff Garzik 		/* read result TF if requested */
5824c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
582539599a53STejun Heo 			fill_result_tf(qc);
5826c6fd2807SJeff Garzik 
58274dbfa39bSTejun Heo 		/* Some commands need post-processing after successful
58284dbfa39bSTejun Heo 		 * completion.
58294dbfa39bSTejun Heo 		 */
58304dbfa39bSTejun Heo 		switch (qc->tf.command) {
58314dbfa39bSTejun Heo 		case ATA_CMD_SET_FEATURES:
58324dbfa39bSTejun Heo 			if (qc->tf.feature != SETFEATURES_WC_ON &&
58334dbfa39bSTejun Heo 			    qc->tf.feature != SETFEATURES_WC_OFF)
58344dbfa39bSTejun Heo 				break;
58354dbfa39bSTejun Heo 			/* fall through */
58364dbfa39bSTejun Heo 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
58374dbfa39bSTejun Heo 		case ATA_CMD_SET_MULTI: /* multi_count changed */
58384dbfa39bSTejun Heo 			/* revalidate device */
58394dbfa39bSTejun Heo 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
58404dbfa39bSTejun Heo 			ata_port_schedule_eh(ap);
58414dbfa39bSTejun Heo 			break;
5842054a5fbaSTejun Heo 
5843054a5fbaSTejun Heo 		case ATA_CMD_SLEEP:
5844054a5fbaSTejun Heo 			dev->flags |= ATA_DFLAG_SLEEPING;
5845054a5fbaSTejun Heo 			break;
58464dbfa39bSTejun Heo 		}
58474dbfa39bSTejun Heo 
584800115e0fSTejun Heo 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
584900115e0fSTejun Heo 			ata_verify_xfer(qc);
585000115e0fSTejun Heo 
5851c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5852c6fd2807SJeff Garzik 	} else {
5853c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5854c6fd2807SJeff Garzik 			return;
5855c6fd2807SJeff Garzik 
5856c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5857c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
585839599a53STejun Heo 			fill_result_tf(qc);
5859c6fd2807SJeff Garzik 
5860c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5861c6fd2807SJeff Garzik 	}
5862c6fd2807SJeff Garzik }
5863c6fd2807SJeff Garzik 
5864c6fd2807SJeff Garzik /**
5865c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5866c6fd2807SJeff Garzik  *	@ap: port in question
5867c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5868c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5869c6fd2807SJeff Garzik  *
5870c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5871c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to complete
5872c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5873c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5874c6fd2807SJeff Garzik  *
5875c6fd2807SJeff Garzik  *	LOCKING:
5876cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5877c6fd2807SJeff Garzik  *
5878c6fd2807SJeff Garzik  *	RETURNS:
5879c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5880c6fd2807SJeff Garzik  */
5881c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5882c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5883c6fd2807SJeff Garzik {
5884c6fd2807SJeff Garzik 	int nr_done = 0;
5885c6fd2807SJeff Garzik 	u32 done_mask;
5886c6fd2807SJeff Garzik 	int i;
5887c6fd2807SJeff Garzik 
5888c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5889c6fd2807SJeff Garzik 
5890c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5891c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5892c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5893c6fd2807SJeff Garzik 		return -EINVAL;
5894c6fd2807SJeff Garzik 	}
5895c6fd2807SJeff Garzik 
5896c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5897c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5898c6fd2807SJeff Garzik 
5899c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5900c6fd2807SJeff Garzik 			continue;
5901c6fd2807SJeff Garzik 
5902c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5903c6fd2807SJeff Garzik 			if (finish_qc)
5904c6fd2807SJeff Garzik 				finish_qc(qc);
5905c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5906c6fd2807SJeff Garzik 			nr_done++;
5907c6fd2807SJeff Garzik 		}
5908c6fd2807SJeff Garzik 	}
5909c6fd2807SJeff Garzik 
5910c6fd2807SJeff Garzik 	return nr_done;
5911c6fd2807SJeff Garzik }
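
/*
 * Illustrative sketch (not part of the original file): how a
 * hypothetical NCQ-capable LLD's interrupt handler might use
 * ata_qc_complete_multiple().  "hw_sactive" stands for whatever
 * register the controller exposes for still-outstanding tags; the
 * name is an assumption for illustration only.
 */
static int example_lld_ncq_complete(struct ata_port *ap, u32 hw_sactive)
{
	/* tags set in ap->qc_active but clear in hw_sactive are done;
	 * ata_qc_complete_multiple() computes that difference itself
	 * and completes each finished command.
	 */
	return ata_qc_complete_multiple(ap, hw_sactive, NULL);
}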
5912c6fd2807SJeff Garzik 
5913c6fd2807SJeff Garzik /**
5914c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5915c6fd2807SJeff Garzik  *	@qc: command to issue to device
5916c6fd2807SJeff Garzik  *
5917c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5918c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5919c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5920c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5921c6fd2807SJeff Garzik  *
5922c6fd2807SJeff Garzik  *	LOCKING:
5923cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5924c6fd2807SJeff Garzik  */
5925c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5926c6fd2807SJeff Garzik {
5927c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
59289af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5929405e66b3STejun Heo 	u8 prot = qc->tf.protocol;
5930c6fd2807SJeff Garzik 
5931c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5932c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5933c6fd2807SJeff Garzik 	 * request ATAPI sense.
5934c6fd2807SJeff Garzik 	 */
59359af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5936c6fd2807SJeff Garzik 
59371973a023STejun Heo 	if (ata_is_ncq(prot)) {
59389af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
5939da917d69STejun Heo 
5940da917d69STejun Heo 		if (!link->sactive)
5941da917d69STejun Heo 			ap->nr_active_links++;
59429af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
5943c6fd2807SJeff Garzik 	} else {
59449af5c9c9STejun Heo 		WARN_ON(link->sactive);
5945da917d69STejun Heo 
5946da917d69STejun Heo 		ap->nr_active_links++;
59479af5c9c9STejun Heo 		link->active_tag = qc->tag;
5948c6fd2807SJeff Garzik 	}
5949c6fd2807SJeff Garzik 
5950c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5951c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5952c6fd2807SJeff Garzik 
5953f92a2636STejun Heo 	/* We guarantee to LLDs that they will have at least one
5954f92a2636STejun Heo 	 * non-zero sg if the command is a data command.
5955f92a2636STejun Heo 	 */
5956ff2aeb1eSTejun Heo 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5957f92a2636STejun Heo 
59580bcc65adSTejun Heo 	/* ata_sg_setup() may update nbytes */
59590bcc65adSTejun Heo 	qc->raw_nbytes = qc->nbytes;
59600bcc65adSTejun Heo 
5961405e66b3STejun Heo 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5962f92a2636STejun Heo 				 (ap->flags & ATA_FLAG_PIO_DMA)))
5963c6fd2807SJeff Garzik 		if (ata_sg_setup(qc))
5964c6fd2807SJeff Garzik 			goto sg_err;
5965c6fd2807SJeff Garzik 
5966054a5fbaSTejun Heo 	/* if device is sleeping, schedule softreset and abort the link */
5967054a5fbaSTejun Heo 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5968054a5fbaSTejun Heo 		link->eh_info.action |= ATA_EH_SOFTRESET;
5969054a5fbaSTejun Heo 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5970054a5fbaSTejun Heo 		ata_link_abort(link);
5971054a5fbaSTejun Heo 		return;
5972054a5fbaSTejun Heo 	}
5973054a5fbaSTejun Heo 
5974c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5975c6fd2807SJeff Garzik 
5976c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5977c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5978c6fd2807SJeff Garzik 		goto err;
5979c6fd2807SJeff Garzik 	return;
5980c6fd2807SJeff Garzik 
5981c6fd2807SJeff Garzik sg_err:
5982c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5983c6fd2807SJeff Garzik err:
5984c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5985c6fd2807SJeff Garzik }
5986c6fd2807SJeff Garzik 
5987c6fd2807SJeff Garzik /**
5988c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5989c6fd2807SJeff Garzik  *	@qc: command to issue to device
5990c6fd2807SJeff Garzik  *
5991c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5992c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5993c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5994c6fd2807SJeff Garzik  *	is slightly different.
5995c6fd2807SJeff Garzik  *
5996c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5997c6fd2807SJeff Garzik  *
5998c6fd2807SJeff Garzik  *	LOCKING:
5999cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6000c6fd2807SJeff Garzik  *
6001c6fd2807SJeff Garzik  *	RETURNS:
6002c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
6003c6fd2807SJeff Garzik  */
6004c6fd2807SJeff Garzik 
6005c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6006c6fd2807SJeff Garzik {
6007c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
6008c6fd2807SJeff Garzik 
6009c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
6010c6fd2807SJeff Garzik 	 * interrupt-driven PIO and the ATAPI CDB interrupt.
6011c6fd2807SJeff Garzik 	 */
6012c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
6013c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
6014c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
6015e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
60160dc36888STejun Heo 		case ATAPI_PROT_PIO:
60170dc36888STejun Heo 		case ATAPI_PROT_NODATA:
6018c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
6019c6fd2807SJeff Garzik 			break;
60200dc36888STejun Heo 		case ATAPI_PROT_DMA:
6021c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6022c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
6023c6fd2807SJeff Garzik 				BUG();
6024c6fd2807SJeff Garzik 			break;
6025c6fd2807SJeff Garzik 		default:
6026c6fd2807SJeff Garzik 			break;
6027c6fd2807SJeff Garzik 		}
6028c6fd2807SJeff Garzik 	}
6029c6fd2807SJeff Garzik 
6030c6fd2807SJeff Garzik 	/* select the device */
6031c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
6032c6fd2807SJeff Garzik 
6033c6fd2807SJeff Garzik 	/* start the command */
6034c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
6035c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
6036c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6037c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6038c6fd2807SJeff Garzik 
6039c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6040c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6041c6fd2807SJeff Garzik 
6042c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6043442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6044c6fd2807SJeff Garzik 
6045c6fd2807SJeff Garzik 		break;
6046c6fd2807SJeff Garzik 
6047c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
6048c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6049c6fd2807SJeff Garzik 
6050c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6051c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6052c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
6053c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6054c6fd2807SJeff Garzik 		break;
6055c6fd2807SJeff Garzik 
6056c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
6057c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6058c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6059c6fd2807SJeff Garzik 
6060c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6061c6fd2807SJeff Garzik 
6062c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
6063c6fd2807SJeff Garzik 			/* PIO data out protocol */
6064c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
6065442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6066c6fd2807SJeff Garzik 
6067c6fd2807SJeff Garzik 			/* always send first data block using
6068c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
6069c6fd2807SJeff Garzik 			 */
6070c6fd2807SJeff Garzik 		} else {
6071c6fd2807SJeff Garzik 			/* PIO data in protocol */
6072c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
6073c6fd2807SJeff Garzik 
6074c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
6075442eacc3SJeff Garzik 				ata_pio_queue_task(ap, qc, 0);
6076c6fd2807SJeff Garzik 
6077c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
6078c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
6079c6fd2807SJeff Garzik 			 */
6080c6fd2807SJeff Garzik 		}
6081c6fd2807SJeff Garzik 
6082c6fd2807SJeff Garzik 		break;
6083c6fd2807SJeff Garzik 
60840dc36888STejun Heo 	case ATAPI_PROT_PIO:
60850dc36888STejun Heo 	case ATAPI_PROT_NODATA:
6086c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6087c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6088c6fd2807SJeff Garzik 
6089c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6090c6fd2807SJeff Garzik 
6091c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6092c6fd2807SJeff Garzik 
6093c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6094c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6095c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
6096442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6097c6fd2807SJeff Garzik 		break;
6098c6fd2807SJeff Garzik 
60990dc36888STejun Heo 	case ATAPI_PROT_DMA:
6100c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6101c6fd2807SJeff Garzik 
6102c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6103c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6104c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6105c6fd2807SJeff Garzik 
6106c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6107c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6108442eacc3SJeff Garzik 			ata_pio_queue_task(ap, qc, 0);
6109c6fd2807SJeff Garzik 		break;
6110c6fd2807SJeff Garzik 
6111c6fd2807SJeff Garzik 	default:
6112c6fd2807SJeff Garzik 		WARN_ON(1);
6113c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
6114c6fd2807SJeff Garzik 	}
6115c6fd2807SJeff Garzik 
6116c6fd2807SJeff Garzik 	return 0;
6117c6fd2807SJeff Garzik }
6118c6fd2807SJeff Garzik 
6119c6fd2807SJeff Garzik /**
6120c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
6121c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
6122c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
6123c6fd2807SJeff Garzik  *
6124c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
6125c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
6126c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
6127c6fd2807SJeff Garzik  *
6128c6fd2807SJeff Garzik  *	LOCKING:
6129cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6130c6fd2807SJeff Garzik  *
6131c6fd2807SJeff Garzik  *	RETURNS:
6132c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
6133c6fd2807SJeff Garzik  */
6134c6fd2807SJeff Garzik 
6135c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap,
6136c6fd2807SJeff Garzik 				  struct ata_queued_cmd *qc)
6137c6fd2807SJeff Garzik {
61389af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
6139c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
6140c6fd2807SJeff Garzik 
6141c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
614244877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6143c6fd2807SJeff Garzik 
6144c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
6145c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
6146c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
6147c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
6148c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
6149c6fd2807SJeff Garzik 		 */
6150c6fd2807SJeff Garzik 
6151c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6152405e66b3STejun Heo 		 * The flag was turned on only for atapi devices.  No
6153405e66b3STejun Heo 		 * need to check ata_is_atapi(qc->tf.protocol) again.
6154c6fd2807SJeff Garzik 		 */
6155c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6156c6fd2807SJeff Garzik 			goto idle_irq;
6157c6fd2807SJeff Garzik 		break;
6158c6fd2807SJeff Garzik 	case HSM_ST_LAST:
6159c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
61600dc36888STejun Heo 		    qc->tf.protocol == ATAPI_PROT_DMA) {
6161c6fd2807SJeff Garzik 			/* check status of DMA engine */
6162c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
616344877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
616444877b4eSTejun Heo 				ap->print_id, host_stat);
6165c6fd2807SJeff Garzik 
6166c6fd2807SJeff Garzik 			/* if it's not our irq... */
6167c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
6168c6fd2807SJeff Garzik 				goto idle_irq;
6169c6fd2807SJeff Garzik 
6170c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
6171c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
6172c6fd2807SJeff Garzik 
6173c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
6174c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
6175c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
6176c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
6177c6fd2807SJeff Garzik 			}
6178c6fd2807SJeff Garzik 		}
6179c6fd2807SJeff Garzik 		break;
6180c6fd2807SJeff Garzik 	case HSM_ST:
6181c6fd2807SJeff Garzik 		break;
6182c6fd2807SJeff Garzik 	default:
6183c6fd2807SJeff Garzik 		goto idle_irq;
6184c6fd2807SJeff Garzik 	}
6185c6fd2807SJeff Garzik 
6186c6fd2807SJeff Garzik 	/* check altstatus */
6187c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
6188c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
6189c6fd2807SJeff Garzik 		goto idle_irq;
6190c6fd2807SJeff Garzik 
6191c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
6192c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
6193c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
6194c6fd2807SJeff Garzik 		goto idle_irq;
6195c6fd2807SJeff Garzik 
6196c6fd2807SJeff Garzik 	/* ack bmdma irq events */
6197c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
6198c6fd2807SJeff Garzik 
6199c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
6200ea54763fSTejun Heo 
6201ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
62020dc36888STejun Heo 				       qc->tf.protocol == ATAPI_PROT_DMA))
6203ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6204ea54763fSTejun Heo 
6205c6fd2807SJeff Garzik 	return 1;	/* irq handled */
6206c6fd2807SJeff Garzik 
6207c6fd2807SJeff Garzik idle_irq:
6208c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
6209c6fd2807SJeff Garzik 
6210c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6211c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
62126d32d30fSJeff Garzik 		ata_chk_status(ap);
62136d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
6214c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6215c6fd2807SJeff Garzik 		return 1;
6216c6fd2807SJeff Garzik 	}
6217c6fd2807SJeff Garzik #endif
6218c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
6219c6fd2807SJeff Garzik }
6220c6fd2807SJeff Garzik 
6221c6fd2807SJeff Garzik /**
6222c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
6223c6fd2807SJeff Garzik  *	@irq: irq line (unused)
6224cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
6225c6fd2807SJeff Garzik  *
6226c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
6227c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
6228c6fd2807SJeff Garzik  *
6229c6fd2807SJeff Garzik  *	LOCKING:
6230cca3974eSJeff Garzik  *	Obtains host lock during operation.
6231c6fd2807SJeff Garzik  *
6232c6fd2807SJeff Garzik  *	RETURNS:
6233c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
6234c6fd2807SJeff Garzik  */
6235c6fd2807SJeff Garzik 
62367d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance)
6237c6fd2807SJeff Garzik {
6238cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
6239c6fd2807SJeff Garzik 	unsigned int i;
6240c6fd2807SJeff Garzik 	unsigned int handled = 0;
6241c6fd2807SJeff Garzik 	unsigned long flags;
6242c6fd2807SJeff Garzik 
6243c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6244cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
6245c6fd2807SJeff Garzik 
6246cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6247c6fd2807SJeff Garzik 		struct ata_port *ap;
6248c6fd2807SJeff Garzik 
6249cca3974eSJeff Garzik 		ap = host->ports[i];
6250c6fd2807SJeff Garzik 		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
6252c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
6253c6fd2807SJeff Garzik 
62549af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
6255c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6256c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
6257c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
6258c6fd2807SJeff Garzik 		}
6259c6fd2807SJeff Garzik 	}
6260c6fd2807SJeff Garzik 
6261cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
6262c6fd2807SJeff Garzik 
6263c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
6264c6fd2807SJeff Garzik }
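
/*
 * Illustrative sketch (not part of the original file): an LLD that
 * manages its own IRQ setup can register ata_interrupt() directly as
 * its handler.  The "example_ata" name string and the function name
 * are assumptions for illustration only.
 */
static int example_request_irq(struct ata_host *host, int irq)
{
	return devm_request_irq(host->dev, irq, ata_interrupt,
				IRQF_SHARED, "example_ata", host);
}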
6265c6fd2807SJeff Garzik 
6266c6fd2807SJeff Garzik /**
6267c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
6268936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
6269c6fd2807SJeff Garzik  *
6270936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
6271c6fd2807SJeff Garzik  *
6272c6fd2807SJeff Garzik  *	LOCKING:
6273c6fd2807SJeff Garzik  *	None.
6274c6fd2807SJeff Garzik  *
6275c6fd2807SJeff Garzik  *	RETURNS:
6276c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
6277c6fd2807SJeff Garzik  */
6278936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
6279c6fd2807SJeff Garzik {
6280936fd732STejun Heo 	struct ata_port *ap = link->ap;
6281936fd732STejun Heo 
6282a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6283c6fd2807SJeff Garzik }
6284c6fd2807SJeff Garzik 
6285c6fd2807SJeff Garzik /**
6286c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
6287936fd732STejun Heo  *	@link: ATA link to read SCR for
6288c6fd2807SJeff Garzik  *	@reg: SCR to read
6289c6fd2807SJeff Garzik  *	@val: Place to store read value
6290c6fd2807SJeff Garzik  *
6291936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
6292633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6293633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6294c6fd2807SJeff Garzik  *
6295c6fd2807SJeff Garzik  *	LOCKING:
6296633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6297c6fd2807SJeff Garzik  *
6298c6fd2807SJeff Garzik  *	RETURNS:
6299c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6300c6fd2807SJeff Garzik  */
6301936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6302c6fd2807SJeff Garzik {
6303633273a3STejun Heo 	if (ata_is_host_link(link)) {
6304936fd732STejun Heo 		struct ata_port *ap = link->ap;
6305936fd732STejun Heo 
6306936fd732STejun Heo 		if (sata_scr_valid(link))
6307da3dbb17STejun Heo 			return ap->ops->scr_read(ap, reg, val);
6308c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6309c6fd2807SJeff Garzik 	}
6310c6fd2807SJeff Garzik 
6311633273a3STejun Heo 	return sata_pmp_scr_read(link, reg, val);
6312633273a3STejun Heo }
6313633273a3STejun Heo 
6314c6fd2807SJeff Garzik /**
6315c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
6316936fd732STejun Heo  *	@link: ATA link to write SCR for
6317c6fd2807SJeff Garzik  *	@reg: SCR to write
6318c6fd2807SJeff Garzik  *	@val: value to write
6319c6fd2807SJeff Garzik  *
6320936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
6321633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6322633273a3STejun Heo  *	the port is SATA and the port implements ->scr_write.
6323c6fd2807SJeff Garzik  *
6324c6fd2807SJeff Garzik  *	LOCKING:
6325633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6326c6fd2807SJeff Garzik  *
6327c6fd2807SJeff Garzik  *	RETURNS:
6328c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6329c6fd2807SJeff Garzik  */
6330936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
6331c6fd2807SJeff Garzik {
6332633273a3STejun Heo 	if (ata_is_host_link(link)) {
6333936fd732STejun Heo 		struct ata_port *ap = link->ap;
6334936fd732STejun Heo 
6335936fd732STejun Heo 		if (sata_scr_valid(link))
6336da3dbb17STejun Heo 			return ap->ops->scr_write(ap, reg, val);
6337c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6338c6fd2807SJeff Garzik 	}
6339c6fd2807SJeff Garzik 
6340633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6341633273a3STejun Heo }
6342633273a3STejun Heo 
6343c6fd2807SJeff Garzik /**
6344c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6345936fd732STejun Heo  *	@link: ATA link to write SCR for
6346c6fd2807SJeff Garzik  *	@reg: SCR to write
6347c6fd2807SJeff Garzik  *	@val: value to write
6348c6fd2807SJeff Garzik  *
6349c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
6350c6fd2807SJeff Garzik  *	function performs a flush after writing to the register.
6351c6fd2807SJeff Garzik  *
6352c6fd2807SJeff Garzik  *	LOCKING:
6353633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6354c6fd2807SJeff Garzik  *
6355c6fd2807SJeff Garzik  *	RETURNS:
6356c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6357c6fd2807SJeff Garzik  */
6358936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6359c6fd2807SJeff Garzik {
6360633273a3STejun Heo 	if (ata_is_host_link(link)) {
6361936fd732STejun Heo 		struct ata_port *ap = link->ap;
6362da3dbb17STejun Heo 		int rc;
6363da3dbb17STejun Heo 
6364936fd732STejun Heo 		if (sata_scr_valid(link)) {
6365da3dbb17STejun Heo 			rc = ap->ops->scr_write(ap, reg, val);
6366da3dbb17STejun Heo 			if (rc == 0)
6367da3dbb17STejun Heo 				rc = ap->ops->scr_read(ap, reg, &val);
6368da3dbb17STejun Heo 			return rc;
6369c6fd2807SJeff Garzik 		}
6370c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6371c6fd2807SJeff Garzik 	}
6372c6fd2807SJeff Garzik 
6373633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6374633273a3STejun Heo }
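
/*
 * Illustrative sketch (not part of the original file): a typical
 * read-modify-write of SControl through the SCR helpers above, here
 * limiting the link to Gen1 (1.5 Gbps) by setting the SPD field
 * (bits 7:4) to 1.  The function name is an assumption.
 */
static int example_limit_to_gen1(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;	/* no SCR access, e.g. PATA port */

	scontrol = (scontrol & ~0x0f0) | (1 << 4);
	return sata_scr_write_flush(link, SCR_CONTROL, scontrol);
}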
6375633273a3STejun Heo 
6376c6fd2807SJeff Garzik /**
6377936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6378936fd732STejun Heo  *	@link: ATA link to test
6379c6fd2807SJeff Garzik  *
6380936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6381936fd732STejun Heo  *	0 if online status of @link cannot be obtained, so
6382936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6383c6fd2807SJeff Garzik  *
6384c6fd2807SJeff Garzik  *	LOCKING:
6385c6fd2807SJeff Garzik  *	None.
6386c6fd2807SJeff Garzik  *
6387c6fd2807SJeff Garzik  *	RETURNS:
6388c6fd2807SJeff Garzik  *	1 if the link online status is available and the link is online.
6389c6fd2807SJeff Garzik  */
6390936fd732STejun Heo int ata_link_online(struct ata_link *link)
6391c6fd2807SJeff Garzik {
6392c6fd2807SJeff Garzik 	u32 sstatus;
6393c6fd2807SJeff Garzik 
6394936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6395936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6396c6fd2807SJeff Garzik 		return 1;
6397c6fd2807SJeff Garzik 	return 0;
6398c6fd2807SJeff Garzik }
6399c6fd2807SJeff Garzik 
6400c6fd2807SJeff Garzik /**
6401936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6402936fd732STejun Heo  *	@link: ATA link to test
6403c6fd2807SJeff Garzik  *
6404936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6405936fd732STejun Heo  *	returns 0 if offline status of @link cannot be obtained, so
6406936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6407c6fd2807SJeff Garzik  *
6408c6fd2807SJeff Garzik  *	LOCKING:
6409c6fd2807SJeff Garzik  *	None.
6410c6fd2807SJeff Garzik  *
6411c6fd2807SJeff Garzik  *	RETURNS:
6412c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
6413c6fd2807SJeff Garzik  *	1 if the link offline status is available and the link is offline.
6414936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6415c6fd2807SJeff Garzik {
6416c6fd2807SJeff Garzik 	u32 sstatus;
6417c6fd2807SJeff Garzik 
6418936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6419936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6420c6fd2807SJeff Garzik 		return 1;
6421c6fd2807SJeff Garzik 	return 0;
6422c6fd2807SJeff Garzik }
6423c6fd2807SJeff Garzik 
6424c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6425c6fd2807SJeff Garzik {
6426c6fd2807SJeff Garzik 	unsigned int err_mask;
6427c6fd2807SJeff Garzik 	u8 cmd;
6428c6fd2807SJeff Garzik 
6429c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6430c6fd2807SJeff Garzik 		return 0;
6431c6fd2807SJeff Garzik 
64326fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6433c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6434c6fd2807SJeff Garzik 	else
6435c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6436c6fd2807SJeff Garzik 
64374f34337bSAlan Cox 	/* This is wrong. On a failed flush we get back the LBA of the lost
64384f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
64394f34337bSAlan Cox 	   a further flush command to continue the writeback until it
64404f34337bSAlan Cox 	   does not error */
6441c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6442c6fd2807SJeff Garzik 	if (err_mask) {
6443c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6444c6fd2807SJeff Garzik 		return -EIO;
6445c6fd2807SJeff Garzik 	}
6446c6fd2807SJeff Garzik 
6447c6fd2807SJeff Garzik 	return 0;
6448c6fd2807SJeff Garzik }
6449c6fd2807SJeff Garzik 
64506ffa01d8STejun Heo #ifdef CONFIG_PM
6451cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6452cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6453cca3974eSJeff Garzik 			       int wait)
6454c6fd2807SJeff Garzik {
6455c6fd2807SJeff Garzik 	unsigned long flags;
6456c6fd2807SJeff Garzik 	int i, rc;
6457c6fd2807SJeff Garzik 
6458cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6459cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6460e3667ebfSTejun Heo 		struct ata_link *link;
6461c6fd2807SJeff Garzik 
6462c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6463c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6464c6fd2807SJeff Garzik 		 */
6465c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6466c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6467c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6468c6fd2807SJeff Garzik 		}
6469c6fd2807SJeff Garzik 
6470c6fd2807SJeff Garzik 		/* request PM ops to EH */
6471c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6472c6fd2807SJeff Garzik 
6473c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6474c6fd2807SJeff Garzik 		if (wait) {
6475c6fd2807SJeff Garzik 			rc = 0;
6476c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6477c6fd2807SJeff Garzik 		}
6478c6fd2807SJeff Garzik 
6479c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6480e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6481e3667ebfSTejun Heo 			link->eh_info.action |= action;
6482e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6483e3667ebfSTejun Heo 		}
6484c6fd2807SJeff Garzik 
6485c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6486c6fd2807SJeff Garzik 
6487c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6488c6fd2807SJeff Garzik 
6489c6fd2807SJeff Garzik 		/* wait and check result */
6490c6fd2807SJeff Garzik 		if (wait) {
6491c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6492c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6493c6fd2807SJeff Garzik 			if (rc)
6494c6fd2807SJeff Garzik 				return rc;
6495c6fd2807SJeff Garzik 		}
6496c6fd2807SJeff Garzik 	}
6497c6fd2807SJeff Garzik 
6498c6fd2807SJeff Garzik 	return 0;
6499c6fd2807SJeff Garzik }
6500c6fd2807SJeff Garzik 
6501c6fd2807SJeff Garzik /**
6502cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6503cca3974eSJeff Garzik  *	@host: host to suspend
6504c6fd2807SJeff Garzik  *	@mesg: PM message
6505c6fd2807SJeff Garzik  *
6506cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6507c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6508c6fd2807SJeff Garzik  *	to finish.
6509c6fd2807SJeff Garzik  *
6510c6fd2807SJeff Garzik  *	LOCKING:
6511c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6512c6fd2807SJeff Garzik  *
6513c6fd2807SJeff Garzik  *	RETURNS:
6514c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6515c6fd2807SJeff Garzik  */
6516cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6517c6fd2807SJeff Garzik {
65189666f400STejun Heo 	int rc;
6519c6fd2807SJeff Garzik 
6520ca77329fSKristen Carlson Accardi 	/*
6521ca77329fSKristen Carlson Accardi 	 * disable link pm on all ports before requesting
6522ca77329fSKristen Carlson Accardi 	 * any pm activity
6523ca77329fSKristen Carlson Accardi 	 */
6524ca77329fSKristen Carlson Accardi 	ata_lpm_enable(host);
6525ca77329fSKristen Carlson Accardi 
6526cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
65279666f400STejun Heo 	if (rc == 0)
6528cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
6529c6fd2807SJeff Garzik 	return rc;
6530c6fd2807SJeff Garzik }
6531c6fd2807SJeff Garzik 
6532c6fd2807SJeff Garzik /**
6533cca3974eSJeff Garzik  *	ata_host_resume - resume host
6534cca3974eSJeff Garzik  *	@host: host to resume
6535c6fd2807SJeff Garzik  *
6536cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6537c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6538c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
6539c6fd2807SJeff Garzik  *
6540c6fd2807SJeff Garzik  *	LOCKING:
6541c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6542c6fd2807SJeff Garzik  */
6543cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6544c6fd2807SJeff Garzik {
6545cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6546c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6547cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6548ca77329fSKristen Carlson Accardi 
6549ca77329fSKristen Carlson Accardi 	/* reenable link pm */
6550ca77329fSKristen Carlson Accardi 	ata_lpm_disable(host);
6551c6fd2807SJeff Garzik }
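
/*
 * Illustrative sketch (not part of the original file): how a bus glue
 * driver's suspend/resume hooks typically forward to the two helpers
 * above.  dev_get_drvdata() works because ata_host_alloc() stores the
 * host there; the function names are assumptions for illustration.
 */
static int example_bus_suspend(struct device *dev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(dev);

	return ata_host_suspend(host, mesg);
}

static int example_bus_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_resume(host);
	return 0;
}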
65526ffa01d8STejun Heo #endif
6553c6fd2807SJeff Garzik 
6554c6fd2807SJeff Garzik /**
6555c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6556c6fd2807SJeff Garzik  *	@ap: Port to initialize
6557c6fd2807SJeff Garzik  *
6558c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6559c6fd2807SJeff Garzik  *	initialized.  Allocates space for the PRD table.
6560c6fd2807SJeff Garzik  *
6561c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6562c6fd2807SJeff Garzik  *
6563c6fd2807SJeff Garzik  *	LOCKING:
6564c6fd2807SJeff Garzik  *	Inherited from caller.
6565c6fd2807SJeff Garzik  */
6566c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6567c6fd2807SJeff Garzik {
6568c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6569c6fd2807SJeff Garzik 	int rc;
6570c6fd2807SJeff Garzik 
6571f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6572f0d36efdSTejun Heo 				      GFP_KERNEL);
6573c6fd2807SJeff Garzik 	if (!ap->prd)
6574c6fd2807SJeff Garzik 		return -ENOMEM;
6575c6fd2807SJeff Garzik 
6576c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
6577f0d36efdSTejun Heo 	if (rc)
6578c6fd2807SJeff Garzik 		return rc;
6579c6fd2807SJeff Garzik 
6580f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6581f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
6582c6fd2807SJeff Garzik 	return 0;
6583c6fd2807SJeff Garzik }
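
/*
 * Illustrative sketch (not part of the original file): a driver with
 * per-port private state usually chains to ata_port_start() and then
 * hangs its own structure off ap->private_data.  The structure and
 * function names are assumptions for illustration only.
 */
struct example_port_priv {
	u32	saved_ctl;
};

static int example_port_start(struct ata_port *ap)
{
	struct example_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* PRD table + pad buffer */
	if (rc)
		return rc;

	pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	ap->private_data = pp;
	return 0;
}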
6584c6fd2807SJeff Garzik 
6585c6fd2807SJeff Garzik /**
6586c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6587c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6588c6fd2807SJeff Garzik  *
6589c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6590c6fd2807SJeff Garzik  *
6591c6fd2807SJeff Garzik  *	LOCKING:
6592c6fd2807SJeff Garzik  *	Inherited from caller.
6593c6fd2807SJeff Garzik  */
6594c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6595c6fd2807SJeff Garzik {
65969af5c9c9STejun Heo 	struct ata_link *link = dev->link;
65979af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6598c6fd2807SJeff Garzik 	unsigned long flags;
6599c6fd2807SJeff Garzik 
6600c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
66019af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
66029af5c9c9STejun Heo 	link->sata_spd = 0;
6603c6fd2807SJeff Garzik 
6604c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6605c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6606cca3974eSJeff Garzik 	 * host lock.
6607c6fd2807SJeff Garzik 	 */
6608c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6609c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
66103dcc323fSTejun Heo 	dev->horkage = 0;
6611c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6612c6fd2807SJeff Garzik 
6613c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6614c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6615c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6616c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6617c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6618c6fd2807SJeff Garzik }
6619c6fd2807SJeff Garzik 
6620c6fd2807SJeff Garzik /**
66214fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
66224fb37a25STejun Heo  *	@ap: ATA port link is attached to
66234fb37a25STejun Heo  *	@link: Link structure to initialize
66248989805dSTejun Heo  *	@pmp: Port multiplier port number
66254fb37a25STejun Heo  *
66264fb37a25STejun Heo  *	Initialize @link.
66274fb37a25STejun Heo  *
66284fb37a25STejun Heo  *	LOCKING:
66294fb37a25STejun Heo  *	Kernel thread context (may sleep)
66304fb37a25STejun Heo  */
6631fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
66324fb37a25STejun Heo {
66334fb37a25STejun Heo 	int i;
66344fb37a25STejun Heo 
66354fb37a25STejun Heo 	/* clear everything except for devices */
66364fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
66374fb37a25STejun Heo 
66384fb37a25STejun Heo 	link->ap = ap;
66398989805dSTejun Heo 	link->pmp = pmp;
66404fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
66414fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
66424fb37a25STejun Heo 
66434fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
66444fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
66454fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
66464fb37a25STejun Heo 
66474fb37a25STejun Heo 		dev->link = link;
66484fb37a25STejun Heo 		dev->devno = dev - link->device;
66494fb37a25STejun Heo 		ata_dev_init(dev);
66504fb37a25STejun Heo 	}
66514fb37a25STejun Heo }
66524fb37a25STejun Heo 
66534fb37a25STejun Heo /**
66544fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
66554fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
66564fb37a25STejun Heo  *
66574fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
66584fb37a25STejun Heo  *	configured value.
66594fb37a25STejun Heo  *
66604fb37a25STejun Heo  *	LOCKING:
66614fb37a25STejun Heo  *	Kernel thread context (may sleep).
66624fb37a25STejun Heo  *
66634fb37a25STejun Heo  *	RETURNS:
66644fb37a25STejun Heo  *	0 on success, -errno on failure.
66654fb37a25STejun Heo  */
6666fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
66674fb37a25STejun Heo {
66684fb37a25STejun Heo 	u32 scontrol, spd;
66694fb37a25STejun Heo 	int rc;
66704fb37a25STejun Heo 
66714fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
66724fb37a25STejun Heo 	if (rc)
66734fb37a25STejun Heo 		return rc;
66744fb37a25STejun Heo 
66754fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
66764fb37a25STejun Heo 	if (spd)
66774fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
66784fb37a25STejun Heo 
66794fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
66804fb37a25STejun Heo 
66814fb37a25STejun Heo 	return 0;
66824fb37a25STejun Heo }
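
/*
 * Worked example for the mask arithmetic above (added for clarity,
 * not part of the original file): if SControl's SPD field reads 2
 * (limit to 3.0 Gbps), then (1 << 2) - 1 == 0x3, so only the Gen1
 * and Gen2 bits survive in hw_sata_spd_limit; an SPD of 0 means
 * "no restriction" and leaves the limit untouched.
 */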
66834fb37a25STejun Heo 
66844fb37a25STejun Heo /**
6685f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6686f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6687c6fd2807SJeff Garzik  *
6688f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6689f3187195STejun Heo  *
6690f3187195STejun Heo  *	RETURNS:
6691f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6692c6fd2807SJeff Garzik  *
6693c6fd2807SJeff Garzik  *	LOCKING:
6694f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6695c6fd2807SJeff Garzik  */
6696f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6697c6fd2807SJeff Garzik {
6698f3187195STejun Heo 	struct ata_port *ap;
6699c6fd2807SJeff Garzik 
6700f3187195STejun Heo 	DPRINTK("ENTER\n");
6701f3187195STejun Heo 
6702f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6703f3187195STejun Heo 	if (!ap)
6704f3187195STejun Heo 		return NULL;
6705f3187195STejun Heo 
6706f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6707cca3974eSJeff Garzik 	ap->lock = &host->lock;
6708c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6709f3187195STejun Heo 	ap->print_id = -1;
6710c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6711cca3974eSJeff Garzik 	ap->host = host;
6712f3187195STejun Heo 	ap->dev = host->dev;
6713c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6714c6fd2807SJeff Garzik 
6715c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6716c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6717c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6718c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6719c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6720c6fd2807SJeff Garzik #else
6721c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6722c6fd2807SJeff Garzik #endif
6723c6fd2807SJeff Garzik 
6724442eacc3SJeff Garzik 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
672565f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
672665f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6727c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6728c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
67295ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
67305ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
67315ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6732c6fd2807SJeff Garzik 
6733c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6734c6fd2807SJeff Garzik 
67358989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6736c6fd2807SJeff Garzik 
6737c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6738c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6739c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6740c6fd2807SJeff Garzik #endif
6741c6fd2807SJeff Garzik 	return ap;
6742c6fd2807SJeff Garzik }
6743c6fd2807SJeff Garzik 
6744f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6745f0d36efdSTejun Heo {
6746f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6747f0d36efdSTejun Heo 	int i;
6748f0d36efdSTejun Heo 
6749f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6750f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6751f0d36efdSTejun Heo 
6752ecef7253STejun Heo 		if (!ap)
6753ecef7253STejun Heo 			continue;
6754ecef7253STejun Heo 
67554911487aSTejun Heo 		if (ap->scsi_host)
67561aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
67571aa506e4STejun Heo 
6758633273a3STejun Heo 		kfree(ap->pmp_link);
67594911487aSTejun Heo 		kfree(ap);
67601aa506e4STejun Heo 		host->ports[i] = NULL;
67611aa506e4STejun Heo 	}
67621aa506e4STejun Heo 
67631aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6764f0d36efdSTejun Heo }
6765f0d36efdSTejun Heo 
6766c6fd2807SJeff Garzik /**
6767f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6768f3187195STejun Heo  *	@dev: generic device this host is associated with
6769f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6770f3187195STejun Heo  *
6771f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6772f3187195STejun Heo  *	this function to allocate a host, initializes it fully, and then
6773f3187195STejun Heo  *	attaches it using ata_host_register().
6774f3187195STejun Heo  *
6775f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6776f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6777f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6778f3187195STejun Heo  *	ports will be automatically freed on registration.
6779f3187195STejun Heo  *
6780f3187195STejun Heo  *	RETURNS:
6781f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6782f3187195STejun Heo  *
6783f3187195STejun Heo  *	LOCKING:
6784f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6785f3187195STejun Heo  */
6786f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6787f3187195STejun Heo {
6788f3187195STejun Heo 	struct ata_host *host;
6789f3187195STejun Heo 	size_t sz;
6790f3187195STejun Heo 	int i;
6791f3187195STejun Heo 
6792f3187195STejun Heo 	DPRINTK("ENTER\n");
6793f3187195STejun Heo 
6794f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6795f3187195STejun Heo 		return NULL;
6796f3187195STejun Heo 
6797f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6798f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6800f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6801f3187195STejun Heo 	if (!host)
6802f3187195STejun Heo 		goto err_out;
6803f3187195STejun Heo 
6804f3187195STejun Heo 	devres_add(dev, host);
6805f3187195STejun Heo 	dev_set_drvdata(dev, host);
6806f3187195STejun Heo 
6807f3187195STejun Heo 	spin_lock_init(&host->lock);
6808f3187195STejun Heo 	host->dev = dev;
6809f3187195STejun Heo 	host->n_ports = max_ports;
6810f3187195STejun Heo 
6811f3187195STejun Heo 	/* allocate ports bound to this host */
6812f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6813f3187195STejun Heo 		struct ata_port *ap;
6814f3187195STejun Heo 
6815f3187195STejun Heo 		ap = ata_port_alloc(host);
6816f3187195STejun Heo 		if (!ap)
6817f3187195STejun Heo 			goto err_out;
6818f3187195STejun Heo 
6819f3187195STejun Heo 		ap->port_no = i;
6820f3187195STejun Heo 		host->ports[i] = ap;
6821f3187195STejun Heo 	}
6822f3187195STejun Heo 
6823f3187195STejun Heo 	devres_remove_group(dev, NULL);
6824f3187195STejun Heo 	return host;
6825f3187195STejun Heo 
6826f3187195STejun Heo  err_out:
6827f3187195STejun Heo 	devres_release_group(dev, NULL);
6828f3187195STejun Heo 	return NULL;
6829f3187195STejun Heo }
6830f3187195STejun Heo 
6831f3187195STejun Heo /**
6832f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6833f5cda257STejun Heo  *	@dev: generic device this host is associated with
6834f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6835f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6836f5cda257STejun Heo  *
6837f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6838f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6839f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6840f5cda257STejun Heo  *
6841f5cda257STejun Heo  *	RETURNS:
6842f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6843f5cda257STejun Heo  *
6844f5cda257STejun Heo  *	LOCKING:
6845f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6846f5cda257STejun Heo  */
6847f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6848f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6849f5cda257STejun Heo 				      int n_ports)
6850f5cda257STejun Heo {
6851f5cda257STejun Heo 	const struct ata_port_info *pi;
6852f5cda257STejun Heo 	struct ata_host *host;
6853f5cda257STejun Heo 	int i, j;
6854f5cda257STejun Heo 
6855f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6856f5cda257STejun Heo 	if (!host)
6857f5cda257STejun Heo 		return NULL;
6858f5cda257STejun Heo 
6859f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6860f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6861f5cda257STejun Heo 
6862f5cda257STejun Heo 		if (ppi[j])
6863f5cda257STejun Heo 			pi = ppi[j++];
6864f5cda257STejun Heo 
6865f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6866f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6867f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6868f5cda257STejun Heo 		ap->flags |= pi->flags;
68690c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6870f5cda257STejun Heo 		ap->ops = pi->port_ops;
6871f5cda257STejun Heo 
6872f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6873f5cda257STejun Heo 			host->ops = pi->port_ops;
6874f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6875f5cda257STejun Heo 			host->private_data = pi->private_data;
6876f5cda257STejun Heo 	}
6877f5cda257STejun Heo 
6878f5cda257STejun Heo 	return host;
6879f5cda257STejun Heo }
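
/*
 * Example (sketch): one NULL-terminated port_info entry can describe
 * every port of a hypothetical four-port controller; my_port_ops is a
 * placeholder for the driver's port operations and the masks are
 * illustrative.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,
 *		.udma_mask	= 0x7f,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, 4);
 *	if (!host)
 *		return -ENOMEM;
 *
 * Because @ppi is NULL terminated, its only entry is reused for all
 * four ports.
 */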
6880f5cda257STejun Heo 
688132ebbc0cSTejun Heo static void ata_host_stop(struct device *gendev, void *res)
688232ebbc0cSTejun Heo {
688332ebbc0cSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
688432ebbc0cSTejun Heo 	int i;
688532ebbc0cSTejun Heo 
688632ebbc0cSTejun Heo 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
688732ebbc0cSTejun Heo 
688832ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
688932ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
689032ebbc0cSTejun Heo 
689132ebbc0cSTejun Heo 		if (ap->ops->port_stop)
689232ebbc0cSTejun Heo 			ap->ops->port_stop(ap);
689332ebbc0cSTejun Heo 	}
689432ebbc0cSTejun Heo 
689532ebbc0cSTejun Heo 	if (host->ops->host_stop)
689632ebbc0cSTejun Heo 		host->ops->host_stop(host);
689732ebbc0cSTejun Heo }
689832ebbc0cSTejun Heo 
6899f5cda257STejun Heo /**
6900ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6901ecef7253STejun Heo  *	@host: ATA host to start ports for
6902ecef7253STejun Heo  *
6903ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6904ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6905ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6906f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6907f3187195STejun Heo  *	first non-dummy port ops.
6908ecef7253STejun Heo  *
6909ecef7253STejun Heo  *	LOCKING:
6910ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6911ecef7253STejun Heo  *
6912ecef7253STejun Heo  *	RETURNS:
6913ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6914ecef7253STejun Heo  */
6915ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6916ecef7253STejun Heo {
691732ebbc0cSTejun Heo 	int have_stop = 0;
691832ebbc0cSTejun Heo 	void *start_dr = NULL;
6919ecef7253STejun Heo 	int i, rc;
6920ecef7253STejun Heo 
6921ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6922ecef7253STejun Heo 		return 0;
6923ecef7253STejun Heo 
6924ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6925ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6926ecef7253STejun Heo 
6927f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6928f3187195STejun Heo 			host->ops = ap->ops;
6929f3187195STejun Heo 
693032ebbc0cSTejun Heo 		if (ap->ops->port_stop)
693132ebbc0cSTejun Heo 			have_stop = 1;
693232ebbc0cSTejun Heo 	}
693332ebbc0cSTejun Heo 
693432ebbc0cSTejun Heo 	if (host->ops->host_stop)
693532ebbc0cSTejun Heo 		have_stop = 1;
693632ebbc0cSTejun Heo 
693732ebbc0cSTejun Heo 	if (have_stop) {
693832ebbc0cSTejun Heo 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
693932ebbc0cSTejun Heo 		if (!start_dr)
694032ebbc0cSTejun Heo 			return -ENOMEM;
694132ebbc0cSTejun Heo 	}
694232ebbc0cSTejun Heo 
694332ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
694432ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
694532ebbc0cSTejun Heo 
6946ecef7253STejun Heo 		if (ap->ops->port_start) {
6947ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6948ecef7253STejun Heo 			if (rc) {
69490f9fe9b7SAlan Cox 				if (rc != -ENODEV)
69500f757743SAndrew Morton 					dev_printk(KERN_ERR, host->dev,
69510f757743SAndrew Morton 						"failed to start port %d "
69520f757743SAndrew Morton 						"(errno=%d)\n", i, rc);
6953ecef7253STejun Heo 				goto err_out;
6954ecef7253STejun Heo 			}
6955ecef7253STejun Heo 		}
6956ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6957ecef7253STejun Heo 	}
6958ecef7253STejun Heo 
695932ebbc0cSTejun Heo 	if (start_dr)
696032ebbc0cSTejun Heo 		devres_add(host->dev, start_dr);
6961ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6962ecef7253STejun Heo 	return 0;
6963ecef7253STejun Heo 
6964ecef7253STejun Heo  err_out:
6965ecef7253STejun Heo 	while (--i >= 0) {
6966ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6967ecef7253STejun Heo 
6968ecef7253STejun Heo 		if (ap->ops->port_stop)
6969ecef7253STejun Heo 			ap->ops->port_stop(ap);
6970ecef7253STejun Heo 	}
697132ebbc0cSTejun Heo 	devres_free(start_dr);
6972ecef7253STejun Heo 	return rc;
6973ecef7253STejun Heo }
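
/*
 * Example (sketch): a driver that must touch its hardware between
 * starting and registering the host calls ata_host_start() itself
 * instead of relying on ata_host_activate(); my_interrupt and my_sht
 * are placeholders.
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *
 *	(program controller-specific registers here)
 *
 *	rc = devm_request_irq(host->dev, irq, my_interrupt, IRQF_SHARED,
 *			      dev_driver_string(host->dev), host);
 *	if (rc)
 *		return rc;
 *
 *	return ata_host_register(host, &my_sht);
 */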
6974ecef7253STejun Heo 
6975ecef7253STejun Heo /**
6976cca3974eSJeff Garzik  *	ata_host_init - Initialize a host struct
6977cca3974eSJeff Garzik  *	@host:	host to initialize
6978cca3974eSJeff Garzik  *	@dev:	device host is attached to
6979cca3974eSJeff Garzik  *	@flags:	host flags
6980c6fd2807SJeff Garzik  *	@ops:	port_ops
6981c6fd2807SJeff Garzik  *
6982c6fd2807SJeff Garzik  *	LOCKING:
6983c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6984c6fd2807SJeff Garzik  *
6985c6fd2807SJeff Garzik  */
6986f3187195STejun Heo /* KILLME - the only user left is ipr */
6987cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6988cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6989c6fd2807SJeff Garzik {
6990cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6991cca3974eSJeff Garzik 	host->dev = dev;
6992cca3974eSJeff Garzik 	host->flags = flags;
6993cca3974eSJeff Garzik 	host->ops = ops;
6994c6fd2807SJeff Garzik }
6995c6fd2807SJeff Garzik 
6996c6fd2807SJeff Garzik /**
6997f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6998f3187195STejun Heo  *	@host: ATA host to register
6999f3187195STejun Heo  *	@sht: template for SCSI host
7000c6fd2807SJeff Garzik  *
7001f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
7002f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
7003f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
7004f3187195STejun Heo  *	probes registered devices.
7005c6fd2807SJeff Garzik  *
7006c6fd2807SJeff Garzik  *	LOCKING:
7007f3187195STejun Heo  *	Inherited from calling layer (may sleep).
7008c6fd2807SJeff Garzik  *
7009c6fd2807SJeff Garzik  *	RETURNS:
7010f3187195STejun Heo  *	0 on success, -errno otherwise.
7011c6fd2807SJeff Garzik  */
7012f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7013c6fd2807SJeff Garzik {
7014f3187195STejun Heo 	int i, rc;
7015c6fd2807SJeff Garzik 
7016f3187195STejun Heo 	/* host must have been started */
7017f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
7018f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
7019f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
7020f3187195STejun Heo 		WARN_ON(1);
7021f3187195STejun Heo 		return -EINVAL;
702202f076aaSAlan Cox 	}
7023f0d36efdSTejun Heo 
7024f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
7025f3187195STejun Heo 	 * determine the exact number of ports to allocate at
7026f3187195STejun Heo 	 * allocation time.
7027f3187195STejun Heo 	 */
7028f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
7029f3187195STejun Heo 		kfree(host->ports[i]);
7030f0d36efdSTejun Heo 
7031f3187195STejun Heo 	/* give ports names and add SCSI hosts */
7032f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
7033f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
7034c6fd2807SJeff Garzik 
7035f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
7036ecef7253STejun Heo 	if (rc)
7037f3187195STejun Heo 		return rc;
7038ecef7253STejun Heo 
7039fafbae87STejun Heo 	/* associate with ACPI nodes */
7040fafbae87STejun Heo 	ata_acpi_associate(host);
7041fafbae87STejun Heo 
7042f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
7043cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7044cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7045f3187195STejun Heo 		unsigned long xfer_mask;
7046f3187195STejun Heo 
7047f3187195STejun Heo 		/* set SATA cable type if still unset */
7048f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7049f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
7050c6fd2807SJeff Garzik 
7051c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
70524fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
7053c6fd2807SJeff Garzik 
7054cbcdd875STejun Heo 		/* print per-port info to dmesg */
7055f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7056f3187195STejun Heo 					      ap->udma_mask);
7057f3187195STejun Heo 
7058abf6e8edSTejun Heo 		if (!ata_port_is_dummy(ap)) {
7059cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
7060cbcdd875STejun Heo 					"%cATA max %s %s\n",
7061a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7062f3187195STejun Heo 					ata_mode_string(xfer_mask),
7063cbcdd875STejun Heo 					ap->link.eh_info.desc);
7064abf6e8edSTejun Heo 			ata_ehi_clear_desc(&ap->link.eh_info);
7065abf6e8edSTejun Heo 		} else
7066f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7067c6fd2807SJeff Garzik 	}
7068c6fd2807SJeff Garzik 
7069f3187195STejun Heo 	/* perform each probe synchronously */
7070f3187195STejun Heo 	DPRINTK("probe begin\n");
7071f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
7072f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
7073f3187195STejun Heo 		int rc;
7074f3187195STejun Heo 
7075f3187195STejun Heo 		/* probe */
7076c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
70779af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
7078c6fd2807SJeff Garzik 			unsigned long flags;
7079c6fd2807SJeff Garzik 
7080c6fd2807SJeff Garzik 			ata_port_probe(ap);
7081c6fd2807SJeff Garzik 
7082c6fd2807SJeff Garzik 			/* kick EH for boot probing */
7083c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
7084c6fd2807SJeff Garzik 
7085f58229f8STejun Heo 			ehi->probe_mask =
7086f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
7087c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
7088c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7089c6fd2807SJeff Garzik 
7090f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7091c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
7092c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
7093c6fd2807SJeff Garzik 
7094c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
7095c6fd2807SJeff Garzik 
7096c6fd2807SJeff Garzik 			/* wait for EH to finish */
7097c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
7098c6fd2807SJeff Garzik 		} else {
709944877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7100c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
710144877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
7102c6fd2807SJeff Garzik 
7103c6fd2807SJeff Garzik 			if (rc) {
7104c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
7105c6fd2807SJeff Garzik 				 * Current libata behavior will
7106c6fd2807SJeff Garzik 				 * tear down everything when
7107c6fd2807SJeff Garzik 				 * the module is removed
7108c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
7109c6fd2807SJeff Garzik 				 */
7110c6fd2807SJeff Garzik 			}
7111c6fd2807SJeff Garzik 		}
7112c6fd2807SJeff Garzik 	}
7113c6fd2807SJeff Garzik 
7114c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
7115c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
7116cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7117cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7118c6fd2807SJeff Garzik 
71191ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
7120ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
7121c6fd2807SJeff Garzik 	}
7122c6fd2807SJeff Garzik 
7123f3187195STejun Heo 	return 0;
7124f3187195STejun Heo }
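
/*
 * Example (sketch): when the usable port count is only known after
 * allocation, a driver may shrink host->n_ports before registering;
 * the surplus ports allocated earlier are freed right here.  The
 * names max_guess, detected_ports and my_sht are placeholders.
 *
 *	host = ata_host_alloc(dev, max_guess);
 *	if (!host)
 *		return -ENOMEM;
 *
 *	host->n_ports = detected_ports;
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	return ata_host_register(host, &my_sht);
 */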
7125f3187195STejun Heo 
7126f3187195STejun Heo /**
7127f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
7128f5cda257STejun Heo  *	@host: target ATA host
7129f5cda257STejun Heo  *	@irq: IRQ to request
7130f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
7131f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
7132f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
7133f5cda257STejun Heo  *
7134f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
7135f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
7136f5cda257STejun Heo  *	request IRQ and register it.  This helper takes necessary
7137f5cda257STejun Heo  *	arguments and performs the three steps in one go.
7138f5cda257STejun Heo  *
71393d46b2e2SPaul Mundt  *	An invalid IRQ skips the IRQ registration and expects the host to
71403d46b2e2SPaul Mundt  *	have set polling mode on the port. In this case, @irq_handler
71413d46b2e2SPaul Mundt  *	should be NULL.
71423d46b2e2SPaul Mundt  *
7143f5cda257STejun Heo  *	LOCKING:
7144f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
7145f5cda257STejun Heo  *
7146f5cda257STejun Heo  *	RETURNS:
7147f5cda257STejun Heo  *	0 on success, -errno otherwise.
7148f5cda257STejun Heo  */
7149f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
7150f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
7151f5cda257STejun Heo 		      struct scsi_host_template *sht)
7152f5cda257STejun Heo {
7153cbcdd875STejun Heo 	int i, rc;
7154f5cda257STejun Heo 
7155f5cda257STejun Heo 	rc = ata_host_start(host);
7156f5cda257STejun Heo 	if (rc)
7157f5cda257STejun Heo 		return rc;
7158f5cda257STejun Heo 
71593d46b2e2SPaul Mundt 	/* Special case for polling mode */
71603d46b2e2SPaul Mundt 	if (!irq) {
71613d46b2e2SPaul Mundt 		WARN_ON(irq_handler);
71623d46b2e2SPaul Mundt 		return ata_host_register(host, sht);
71633d46b2e2SPaul Mundt 	}
71643d46b2e2SPaul Mundt 
7165f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7166f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
7167f5cda257STejun Heo 	if (rc)
7168f5cda257STejun Heo 		return rc;
7169f5cda257STejun Heo 
7170cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
7171cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
71724031826bSTejun Heo 
7173f5cda257STejun Heo 	rc = ata_host_register(host, sht);
7174f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
7175f5cda257STejun Heo 	if (rc)
7176f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
7177f5cda257STejun Heo 
7178f5cda257STejun Heo 	return rc;
7179f5cda257STejun Heo }
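
/*
 * Example (sketch): an interrupt-driven SFF driver can hand its
 * (possibly shared) IRQ straight to libata's ata_interrupt(), while a
 * polling-only driver passes irq == 0 and a NULL handler; my_sht is a
 * placeholder scsi_host_template.
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_interrupt,
 *			       IRQF_SHARED, &my_sht);
 *
 *	rc = ata_host_activate(host, 0, NULL, 0, &my_sht);
 */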
7180f5cda257STejun Heo 
7181f5cda257STejun Heo /**
7182c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
7183c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
7184c6fd2807SJeff Garzik  *
7185c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
7186c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
7187c6fd2807SJeff Garzik  *	be quiescent on return from this function.
7188c6fd2807SJeff Garzik  *
7189c6fd2807SJeff Garzik  *	LOCKING:
7190c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
7191c6fd2807SJeff Garzik  */
7192741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap)
7193c6fd2807SJeff Garzik {
7194c6fd2807SJeff Garzik 	unsigned long flags;
719541bda9c9STejun Heo 	struct ata_link *link;
7196f58229f8STejun Heo 	struct ata_device *dev;
7197c6fd2807SJeff Garzik 
7198c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
7199c6fd2807SJeff Garzik 		goto skip_eh;
7200c6fd2807SJeff Garzik 
7201c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
7202c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7203c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
7204c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7205c6fd2807SJeff Garzik 
7206c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
7207c6fd2807SJeff Garzik 
72087f9ad9b8STejun Heo 	/* EH is now guaranteed to see UNLOADING - EH context belongs
72097f9ad9b8STejun Heo 	 * to us.  Disable all existing devices.
7210c6fd2807SJeff Garzik 	 */
721141bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
721241bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
7213f58229f8STejun Heo 			ata_dev_disable(dev);
721441bda9c9STejun Heo 	}
7215c6fd2807SJeff Garzik 
7216c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
7217c6fd2807SJeff Garzik  * will be skipped and retries will be terminated with bad
7218c6fd2807SJeff Garzik 	 * target.
7219c6fd2807SJeff Garzik 	 */
7220c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7221c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
7222c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7223c6fd2807SJeff Garzik 
7224c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
722545a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
7226c6fd2807SJeff Garzik 
7227c6fd2807SJeff Garzik  skip_eh:
7228c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
7229cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
7230c6fd2807SJeff Garzik }
7231c6fd2807SJeff Garzik 
7232c6fd2807SJeff Garzik /**
72330529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
72340529c159STejun Heo  *	@host: Host to detach
72350529c159STejun Heo  *
72360529c159STejun Heo  *	Detach all ports of @host.
72370529c159STejun Heo  *
72380529c159STejun Heo  *	LOCKING:
72390529c159STejun Heo  *	Kernel thread context (may sleep).
72400529c159STejun Heo  */
72410529c159STejun Heo void ata_host_detach(struct ata_host *host)
72420529c159STejun Heo {
72430529c159STejun Heo 	int i;
72440529c159STejun Heo 
72450529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
72460529c159STejun Heo 		ata_port_detach(host->ports[i]);
7247562f0c2dSTejun Heo 
7248562f0c2dSTejun Heo 	/* the host is dead now, dissociate ACPI */
7249562f0c2dSTejun Heo 	ata_acpi_dissociate(host);
72500529c159STejun Heo }
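
/*
 * Example (sketch): non-PCI bus glue typically calls ata_host_detach()
 * from its remove callback, mirroring ata_pci_remove_one() below; the
 * platform driver here is hypothetical.
 *
 *	static int my_platform_remove(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_detach(host);
 *		return 0;
 *	}
 */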
72510529c159STejun Heo 
7252c6fd2807SJeff Garzik /**
7253c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
7254c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
7255c6fd2807SJeff Garzik  *
7256c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
7257c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7258c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
7259c6fd2807SJeff Garzik  *	relative to cmd_addr.
7260c6fd2807SJeff Garzik  *
7261c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7262c6fd2807SJeff Garzik  */
7263c6fd2807SJeff Garzik 
7264c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
7265c6fd2807SJeff Garzik {
7266c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7267c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7268c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7269c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7270c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7271c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7272c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7273c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7274c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7275c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7276c6fd2807SJeff Garzik }
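
/*
 * Example (sketch): an SFF driver fills in the command and control
 * block bases it mapped itself and lets ata_std_ports() derive the
 * remaining taskfile register addresses; cmd_base and ctl_base are
 * placeholders for driver-mapped __iomem pointers.
 *
 *	ap->ioaddr.cmd_addr = cmd_base;
 *	ap->ioaddr.altstatus_addr = ctl_base;
 *	ap->ioaddr.ctl_addr = ctl_base;
 *	ata_std_ports(&ap->ioaddr);
 */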
7277c6fd2807SJeff Garzik 
7278c6fd2807SJeff Garzik 
7279c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7280c6fd2807SJeff Garzik 
7281c6fd2807SJeff Garzik /**
7282c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
7283c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
7284c6fd2807SJeff Garzik  *
7285b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
7286b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
7287b878ca5dSTejun Heo  *	release is handled via devres.
7288c6fd2807SJeff Garzik  *
7289c6fd2807SJeff Garzik  *	LOCKING:
7290c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
7291c6fd2807SJeff Garzik  */
7292c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
7293c6fd2807SJeff Garzik {
72942855568bSJeff Garzik 	struct device *dev = &pdev->dev;
7295cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
7296c6fd2807SJeff Garzik 
7297f0d36efdSTejun Heo 	ata_host_detach(host);
7298c6fd2807SJeff Garzik }
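
/*
 * Example (sketch): drivers normally wire this helper directly into
 * their struct pci_driver; every name other than ata_pci_remove_one
 * below is a placeholder.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_ata",
 *		.id_table	= my_pci_tbl,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *	};
 */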
7299c6fd2807SJeff Garzik 
7300c6fd2807SJeff Garzik /* move to PCI subsystem */
7301c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7302c6fd2807SJeff Garzik {
7303c6fd2807SJeff Garzik 	unsigned long tmp = 0;
7304c6fd2807SJeff Garzik 
7305c6fd2807SJeff Garzik 	switch (bits->width) {
7306c6fd2807SJeff Garzik 	case 1: {
7307c6fd2807SJeff Garzik 		u8 tmp8 = 0;
7308c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
7309c6fd2807SJeff Garzik 		tmp = tmp8;
7310c6fd2807SJeff Garzik 		break;
7311c6fd2807SJeff Garzik 	}
7312c6fd2807SJeff Garzik 	case 2: {
7313c6fd2807SJeff Garzik 		u16 tmp16 = 0;
7314c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
7315c6fd2807SJeff Garzik 		tmp = tmp16;
7316c6fd2807SJeff Garzik 		break;
7317c6fd2807SJeff Garzik 	}
7318c6fd2807SJeff Garzik 	case 4: {
7319c6fd2807SJeff Garzik 		u32 tmp32 = 0;
7320c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
7321c6fd2807SJeff Garzik 		tmp = tmp32;
7322c6fd2807SJeff Garzik 		break;
7323c6fd2807SJeff Garzik 	}
7324c6fd2807SJeff Garzik 
7325c6fd2807SJeff Garzik 	default:
7326c6fd2807SJeff Garzik 		return -EINVAL;
7327c6fd2807SJeff Garzik 	}
7328c6fd2807SJeff Garzik 
7329c6fd2807SJeff Garzik 	tmp &= bits->mask;
7330c6fd2807SJeff Garzik 
7331c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
7332c6fd2807SJeff Garzik }
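
/*
 * Example (sketch): probe code can use this to test a BIOS "port
 * enable" bit before bringing a port up.  The four initializers are
 * the PCI config register, access width in bytes, mask and expected
 * value; the offset and bit below are hypothetical.
 *
 *	static const struct pci_bits my_enable_bits = {
 *		0x41, 1, 0x80, 0x80
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;
 */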
7333c6fd2807SJeff Garzik 
73346ffa01d8STejun Heo #ifdef CONFIG_PM
7335c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7336c6fd2807SJeff Garzik {
7337c6fd2807SJeff Garzik 	pci_save_state(pdev);
7338c6fd2807SJeff Garzik 	pci_disable_device(pdev);
73394c90d971STejun Heo 
73404c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
7341c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
7342c6fd2807SJeff Garzik }
7343c6fd2807SJeff Garzik 
7344553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
7345c6fd2807SJeff Garzik {
7346553c4aa6STejun Heo 	int rc;
7347553c4aa6STejun Heo 
7348c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
7349c6fd2807SJeff Garzik 	pci_restore_state(pdev);
7350553c4aa6STejun Heo 
7351f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
7352553c4aa6STejun Heo 	if (rc) {
7353553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
7354553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
7355553c4aa6STejun Heo 		return rc;
7356553c4aa6STejun Heo 	}
7357553c4aa6STejun Heo 
7358c6fd2807SJeff Garzik 	pci_set_master(pdev);
7359553c4aa6STejun Heo 	return 0;
7360c6fd2807SJeff Garzik }
7361c6fd2807SJeff Garzik 
7362c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7363c6fd2807SJeff Garzik {
7364cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7365c6fd2807SJeff Garzik 	int rc = 0;
7366c6fd2807SJeff Garzik 
7367cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
7368c6fd2807SJeff Garzik 	if (rc)
7369c6fd2807SJeff Garzik 		return rc;
7370c6fd2807SJeff Garzik 
7371c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
7372c6fd2807SJeff Garzik 
7373c6fd2807SJeff Garzik 	return 0;
7374c6fd2807SJeff Garzik }
7375c6fd2807SJeff Garzik 
7376c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
7377c6fd2807SJeff Garzik {
7378cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7379553c4aa6STejun Heo 	int rc;
7380c6fd2807SJeff Garzik 
7381553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
7382553c4aa6STejun Heo 	if (rc == 0)
7383cca3974eSJeff Garzik 		ata_host_resume(host);
7384553c4aa6STejun Heo 	return rc;
7385c6fd2807SJeff Garzik }
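
/*
 * Example (sketch): a driver with controller-specific state to restore
 * can open-code its resume path around the do_resume helper instead of
 * using ata_pci_device_resume() directly; my_restore_regs is a
 * placeholder.
 *
 *	static int my_pci_resume(struct pci_dev *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *		int rc;
 *
 *		rc = ata_pci_device_do_resume(pdev);
 *		if (rc)
 *			return rc;
 *
 *		my_restore_regs(host);
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */
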
73866ffa01d8STejun Heo #endif /* CONFIG_PM */
73876ffa01d8STejun Heo 
7388c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7389c6fd2807SJeff Garzik 
7390c6fd2807SJeff Garzik 
7391c6fd2807SJeff Garzik static int __init ata_init(void)
7392c6fd2807SJeff Garzik {
7393c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
7394c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7395c6fd2807SJeff Garzik 	if (!ata_wq)
7396c6fd2807SJeff Garzik 		return -ENOMEM;
7397c6fd2807SJeff Garzik 
7398c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7399c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7400c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7401c6fd2807SJeff Garzik 		return -ENOMEM;
7402c6fd2807SJeff Garzik 	}
7403c6fd2807SJeff Garzik 
7404c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7405c6fd2807SJeff Garzik 	return 0;
7406c6fd2807SJeff Garzik }
7407c6fd2807SJeff Garzik 
7408c6fd2807SJeff Garzik static void __exit ata_exit(void)
7409c6fd2807SJeff Garzik {
7410c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7411c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7412c6fd2807SJeff Garzik }
7413c6fd2807SJeff Garzik 
7414a4625085SBrian King subsys_initcall(ata_init);
7415c6fd2807SJeff Garzik module_exit(ata_exit);
7416c6fd2807SJeff Garzik 
7417c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7418c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7419c6fd2807SJeff Garzik 
7420c6fd2807SJeff Garzik int ata_ratelimit(void)
7421c6fd2807SJeff Garzik {
7422c6fd2807SJeff Garzik 	int rc;
7423c6fd2807SJeff Garzik 	unsigned long flags;
7424c6fd2807SJeff Garzik 
7425c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7426c6fd2807SJeff Garzik 
7427c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7428c6fd2807SJeff Garzik 		rc = 1;
7429c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7430c6fd2807SJeff Garzik 	} else
7431c6fd2807SJeff Garzik 		rc = 0;
7432c6fd2807SJeff Garzik 
7433c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7434c6fd2807SJeff Garzik 
7435c6fd2807SJeff Garzik 	return rc;
7436c6fd2807SJeff Garzik }
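
/*
 * Example (sketch): interrupt and error paths use this to keep a noisy
 * condition from flooding the log.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */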
7437c6fd2807SJeff Garzik 
7438c6fd2807SJeff Garzik /**
7439c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7440c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7441c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7442c6fd2807SJeff Garzik  *	@val: Wait condition
7443c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7444c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7445c6fd2807SJeff Garzik  *
7446c6fd2807SJeff Garzik  *	Waiting for some bits of register to change is a common
7447c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
7448c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
7449c6fd2807SJeff Garzik  *
7450c6fd2807SJeff Garzik  *	(*@reg & mask) != val
7451c6fd2807SJeff Garzik  *
7452c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7453c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7454c6fd2807SJeff Garzik  *
7455c6fd2807SJeff Garzik  *	LOCKING:
7456c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7457c6fd2807SJeff Garzik  *
7458c6fd2807SJeff Garzik  *	RETURNS:
7459c6fd2807SJeff Garzik  *	The final register value.
7460c6fd2807SJeff Garzik  */
7461c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7462c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7463c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7464c6fd2807SJeff Garzik {
7465c6fd2807SJeff Garzik 	unsigned long timeout;
7466c6fd2807SJeff Garzik 	u32 tmp;
7467c6fd2807SJeff Garzik 
7468c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7469c6fd2807SJeff Garzik 
7470c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7471c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7472c6fd2807SJeff Garzik 	 * eat away the timeout.
7473c6fd2807SJeff Garzik 	 */
7474c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7475c6fd2807SJeff Garzik 
7476c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7477c6fd2807SJeff Garzik 		msleep(interval_msec);
7478c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7479c6fd2807SJeff Garzik 	}
7480c6fd2807SJeff Garzik 
7481c6fd2807SJeff Garzik 	return tmp;
7482c6fd2807SJeff Garzik }
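
/*
 * Example (sketch): waiting up to 500ms, polling every 10ms, for a
 * hypothetical busy bit to clear in an MMIO status register; MY_STATUS
 * and MY_BUSY are placeholders.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 500);
 *	if (status & MY_BUSY)
 *		return -EBUSY;
 *
 * If the busy bit is still set in the returned value, the wait timed
 * out.
 */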
7483c6fd2807SJeff Garzik 
7484c6fd2807SJeff Garzik /*
7485c6fd2807SJeff Garzik  * Dummy port_ops
7486c6fd2807SJeff Garzik  */
7487c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7488c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7489c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7490c6fd2807SJeff Garzik 
7491c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7492c6fd2807SJeff Garzik {
7493c6fd2807SJeff Garzik 	return ATA_DRDY;
7494c6fd2807SJeff Garzik }
7495c6fd2807SJeff Garzik 
7496c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7497c6fd2807SJeff Garzik {
7498c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7499c6fd2807SJeff Garzik }
7500c6fd2807SJeff Garzik 
7501c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7502c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7503c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7504c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7505c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7506c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7507c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7508c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7509c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7510c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7511c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7512c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7513c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7514c6fd2807SJeff Garzik };
7515c6fd2807SJeff Garzik 
751621b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
751721b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
751821b0ad4fSTejun Heo };
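
/*
 * Example (sketch): a controller whose second channel is disabled can
 * hand ata_dummy_port_info to ata_host_alloc_pinfo() for that slot;
 * the port then shows up as "DUMMY" when the host is registered.
 * my_port_info is a placeholder for the driver's real port_info.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, 2);
 */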
751921b0ad4fSTejun Heo 
7520c6fd2807SJeff Garzik /*
7521c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7522c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7523c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7524c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7525c6fd2807SJeff Garzik  */
7526c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7527c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7528c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7529c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
753021b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7531c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7532c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7533cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7534f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7535f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7536ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7537f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7538f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
75390529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7540c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7541c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7542c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7543c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7544c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7545c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7546c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7547c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7548c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
754943727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7550c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7551c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
75526357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_pack_xfermask);
75536357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
75546357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
75556357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
75566357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
75576357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_mode_string);
75586357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_id_xfermask);
7559c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7560c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7561c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7562c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7563d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7564c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
756504351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
75660d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
75670d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
756831cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7569c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7570d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7571c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7572c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7573c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7574c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7575c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7576c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7577c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7578c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7579c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7580c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7581c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7582c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
758310305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7584c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7585936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7586936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7587c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7588c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7589c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7590cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7591c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7592c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7593c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7594c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7595c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7596c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7597c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7598c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
759988ff6eafSTejun Heo EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7600d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7601c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7602c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7603c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7604c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7605c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7606c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7607c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7608c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7609c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7610c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7611936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7612936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
76136ffa01d8STejun Heo #ifdef CONFIG_PM
7614cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7615cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
76166ffa01d8STejun Heo #endif /* CONFIG_PM */
7617c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7618c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
7619c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7620c6fd2807SJeff Garzik 
7621c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
76226357357cSTejun Heo EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7623c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7624c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7625a0f79b92STejun Heo EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7626c6fd2807SJeff Garzik 
7627c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7628c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7629d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
76301626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7631d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
76324e6b79faSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
7633c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7634c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
76356ffa01d8STejun Heo #ifdef CONFIG_PM
7636c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7637c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7638c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7639c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
76406ffa01d8STejun Heo #endif /* CONFIG_PM */
7641c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7642c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7643c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7644c6fd2807SJeff Garzik 
764531f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
76463af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
76473af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
76483af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
76493af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
76503af9a77aSTejun Heo 
7651b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7652b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7653b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7654cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7655cbcdd875STejun Heo #ifdef CONFIG_PCI
7656cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7657cbcdd875STejun Heo #endif /* CONFIG_PCI */
7658c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7659dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7660c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7661c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
76627d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7663c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7664c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7665c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7666c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7667c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
766883625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7669a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7670be0d18dfSAlan Cox 
7671be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7672be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7673be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7674c88f90c3STejun Heo EXPORT_SYMBOL_GPL(ata_cable_ignore);
7675be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7676