xref: /openbmc/linux/drivers/ata/libata-core.c (revision a520f261)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36c6fd2807SJeff Garzik #include <linux/module.h>
37c6fd2807SJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <linux/init.h>
39c6fd2807SJeff Garzik #include <linux/list.h>
40c6fd2807SJeff Garzik #include <linux/mm.h>
41c6fd2807SJeff Garzik #include <linux/highmem.h>
42c6fd2807SJeff Garzik #include <linux/spinlock.h>
43c6fd2807SJeff Garzik #include <linux/blkdev.h>
44c6fd2807SJeff Garzik #include <linux/delay.h>
45c6fd2807SJeff Garzik #include <linux/timer.h>
46c6fd2807SJeff Garzik #include <linux/interrupt.h>
47c6fd2807SJeff Garzik #include <linux/completion.h>
48c6fd2807SJeff Garzik #include <linux/suspend.h>
49c6fd2807SJeff Garzik #include <linux/workqueue.h>
50c6fd2807SJeff Garzik #include <linux/jiffies.h>
51c6fd2807SJeff Garzik #include <linux/scatterlist.h>
52c6fd2807SJeff Garzik #include <scsi/scsi.h>
53c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
54c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
55c6fd2807SJeff Garzik #include <linux/libata.h>
56c6fd2807SJeff Garzik #include <asm/io.h>
57c6fd2807SJeff Garzik #include <asm/semaphore.h>
58c6fd2807SJeff Garzik #include <asm/byteorder.h>
59c6fd2807SJeff Garzik 
60c6fd2807SJeff Garzik #include "libata.h"
61c6fd2807SJeff Garzik 
628bc3fc47SJeff Garzik #define DRV_VERSION	"2.21"	/* must be exactly four chars */
63fda0efc5SJeff Garzik 
64fda0efc5SJeff Garzik 
65c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
67c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
68c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
69c6fd2807SJeff Garzik 
70c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
71c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
72c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
7475683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75c6fd2807SJeff Garzik 
76f3187195STejun Heo unsigned int ata_print_id = 1;
77c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
78c6fd2807SJeff Garzik 
79c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
80c6fd2807SJeff Garzik 
81c6fd2807SJeff Garzik int atapi_enabled = 1;
82c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
83c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84c6fd2807SJeff Garzik 
85c6fd2807SJeff Garzik int atapi_dmadir = 0;
86c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
87c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88c6fd2807SJeff Garzik 
89c6fd2807SJeff Garzik int libata_fua = 0;
90c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
91c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
92c6fd2807SJeff Garzik 
931e999736SAlan Cox static int ata_ignore_hpa = 0;
941e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
951e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
961e999736SAlan Cox 
97c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
98c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
99c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
100c6fd2807SJeff Garzik 
101d7d0dad6SJeff Garzik int libata_noacpi = 1;
102d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
10311ef697bSKristen Carlson Accardi MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
10411ef697bSKristen Carlson Accardi 
105c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
106c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
107c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
108c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
109c6fd2807SJeff Garzik 
110c6fd2807SJeff Garzik 
111c6fd2807SJeff Garzik /**
112c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
113c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
114c6fd2807SJeff Garzik  *	@fis: Buffer into which data will be output
115c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
116c6fd2807SJeff Garzik  *
117c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
118c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
119c6fd2807SJeff Garzik  *
120c6fd2807SJeff Garzik  *	LOCKING:
121c6fd2807SJeff Garzik  *	Inherited from caller.
122c6fd2807SJeff Garzik  */
123c6fd2807SJeff Garzik 
124c6fd2807SJeff Garzik void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
125c6fd2807SJeff Garzik {
126c6fd2807SJeff Garzik 	fis[0] = 0x27;	/* Register - Host to Device FIS */
127c6fd2807SJeff Garzik 	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
128c6fd2807SJeff Garzik 					    bit 7 indicates Command FIS */
129c6fd2807SJeff Garzik 	fis[2] = tf->command;
130c6fd2807SJeff Garzik 	fis[3] = tf->feature;
131c6fd2807SJeff Garzik 
132c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
133c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
134c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
135c6fd2807SJeff Garzik 	fis[7] = tf->device;
136c6fd2807SJeff Garzik 
137c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
138c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
139c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
140c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
141c6fd2807SJeff Garzik 
142c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
143c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
144c6fd2807SJeff Garzik 	fis[14] = 0;
145c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
146c6fd2807SJeff Garzik 
147c6fd2807SJeff Garzik 	fis[16] = 0;
148c6fd2807SJeff Garzik 	fis[17] = 0;
149c6fd2807SJeff Garzik 	fis[18] = 0;
150c6fd2807SJeff Garzik 	fis[19] = 0;
151c6fd2807SJeff Garzik }
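/*
 * Illustrative sketch, not part of the driver: feeding a READ DMA EXT
 * taskfile (LBA 0x12345678, 1 sector, feature 0, device 0x40, PM port 0)
 * through the helper above produces a 20-byte H2D Register FIS beginning
 *
 *	27 80 25 00  78 56 34 40  12 00 00 00  01 00 ...
 *
 * i.e. FIS type 0x27, the Command bit set in byte 1, the opcode in byte 2,
 * the low LBA bytes plus device register in bytes 4-7, and the HOB bytes
 * and sector count following.
 */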
152c6fd2807SJeff Garzik 
153c6fd2807SJeff Garzik /**
154c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
155c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
156c6fd2807SJeff Garzik  *	@tf: Taskfile to output
157c6fd2807SJeff Garzik  *
158c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
159c6fd2807SJeff Garzik  *
160c6fd2807SJeff Garzik  *	LOCKING:
161c6fd2807SJeff Garzik  *	Inherited from caller.
162c6fd2807SJeff Garzik  */
163c6fd2807SJeff Garzik 
164c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
165c6fd2807SJeff Garzik {
166c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
167c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
168c6fd2807SJeff Garzik 
169c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
170c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
171c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
172c6fd2807SJeff Garzik 	tf->device	= fis[7];
173c6fd2807SJeff Garzik 
174c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
175c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
176c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
177c6fd2807SJeff Garzik 
178c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
179c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
180c6fd2807SJeff Garzik }
181c6fd2807SJeff Garzik 
182c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
183c6fd2807SJeff Garzik 	/* pio multi */
184c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
185c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
186c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
187c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
188c6fd2807SJeff Garzik 	0,
189c6fd2807SJeff Garzik 	0,
190c6fd2807SJeff Garzik 	0,
191c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
192c6fd2807SJeff Garzik 	/* pio */
193c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
194c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
195c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
196c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
197c6fd2807SJeff Garzik 	0,
198c6fd2807SJeff Garzik 	0,
199c6fd2807SJeff Garzik 	0,
200c6fd2807SJeff Garzik 	0,
201c6fd2807SJeff Garzik 	/* dma */
202c6fd2807SJeff Garzik 	ATA_CMD_READ,
203c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
204c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
205c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
206c6fd2807SJeff Garzik 	0,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	0,
209c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
210c6fd2807SJeff Garzik };
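/*
 * Layout note: each group of eight entries above covers one transfer
 * protocol (PIO multi, PIO, DMA) and is indexed by fua*4 + lba48*2 + write,
 * so a DMA FUA write resolves to entry 16 + 4 + 2 + 1, ATA_CMD_WRITE_FUA_EXT,
 * while the zero entries mark command combinations that do not exist.
 */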
211c6fd2807SJeff Garzik 
212c6fd2807SJeff Garzik /**
213c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
214bd056d7eSTejun Heo  *	@tf: command to examine and configure
215bd056d7eSTejun Heo  *	@dev: device tf belongs to
216c6fd2807SJeff Garzik  *
217c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
218c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
219c6fd2807SJeff Garzik  *
220c6fd2807SJeff Garzik  *	LOCKING:
221c6fd2807SJeff Garzik  *	caller.
222c6fd2807SJeff Garzik  */
223bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
224c6fd2807SJeff Garzik {
225c6fd2807SJeff Garzik 	u8 cmd;
226c6fd2807SJeff Garzik 
227c6fd2807SJeff Garzik 	int index, fua, lba48, write;
228c6fd2807SJeff Garzik 
229c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
230c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
231c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
232c6fd2807SJeff Garzik 
233c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
234c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
235c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
236bd056d7eSTejun Heo 	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
237c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
238c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
239c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
240c6fd2807SJeff Garzik 	} else {
241c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
242c6fd2807SJeff Garzik 		index = 16;
243c6fd2807SJeff Garzik 	}
244c6fd2807SJeff Garzik 
245c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
246c6fd2807SJeff Garzik 	if (cmd) {
247c6fd2807SJeff Garzik 		tf->command = cmd;
248c6fd2807SJeff Garzik 		return 0;
249c6fd2807SJeff Garzik 	}
250c6fd2807SJeff Garzik 	return -1;
251c6fd2807SJeff Garzik }
252c6fd2807SJeff Garzik 
253c6fd2807SJeff Garzik /**
25435b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
25535b649feSTejun Heo  *	@tf: ATA taskfile of interest
25635b649feSTejun Heo  *	@dev: ATA device @tf belongs to
25735b649feSTejun Heo  *
25835b649feSTejun Heo  *	LOCKING:
25935b649feSTejun Heo  *	None.
26035b649feSTejun Heo  *
26135b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
26235b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
26335b649feSTejun Heo  *	flags select the address format to use.
26435b649feSTejun Heo  *
26535b649feSTejun Heo  *	RETURNS:
26635b649feSTejun Heo  *	Block address read from @tf.
26735b649feSTejun Heo  */
26835b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
26935b649feSTejun Heo {
27035b649feSTejun Heo 	u64 block = 0;
27135b649feSTejun Heo 
27235b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
27335b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
27435b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
27535b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
27635b649feSTejun Heo 			block |= tf->hob_lbal << 24;
27735b649feSTejun Heo 		} else
27835b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
27935b649feSTejun Heo 
28035b649feSTejun Heo 		block |= tf->lbah << 16;
28135b649feSTejun Heo 		block |= tf->lbam << 8;
28235b649feSTejun Heo 		block |= tf->lbal;
28335b649feSTejun Heo 	} else {
28435b649feSTejun Heo 		u32 cyl, head, sect;
28535b649feSTejun Heo 
28635b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
28735b649feSTejun Heo 		head = tf->device & 0xf;
28835b649feSTejun Heo 		sect = tf->lbal;
28935b649feSTejun Heo 
29035b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
29135b649feSTejun Heo 	}
29235b649feSTejun Heo 
29335b649feSTejun Heo 	return block;
29435b649feSTejun Heo }
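/*
 * Worked example (illustrative): with ATA_TFLAG_LBA and ATA_TFLAG_LBA48 set
 * and hob_lbal = 0x01, lbah = 0x02, lbam = 0x03, lbal = 0x04 (remaining HOB
 * bytes zero), the helper above returns block 0x01020304.
 */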
29535b649feSTejun Heo 
29635b649feSTejun Heo /**
297bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
298bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
299bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
300bd056d7eSTejun Heo  *	@block: Block address
301bd056d7eSTejun Heo  *	@n_block: Number of blocks
302bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
303bd056d7eSTejun Heo  *	@tag: tag
304bd056d7eSTejun Heo  *
305bd056d7eSTejun Heo  *	LOCKING:
306bd056d7eSTejun Heo  *	None.
307bd056d7eSTejun Heo  *
308bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
309bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
310bd056d7eSTejun Heo  *
311bd056d7eSTejun Heo  *	RETURNS:
312bd056d7eSTejun Heo  *
313bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
314bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
315bd056d7eSTejun Heo  */
316bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
317bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
318bd056d7eSTejun Heo 		    unsigned int tag)
319bd056d7eSTejun Heo {
320bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
321bd056d7eSTejun Heo 	tf->flags |= tf_flags;
322bd056d7eSTejun Heo 
3236d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
324bd056d7eSTejun Heo 		/* yay, NCQ */
325bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
326bd056d7eSTejun Heo 			return -ERANGE;
327bd056d7eSTejun Heo 
328bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
329bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
330bd056d7eSTejun Heo 
331bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
332bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
333bd056d7eSTejun Heo 		else
334bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
335bd056d7eSTejun Heo 
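		/* FPDMA (NCQ) commands carry the queue tag in bits 7:3 of
		 * the Sector Count field and the transfer length in the
		 * Features register pair, which the assignments below encode.
		 */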
336bd056d7eSTejun Heo 		tf->nsect = tag << 3;
337bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
338bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
339bd056d7eSTejun Heo 
340bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
341bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
342bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
343bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
344bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
345bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
346bd056d7eSTejun Heo 
347bd056d7eSTejun Heo 		tf->device = 1 << 6;
348bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
349bd056d7eSTejun Heo 			tf->device |= 1 << 7;
350bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
351bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
352bd056d7eSTejun Heo 
353bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
354bd056d7eSTejun Heo 			/* use LBA28 */
355bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
356bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
357bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
358bd056d7eSTejun Heo 				return -ERANGE;
359bd056d7eSTejun Heo 
360bd056d7eSTejun Heo 			/* use LBA48 */
361bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
362bd056d7eSTejun Heo 
363bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
364bd056d7eSTejun Heo 
365bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
366bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
367bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
368bd056d7eSTejun Heo 		} else
369bd056d7eSTejun Heo 			/* request too large even for LBA48 */
370bd056d7eSTejun Heo 			return -ERANGE;
371bd056d7eSTejun Heo 
372bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
373bd056d7eSTejun Heo 			return -EINVAL;
374bd056d7eSTejun Heo 
375bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
376bd056d7eSTejun Heo 
377bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
378bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
379bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
380bd056d7eSTejun Heo 
381bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
382bd056d7eSTejun Heo 	} else {
383bd056d7eSTejun Heo 		/* CHS */
384bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
385bd056d7eSTejun Heo 
386bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
387bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
388bd056d7eSTejun Heo 			return -ERANGE;
389bd056d7eSTejun Heo 
390bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
391bd056d7eSTejun Heo 			return -EINVAL;
392bd056d7eSTejun Heo 
393bd056d7eSTejun Heo 		/* Convert LBA to CHS */
394bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
395bd056d7eSTejun Heo 		cyl   = track / dev->heads;
396bd056d7eSTejun Heo 		head  = track % dev->heads;
397bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
398bd056d7eSTejun Heo 
399bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
400bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
401bd056d7eSTejun Heo 
402bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
403bd056d7eSTejun Heo 		   Cylinder: 0-65535
404bd056d7eSTejun Heo 		   Head: 0-15
405bd056d7eSTejun Heo 		   Sector: 1-255 */
406bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
407bd056d7eSTejun Heo 			return -ERANGE;
408bd056d7eSTejun Heo 
409bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
410bd056d7eSTejun Heo 		tf->lbal = sect;
411bd056d7eSTejun Heo 		tf->lbam = cyl;
412bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
413bd056d7eSTejun Heo 		tf->device |= head;
414bd056d7eSTejun Heo 	}
415bd056d7eSTejun Heo 
416bd056d7eSTejun Heo 	return 0;
417bd056d7eSTejun Heo }
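/*
 * Minimal usage sketch (illustrative only, not a caller in this file):
 * building a plain, non-NCQ read of 8 sectors at LBA 0x123456 could look
 * like
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, 0x123456, 8, 0, ATA_TAG_INTERNAL))
 *		return -EIO;
 *
 * Passing ATA_TAG_INTERNAL keeps the helper off the NCQ path; the LBA28 vs
 * LBA48 and PIO vs DMA decisions then follow dev->flags as described above.
 */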
418bd056d7eSTejun Heo 
419bd056d7eSTejun Heo /**
420c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
421c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
422c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
423c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
424c6fd2807SJeff Garzik  *
425c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
426c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
427c6fd2807SJeff Garzik  *
428c6fd2807SJeff Garzik  *	LOCKING:
429c6fd2807SJeff Garzik  *	None.
430c6fd2807SJeff Garzik  *
431c6fd2807SJeff Garzik  *	RETURNS:
432c6fd2807SJeff Garzik  *	Packed xfer_mask.
433c6fd2807SJeff Garzik  */
434c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
435c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
436c6fd2807SJeff Garzik 				      unsigned int udma_mask)
437c6fd2807SJeff Garzik {
438c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
439c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
440c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
441c6fd2807SJeff Garzik }
442c6fd2807SJeff Garzik 
443c6fd2807SJeff Garzik /**
444c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
445c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
446c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
447c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
448c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
449c6fd2807SJeff Garzik  *
450c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
451c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
452c6fd2807SJeff Garzik  */
453c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
454c6fd2807SJeff Garzik 				unsigned int *pio_mask,
455c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
456c6fd2807SJeff Garzik 				unsigned int *udma_mask)
457c6fd2807SJeff Garzik {
458c6fd2807SJeff Garzik 	if (pio_mask)
459c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
460c6fd2807SJeff Garzik 	if (mwdma_mask)
461c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
462c6fd2807SJeff Garzik 	if (udma_mask)
463c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
464c6fd2807SJeff Garzik }
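/*
 * Example (illustrative): the two helpers above are inverses, so
 *
 *	mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);
 *
 * leaves pio == 0x1f, mwdma == 0x07 and udma == 0x3f, each group living in
 * its own non-overlapping bit range (PIO lowest, then MWDMA, then UDMA).
 */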
465c6fd2807SJeff Garzik 
466c6fd2807SJeff Garzik static const struct ata_xfer_ent {
467c6fd2807SJeff Garzik 	int shift, bits;
468c6fd2807SJeff Garzik 	u8 base;
469c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
470c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
471c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
472c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
473c6fd2807SJeff Garzik 	{ -1, },
474c6fd2807SJeff Garzik };
475c6fd2807SJeff Garzik 
476c6fd2807SJeff Garzik /**
477c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
478c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
479c6fd2807SJeff Garzik  *
480c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
481c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
482c6fd2807SJeff Garzik  *
483c6fd2807SJeff Garzik  *	LOCKING:
484c6fd2807SJeff Garzik  *	None.
485c6fd2807SJeff Garzik  *
486c6fd2807SJeff Garzik  *	RETURNS:
487c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
488c6fd2807SJeff Garzik  */
489c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
490c6fd2807SJeff Garzik {
491c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
492c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
493c6fd2807SJeff Garzik 
494c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
495c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
496c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
497c6fd2807SJeff Garzik 	return 0;
498c6fd2807SJeff Garzik }
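/*
 * Example (illustrative): only the most significant set bit matters, so a
 * mask covering MWDMA0-MWDMA2 resolves to XFER_MW_DMA_2, while setting any
 * UDMA bit as well would make the corresponding UDMA entry win instead.
 */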
499c6fd2807SJeff Garzik 
500c6fd2807SJeff Garzik /**
501c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
502c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
503c6fd2807SJeff Garzik  *
504c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
505c6fd2807SJeff Garzik  *
506c6fd2807SJeff Garzik  *	LOCKING:
507c6fd2807SJeff Garzik  *	None.
508c6fd2807SJeff Garzik  *
509c6fd2807SJeff Garzik  *	RETURNS:
510c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
511c6fd2807SJeff Garzik  */
512c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
513c6fd2807SJeff Garzik {
514c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
515c6fd2807SJeff Garzik 
516c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
517c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
518c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
519c6fd2807SJeff Garzik 	return 0;
520c6fd2807SJeff Garzik }
521c6fd2807SJeff Garzik 
522c6fd2807SJeff Garzik /**
523c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
524c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
525c6fd2807SJeff Garzik  *
526c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
527c6fd2807SJeff Garzik  *
528c6fd2807SJeff Garzik  *	LOCKING:
529c6fd2807SJeff Garzik  *	None.
530c6fd2807SJeff Garzik  *
531c6fd2807SJeff Garzik  *	RETURNS:
532c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
533c6fd2807SJeff Garzik  */
534c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
535c6fd2807SJeff Garzik {
536c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
537c6fd2807SJeff Garzik 
538c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
539c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
540c6fd2807SJeff Garzik 			return ent->shift;
541c6fd2807SJeff Garzik 	return -1;
542c6fd2807SJeff Garzik }
543c6fd2807SJeff Garzik 
544c6fd2807SJeff Garzik /**
545c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
546c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
547c6fd2807SJeff Garzik  *
548c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
549c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
550c6fd2807SJeff Garzik  *
551c6fd2807SJeff Garzik  *	LOCKING:
552c6fd2807SJeff Garzik  *	None.
553c6fd2807SJeff Garzik  *
554c6fd2807SJeff Garzik  *	RETURNS:
555c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
556c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
557c6fd2807SJeff Garzik  */
558c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
559c6fd2807SJeff Garzik {
560c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
561c6fd2807SJeff Garzik 		"PIO0",
562c6fd2807SJeff Garzik 		"PIO1",
563c6fd2807SJeff Garzik 		"PIO2",
564c6fd2807SJeff Garzik 		"PIO3",
565c6fd2807SJeff Garzik 		"PIO4",
566b352e57dSAlan Cox 		"PIO5",
567b352e57dSAlan Cox 		"PIO6",
568c6fd2807SJeff Garzik 		"MWDMA0",
569c6fd2807SJeff Garzik 		"MWDMA1",
570c6fd2807SJeff Garzik 		"MWDMA2",
571b352e57dSAlan Cox 		"MWDMA3",
572b352e57dSAlan Cox 		"MWDMA4",
573c6fd2807SJeff Garzik 		"UDMA/16",
574c6fd2807SJeff Garzik 		"UDMA/25",
575c6fd2807SJeff Garzik 		"UDMA/33",
576c6fd2807SJeff Garzik 		"UDMA/44",
577c6fd2807SJeff Garzik 		"UDMA/66",
578c6fd2807SJeff Garzik 		"UDMA/100",
579c6fd2807SJeff Garzik 		"UDMA/133",
580c6fd2807SJeff Garzik 		"UDMA7",
581c6fd2807SJeff Garzik 	};
582c6fd2807SJeff Garzik 	int highbit;
583c6fd2807SJeff Garzik 
584c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
585c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
586c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
587c6fd2807SJeff Garzik 	return "<n/a>";
588c6fd2807SJeff Garzik }
589c6fd2807SJeff Garzik 
590c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
591c6fd2807SJeff Garzik {
592c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
593c6fd2807SJeff Garzik 		"1.5 Gbps",
594c6fd2807SJeff Garzik 		"3.0 Gbps",
595c6fd2807SJeff Garzik 	};
596c6fd2807SJeff Garzik 
597c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
598c6fd2807SJeff Garzik 		return "<unknown>";
599c6fd2807SJeff Garzik 	return spd_str[spd - 1];
600c6fd2807SJeff Garzik }
601c6fd2807SJeff Garzik 
602c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
603c6fd2807SJeff Garzik {
60409d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
60509d7f9b0STejun Heo 		if (ata_msg_drv(dev->ap))
606c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6074ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6084ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
609c6fd2807SJeff Garzik 		dev->class++;
610c6fd2807SJeff Garzik 	}
611c6fd2807SJeff Garzik }
612c6fd2807SJeff Garzik 
613c6fd2807SJeff Garzik /**
614c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
615c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
616c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
617c6fd2807SJeff Garzik  *
6180d5ff566STejun Heo  *	This technique was originally described in
6190d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
6200d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
6210d5ff566STejun Heo  *
6220d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
6230d5ff566STejun Heo  *	and if a device is present, it will respond by
6240d5ff566STejun Heo  *	correctly storing and echoing back the
6250d5ff566STejun Heo  *	ATA shadow register contents.
626c6fd2807SJeff Garzik  *
627c6fd2807SJeff Garzik  *	LOCKING:
628c6fd2807SJeff Garzik  *	caller.
629c6fd2807SJeff Garzik  */
630c6fd2807SJeff Garzik 
6310d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
632c6fd2807SJeff Garzik {
6330d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
6340d5ff566STejun Heo 	u8 nsect, lbal;
6350d5ff566STejun Heo 
6360d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
6370d5ff566STejun Heo 
6380d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6390d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6400d5ff566STejun Heo 
6410d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
6420d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
6430d5ff566STejun Heo 
6440d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6450d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6460d5ff566STejun Heo 
6470d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
6480d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
6490d5ff566STejun Heo 
6500d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
6510d5ff566STejun Heo 		return 1;	/* we found a device */
6520d5ff566STejun Heo 
6530d5ff566STejun Heo 	return 0;		/* nothing found */
654c6fd2807SJeff Garzik }
655c6fd2807SJeff Garzik 
656c6fd2807SJeff Garzik /**
657c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
658c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
659c6fd2807SJeff Garzik  *
660c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
661c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
662c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
663c6fd2807SJeff Garzik  *
664c6fd2807SJeff Garzik  *	LOCKING:
665c6fd2807SJeff Garzik  *	None.
666c6fd2807SJeff Garzik  *
667c6fd2807SJeff Garzik  *	RETURNS:
668c6fd2807SJeff Garzik  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
669c6fd2807SJeff Garzik  *	in the event of failure.
670c6fd2807SJeff Garzik  */
671c6fd2807SJeff Garzik 
672c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
673c6fd2807SJeff Garzik {
674c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
675c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
676c6fd2807SJeff Garzik 	 * so we check only those.  It's sufficient for uniqueness.
677c6fd2807SJeff Garzik 	 */
678c6fd2807SJeff Garzik 
679c6fd2807SJeff Garzik 	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
680c6fd2807SJeff Garzik 	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
681c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
682c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
683c6fd2807SJeff Garzik 	}
684c6fd2807SJeff Garzik 
685c6fd2807SJeff Garzik 	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
686c6fd2807SJeff Garzik 	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
687c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
688c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
689c6fd2807SJeff Garzik 	}
690c6fd2807SJeff Garzik 
691c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
692c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
693c6fd2807SJeff Garzik }
694c6fd2807SJeff Garzik 
695c6fd2807SJeff Garzik /**
696c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
697c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
698c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
699c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
700c6fd2807SJeff Garzik  *
701c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
702c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
703c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
704c6fd2807SJeff Garzik  *	and diagnostics.
705c6fd2807SJeff Garzik  *
706c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
707c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
708c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
709c6fd2807SJeff Garzik  *
710c6fd2807SJeff Garzik  *	LOCKING:
711c6fd2807SJeff Garzik  *	caller.
712c6fd2807SJeff Garzik  *
713c6fd2807SJeff Garzik  *	RETURNS:
714c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
715c6fd2807SJeff Garzik  */
716c6fd2807SJeff Garzik 
717a619f981SAkira Iguchi unsigned int
718c6fd2807SJeff Garzik ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
719c6fd2807SJeff Garzik {
720c6fd2807SJeff Garzik 	struct ata_taskfile tf;
721c6fd2807SJeff Garzik 	unsigned int class;
722c6fd2807SJeff Garzik 	u8 err;
723c6fd2807SJeff Garzik 
724c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
725c6fd2807SJeff Garzik 
726c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
727c6fd2807SJeff Garzik 
728c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
729c6fd2807SJeff Garzik 	err = tf.feature;
730c6fd2807SJeff Garzik 	if (r_err)
731c6fd2807SJeff Garzik 		*r_err = err;
732c6fd2807SJeff Garzik 
73393590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
73493590859SAlan Cox 	if (err == 0 && device == 0)
73593590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
73693590859SAlan Cox 		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
73793590859SAlan Cox 	else if (err == 1)
738c6fd2807SJeff Garzik 		/* do nothing */ ;
739c6fd2807SJeff Garzik 	else if ((device == 0) && (err == 0x81))
740c6fd2807SJeff Garzik 		/* do nothing */ ;
741c6fd2807SJeff Garzik 	else
742c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
743c6fd2807SJeff Garzik 
744c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
745c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
746c6fd2807SJeff Garzik 
747c6fd2807SJeff Garzik 	if (class == ATA_DEV_UNKNOWN)
748c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
749c6fd2807SJeff Garzik 	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
750c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
751c6fd2807SJeff Garzik 	return class;
752c6fd2807SJeff Garzik }
753c6fd2807SJeff Garzik 
754c6fd2807SJeff Garzik /**
755c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
756c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
757c6fd2807SJeff Garzik  *	@s: string into which data is output
758c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
759c6fd2807SJeff Garzik  *	@len: length of string to return. must be an even number.
760c6fd2807SJeff Garzik  *
761c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
762c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
763c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
764c6fd2807SJeff Garzik  *
765c6fd2807SJeff Garzik  *	LOCKING:
766c6fd2807SJeff Garzik  *	caller.
767c6fd2807SJeff Garzik  */
768c6fd2807SJeff Garzik 
769c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
770c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
771c6fd2807SJeff Garzik {
772c6fd2807SJeff Garzik 	unsigned int c;
773c6fd2807SJeff Garzik 
774c6fd2807SJeff Garzik 	while (len > 0) {
775c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
776c6fd2807SJeff Garzik 		*s = c;
777c6fd2807SJeff Garzik 		s++;
778c6fd2807SJeff Garzik 
779c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
780c6fd2807SJeff Garzik 		*s = c;
781c6fd2807SJeff Garzik 		s++;
782c6fd2807SJeff Garzik 
783c6fd2807SJeff Garzik 		ofs++;
784c6fd2807SJeff Garzik 		len -= 2;
785c6fd2807SJeff Garzik 	}
786c6fd2807SJeff Garzik }
787c6fd2807SJeff Garzik 
788c6fd2807SJeff Garzik /**
789c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
790c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
791c6fd2807SJeff Garzik  *	@s: string into which data is output
792c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
793c6fd2807SJeff Garzik  *	@len: length of string to return. must be an odd number.
794c6fd2807SJeff Garzik  *
795c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
796c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
797c6fd2807SJeff Garzik  *	null.  @len must be the actual maximum length (an even number) + 1.
798c6fd2807SJeff Garzik  *
799c6fd2807SJeff Garzik  *	LOCKING:
800c6fd2807SJeff Garzik  *	caller.
801c6fd2807SJeff Garzik  */
802c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
803c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
804c6fd2807SJeff Garzik {
805c6fd2807SJeff Garzik 	unsigned char *p;
806c6fd2807SJeff Garzik 
807c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
808c6fd2807SJeff Garzik 
809c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
810c6fd2807SJeff Garzik 
811c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
812c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
813c6fd2807SJeff Garzik 		p--;
814c6fd2807SJeff Garzik 	*p = '\0';
815c6fd2807SJeff Garzik }
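/*
 * Illustrative use (not taken from this file): extracting the model string
 * from an IDENTIFY DEVICE buffer, where the model field occupies words
 * 27-46 (40 bytes) and the destination must be one byte longer to hold the
 * terminator:
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(id, model, 27, sizeof(model));
 */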
816c6fd2807SJeff Garzik 
8171e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
8181e999736SAlan Cox {
8191e999736SAlan Cox 	u64 sectors = 0;
8201e999736SAlan Cox 
8211e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
8221e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
8231e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
8241e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8251e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8261e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8271e999736SAlan Cox 
8281e999736SAlan Cox 	return ++sectors;
8291e999736SAlan Cox }
8301e999736SAlan Cox 
8311e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
8321e999736SAlan Cox {
8331e999736SAlan Cox 	u64 sectors = 0;
8341e999736SAlan Cox 
8351e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
8361e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8371e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8381e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8391e999736SAlan Cox 
8401e999736SAlan Cox 	return ++sectors;
8411e999736SAlan Cox }
8421e999736SAlan Cox 
8431e999736SAlan Cox /**
8441e999736SAlan Cox  *	ata_read_native_max_address_ext	-	LBA48 native max query
8451e999736SAlan Cox  *	@dev: Device to query
8461e999736SAlan Cox  *
8471e999736SAlan Cox  *	Perform an LBA48 size query upon the device in question. Return the
8481e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
8491e999736SAlan Cox  */
8501e999736SAlan Cox 
8511e999736SAlan Cox static u64 ata_read_native_max_address_ext(struct ata_device *dev)
8521e999736SAlan Cox {
8531e999736SAlan Cox 	unsigned int err;
8541e999736SAlan Cox 	struct ata_taskfile tf;
8551e999736SAlan Cox 
8561e999736SAlan Cox 	ata_tf_init(dev, &tf);
8571e999736SAlan Cox 
8581e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
8591e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
8601e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8611e999736SAlan Cox 	tf.device |= 0x40;
8621e999736SAlan Cox 
8631e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8641e999736SAlan Cox 	if (err)
8651e999736SAlan Cox 		return 0;
8661e999736SAlan Cox 
8671e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
8681e999736SAlan Cox }
8691e999736SAlan Cox 
8701e999736SAlan Cox /**
8711e999736SAlan Cox  *	ata_read_native_max_address	-	LBA28 native max query
8721e999736SAlan Cox  *	@dev: Device to query
8731e999736SAlan Cox  *
8741e999736SAlan Cox  *	Perform an LBA28 size query upon the device in question. Return the
8751e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
8761e999736SAlan Cox  */
8771e999736SAlan Cox 
8781e999736SAlan Cox static u64 ata_read_native_max_address(struct ata_device *dev)
8791e999736SAlan Cox {
8801e999736SAlan Cox 	unsigned int err;
8811e999736SAlan Cox 	struct ata_taskfile tf;
8821e999736SAlan Cox 
8831e999736SAlan Cox 	ata_tf_init(dev, &tf);
8841e999736SAlan Cox 
8851e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX;
8861e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
8871e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8881e999736SAlan Cox 	tf.device |= 0x40;
8891e999736SAlan Cox 
8901e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8911e999736SAlan Cox 	if (err)
8921e999736SAlan Cox 		return 0;
8931e999736SAlan Cox 
8941e999736SAlan Cox 	return ata_tf_to_lba(&tf);
8951e999736SAlan Cox }
8961e999736SAlan Cox 
8971e999736SAlan Cox /**
8981e999736SAlan Cox  *	ata_set_native_max_address_ext	-	LBA48 native max set
8991e999736SAlan Cox  *	@dev: Device to query
9006b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
9011e999736SAlan Cox  *
9021e999736SAlan Cox  *	Perform an LBA48 size set max upon the device in question. Return the
9031e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
9041e999736SAlan Cox  */
9051e999736SAlan Cox 
9061e999736SAlan Cox static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
9071e999736SAlan Cox {
9081e999736SAlan Cox 	unsigned int err;
9091e999736SAlan Cox 	struct ata_taskfile tf;
9101e999736SAlan Cox 
9111e999736SAlan Cox 	new_sectors--;
9121e999736SAlan Cox 
9131e999736SAlan Cox 	ata_tf_init(dev, &tf);
9141e999736SAlan Cox 
9151e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX_EXT;
9161e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
9171e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9181e999736SAlan Cox 	tf.device |= 0x40;
9191e999736SAlan Cox 
9201e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9211e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9221e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9231e999736SAlan Cox 
9241e999736SAlan Cox 	tf.hob_lbal = (new_sectors >> 24) & 0xff;
9251e999736SAlan Cox 	tf.hob_lbam = (new_sectors >> 32) & 0xff;
9261e999736SAlan Cox 	tf.hob_lbah = (new_sectors >> 40) & 0xff;
9271e999736SAlan Cox 
9281e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9291e999736SAlan Cox 	if (err)
9301e999736SAlan Cox 		return 0;
9311e999736SAlan Cox 
9321e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
9331e999736SAlan Cox }
9341e999736SAlan Cox 
9351e999736SAlan Cox /**
9361e999736SAlan Cox  *	ata_set_native_max_address	-	LBA28 native max set
9371e999736SAlan Cox  *	@dev: Device to query
9386b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
9391e999736SAlan Cox  *
9401e999736SAlan Cox  *	Perform an LBA28 size set max upon the device in question. Return the
9411e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
9421e999736SAlan Cox  */
9431e999736SAlan Cox 
9441e999736SAlan Cox static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
9451e999736SAlan Cox {
9461e999736SAlan Cox 	unsigned int err;
9471e999736SAlan Cox 	struct ata_taskfile tf;
9481e999736SAlan Cox 
9491e999736SAlan Cox 	new_sectors--;
9501e999736SAlan Cox 
9511e999736SAlan Cox 	ata_tf_init(dev, &tf);
9521e999736SAlan Cox 
9531e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX;
9541e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
9551e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9561e999736SAlan Cox 
9571e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9581e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9591e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9601e999736SAlan Cox 	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
9611e999736SAlan Cox 
9621e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9631e999736SAlan Cox 	if (err)
9641e999736SAlan Cox 		return 0;
9651e999736SAlan Cox 
9661e999736SAlan Cox 	return ata_tf_to_lba(&tf);
9671e999736SAlan Cox }
9681e999736SAlan Cox 
9691e999736SAlan Cox /**
9701e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
9711e999736SAlan Cox  *	@dev: Device to resize
9721e999736SAlan Cox  *
9731e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
9741e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
9751e999736SAlan Cox  *	the drive has the HPA feature set enabled.
9761e999736SAlan Cox  */
9771e999736SAlan Cox 
9781e999736SAlan Cox static u64 ata_hpa_resize(struct ata_device *dev)
9791e999736SAlan Cox {
9801e999736SAlan Cox 	u64 sectors = dev->n_sectors;
9811e999736SAlan Cox 	u64 hpa_sectors;
9821e999736SAlan Cox 
9831e999736SAlan Cox 	if (ata_id_has_lba48(dev->id))
9841e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address_ext(dev);
9851e999736SAlan Cox 	else
9861e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address(dev);
9871e999736SAlan Cox 
9881e999736SAlan Cox 	if (hpa_sectors > sectors) {
9891e999736SAlan Cox 		ata_dev_printk(dev, KERN_INFO,
9901e999736SAlan Cox 			"Host Protected Area detected:\n"
9911e999736SAlan Cox 			"\tcurrent size: %lld sectors\n"
9921e999736SAlan Cox 			"\tnative size: %lld sectors\n",
993bd1d5ec6SAndrew Morton 			(long long)sectors, (long long)hpa_sectors);
9941e999736SAlan Cox 
9951e999736SAlan Cox 		if (ata_ignore_hpa) {
9961e999736SAlan Cox 			if (ata_id_has_lba48(dev->id))
9971e999736SAlan Cox 				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
9981e999736SAlan Cox 			else
999bd1d5ec6SAndrew Morton 				hpa_sectors = ata_set_native_max_address(dev,
1000bd1d5ec6SAndrew Morton 								hpa_sectors);
10011e999736SAlan Cox 
10021e999736SAlan Cox 			if (hpa_sectors) {
1003bd1d5ec6SAndrew Morton 				ata_dev_printk(dev, KERN_INFO, "native size "
1004bd1d5ec6SAndrew Morton 					"increased to %lld sectors\n",
1005bd1d5ec6SAndrew Morton 					(long long)hpa_sectors);
10061e999736SAlan Cox 				return hpa_sectors;
10071e999736SAlan Cox 			}
10081e999736SAlan Cox 		}
100937301a55STejun Heo 	} else if (hpa_sectors < sectors)
101037301a55STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
101137301a55STejun Heo 			       "is smaller than sectors (%lld)\n", __FUNCTION__,
101237301a55STejun Heo 			       (long long)hpa_sectors, (long long)sectors);
101337301a55STejun Heo 
10141e999736SAlan Cox 	return sectors;
10151e999736SAlan Cox }
10161e999736SAlan Cox 
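/*
 * Capacity lives in different IDENTIFY words depending on the addressing
 * scheme: words 100-103 for LBA48, words 60-61 for LBA28, words 57-58 for a
 * valid current CHS translation, and the default geometry in words 1, 3 and
 * 6 (cylinders, heads, sectors per track) as a last resort.
 */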
1017c6fd2807SJeff Garzik static u64 ata_id_n_sectors(const u16 *id)
1018c6fd2807SJeff Garzik {
1019c6fd2807SJeff Garzik 	if (ata_id_has_lba(id)) {
1020c6fd2807SJeff Garzik 		if (ata_id_has_lba48(id))
1021c6fd2807SJeff Garzik 			return ata_id_u64(id, 100);
1022c6fd2807SJeff Garzik 		else
1023c6fd2807SJeff Garzik 			return ata_id_u32(id, 60);
1024c6fd2807SJeff Garzik 	} else {
1025c6fd2807SJeff Garzik 		if (ata_id_current_chs_valid(id))
1026c6fd2807SJeff Garzik 			return ata_id_u32(id, 57);
1027c6fd2807SJeff Garzik 		else
1028c6fd2807SJeff Garzik 			return id[1] * id[3] * id[6];
1029c6fd2807SJeff Garzik 	}
1030c6fd2807SJeff Garzik }
1031c6fd2807SJeff Garzik 
1032c6fd2807SJeff Garzik /**
103310305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
103410305f0fSAlan  *	@dev: device to identify
1035cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
103610305f0fSAlan  *
103710305f0fSAlan  *	Set up the timing values for the device based upon the identify
103810305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
103910305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
104010305f0fSAlan  *	mode correctly when possible.
104110305f0fSAlan  *
104210305f0fSAlan  *	In addition we emit similarly formatted messages to the default
104310305f0fSAlan  *	ata_dev_set_mode handler, in order to provide consistency of
104410305f0fSAlan  *	presentation.
104510305f0fSAlan  */
104610305f0fSAlan 
104710305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
104810305f0fSAlan {
104910305f0fSAlan 	unsigned int mask;
105010305f0fSAlan 	u8 mode;
105110305f0fSAlan 
105210305f0fSAlan 	/* Pack the DMA modes */
105310305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
105410305f0fSAlan 	if (dev->id[53] & 0x04)
105510305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
105610305f0fSAlan 
105710305f0fSAlan 	/* Select the mode in use */
105810305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
105910305f0fSAlan 
106010305f0fSAlan 	if (mode != 0) {
106110305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
106210305f0fSAlan 		       ata_mode_string(mask));
106310305f0fSAlan 	} else {
106410305f0fSAlan 		/* SWDMA perhaps ? */
106510305f0fSAlan 		mode = unknown;
106610305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
106710305f0fSAlan 	}
106810305f0fSAlan 
106910305f0fSAlan 	/* Configure the device reporting */
107010305f0fSAlan 	dev->xfer_mode = mode;
107110305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
107210305f0fSAlan }
107310305f0fSAlan 
107410305f0fSAlan /**
1075c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1076c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1077c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1078c6fd2807SJeff Garzik  *
1079c6fd2807SJeff Garzik  *	This function performs no actual function.
1080c6fd2807SJeff Garzik  *
1081c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1082c6fd2807SJeff Garzik  *
1083c6fd2807SJeff Garzik  *	LOCKING:
1084c6fd2807SJeff Garzik  *	caller.
1085c6fd2807SJeff Garzik  */
1086c6fd2807SJeff Garzik void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1087c6fd2807SJeff Garzik {
1088c6fd2807SJeff Garzik }
1089c6fd2807SJeff Garzik 
1090c6fd2807SJeff Garzik 
1091c6fd2807SJeff Garzik /**
1092c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1093c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1094c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1095c6fd2807SJeff Garzik  *
1096c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1097c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1098c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1099c6fd2807SJeff Garzik  *
1100c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1101c6fd2807SJeff Garzik  *
1102c6fd2807SJeff Garzik  *	LOCKING:
1103c6fd2807SJeff Garzik  *	caller.
1104c6fd2807SJeff Garzik  */
1105c6fd2807SJeff Garzik 
1106c6fd2807SJeff Garzik void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1107c6fd2807SJeff Garzik {
1108c6fd2807SJeff Garzik 	u8 tmp;
1109c6fd2807SJeff Garzik 
1110c6fd2807SJeff Garzik 	if (device == 0)
1111c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1112c6fd2807SJeff Garzik 	else
1113c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1114c6fd2807SJeff Garzik 
11150d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1116c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1117c6fd2807SJeff Garzik }
1118c6fd2807SJeff Garzik 
1119c6fd2807SJeff Garzik /**
1120c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1121c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1122c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1123c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1124c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1125c6fd2807SJeff Garzik  *
1126c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1127c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1128c6fd2807SJeff Garzik  *	ATA channel.
1129c6fd2807SJeff Garzik  *
1130c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1131c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1132c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1133c6fd2807SJeff Garzik  *
1134c6fd2807SJeff Garzik  *	LOCKING:
1135c6fd2807SJeff Garzik  *	caller.
1136c6fd2807SJeff Garzik  */
1137c6fd2807SJeff Garzik 
1138c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1139c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1140c6fd2807SJeff Garzik {
1141c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
114244877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
114344877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1144c6fd2807SJeff Garzik 
1145c6fd2807SJeff Garzik 	if (wait)
1146c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1147c6fd2807SJeff Garzik 
1148c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1149c6fd2807SJeff Garzik 
1150c6fd2807SJeff Garzik 	if (wait) {
1151c6fd2807SJeff Garzik 		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1152c6fd2807SJeff Garzik 			msleep(150);
1153c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1154c6fd2807SJeff Garzik 	}
1155c6fd2807SJeff Garzik }
1156c6fd2807SJeff Garzik 
1157c6fd2807SJeff Garzik /**
1158c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1159c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1160c6fd2807SJeff Garzik  *
1161c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1162c6fd2807SJeff Garzik  *	page.
1163c6fd2807SJeff Garzik  *
1164c6fd2807SJeff Garzik  *	LOCKING:
1165c6fd2807SJeff Garzik  *	caller.
1166c6fd2807SJeff Garzik  */
1167c6fd2807SJeff Garzik 
1168c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1169c6fd2807SJeff Garzik {
1170c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1171c6fd2807SJeff Garzik 		"53==0x%04x  "
1172c6fd2807SJeff Garzik 		"63==0x%04x  "
1173c6fd2807SJeff Garzik 		"64==0x%04x  "
1174c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1175c6fd2807SJeff Garzik 		id[49],
1176c6fd2807SJeff Garzik 		id[53],
1177c6fd2807SJeff Garzik 		id[63],
1178c6fd2807SJeff Garzik 		id[64],
1179c6fd2807SJeff Garzik 		id[75]);
1180c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1181c6fd2807SJeff Garzik 		"81==0x%04x  "
1182c6fd2807SJeff Garzik 		"82==0x%04x  "
1183c6fd2807SJeff Garzik 		"83==0x%04x  "
1184c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1185c6fd2807SJeff Garzik 		id[80],
1186c6fd2807SJeff Garzik 		id[81],
1187c6fd2807SJeff Garzik 		id[82],
1188c6fd2807SJeff Garzik 		id[83],
1189c6fd2807SJeff Garzik 		id[84]);
1190c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1191c6fd2807SJeff Garzik 		"93==0x%04x\n",
1192c6fd2807SJeff Garzik 		id[88],
1193c6fd2807SJeff Garzik 		id[93]);
1194c6fd2807SJeff Garzik }
1195c6fd2807SJeff Garzik 
1196c6fd2807SJeff Garzik /**
1197c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1198c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1199c6fd2807SJeff Garzik  *
1200c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1201c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1202c6fd2807SJeff Garzik  *
1203c6fd2807SJeff Garzik  *	FIXME: pre-IDE drive timing (do we care?).
1204c6fd2807SJeff Garzik  *
1205c6fd2807SJeff Garzik  *	LOCKING:
1206c6fd2807SJeff Garzik  *	None.
1207c6fd2807SJeff Garzik  *
1208c6fd2807SJeff Garzik  *	RETURNS:
1209c6fd2807SJeff Garzik  *	Computed xfermask
1210c6fd2807SJeff Garzik  */
1211c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1212c6fd2807SJeff Garzik {
1213c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1214c6fd2807SJeff Garzik 
1215c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1216c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1217c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1218c6fd2807SJeff Garzik 		pio_mask <<= 3;
1219c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1220c6fd2807SJeff Garzik 	} else {
1221c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1222c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1223c6fd2807SJeff Garzik 		 * a mask.
1224c6fd2807SJeff Garzik 		 */
12257a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
122646767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
122746767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
122846767aebSAlan Cox 		else
122946767aebSAlan Cox 			pio_mask = 1;
1230c6fd2807SJeff Garzik 
1231c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1232c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1233c6fd2807SJeff Garzik 		 * process. However, it's the speeds, not the modes, that
1234c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1235c6fd2807SJeff Garzik 		 * will get this right anyway
1236c6fd2807SJeff Garzik 		 */
1237c6fd2807SJeff Garzik 	}
1238c6fd2807SJeff Garzik 
1239c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1240c6fd2807SJeff Garzik 
1241b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1242b352e57dSAlan Cox 		/*
1243b352e57dSAlan Cox 		 *	Process compact flash extended modes
1244b352e57dSAlan Cox 		 */
1245b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1246b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1247b352e57dSAlan Cox 
1248b352e57dSAlan Cox 		if (pio)
1249b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1250b352e57dSAlan Cox 		if (pio > 1)
1251b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1252b352e57dSAlan Cox 		if (dma)
1253b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1254b352e57dSAlan Cox 		if (dma > 1)
1255b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1256b352e57dSAlan Cox 	}
1257b352e57dSAlan Cox 
1258c6fd2807SJeff Garzik 	udma_mask = 0;
1259c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1260c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1261c6fd2807SJeff Garzik 
1262c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1263c6fd2807SJeff Garzik }
1264c6fd2807SJeff Garzik 
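/*
 * Worked example (illustrative): a drive with word 53 bit 1 set and
 * ATA_ID_PIO_MODES == 0x0003 yields pio_mask = (0x03 << 3) | 0x7 = 0x1f,
 * i.e. PIO modes 0-4.  ata_pack_xfermask() then folds the three masks
 * into one value (PIO, MWDMA and UDMA bits at ATA_SHIFT_PIO,
 * ATA_SHIFT_MWDMA and ATA_SHIFT_UDMA respectively), which
 * ata_mode_string() turns into the "max UDMA/100"-style text printed
 * by ata_dev_configure().
 */
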
1265c6fd2807SJeff Garzik /**
1266c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1267c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1268c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
126965f27f38SDavid Howells  *	@data: data for @fn to use
1270c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1271c6fd2807SJeff Garzik  *
1272c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1273c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1274c6fd2807SJeff Garzik  *	user's (i.e. the low level driver's) responsibility to make sure that only
1275c6fd2807SJeff Garzik  *	one task is active at any given time.
1276c6fd2807SJeff Garzik  *
1277c6fd2807SJeff Garzik  *	The libata core layer takes care of synchronization between
1278c6fd2807SJeff Garzik  *	port_task and EH, so callers of ata_port_queue_task() need not
1279c6fd2807SJeff Garzik  *	add any EH synchronization of their own.
1280c6fd2807SJeff Garzik  *
1281c6fd2807SJeff Garzik  *	LOCKING:
1282c6fd2807SJeff Garzik  *	Inherited from caller.
1283c6fd2807SJeff Garzik  */
128465f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1285c6fd2807SJeff Garzik 			 unsigned long delay)
1286c6fd2807SJeff Garzik {
128765f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
128865f27f38SDavid Howells 	ap->port_task_data = data;
1289c6fd2807SJeff Garzik 
129045a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
129145a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1292c6fd2807SJeff Garzik }
1293c6fd2807SJeff Garzik 
1294c6fd2807SJeff Garzik /**
1295c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1296c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1297c6fd2807SJeff Garzik  *
1298c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1299c6fd2807SJeff Garzik  *	be running or scheduled.
1300c6fd2807SJeff Garzik  *
1301c6fd2807SJeff Garzik  *	LOCKING:
1302c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1303c6fd2807SJeff Garzik  */
1304c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1305c6fd2807SJeff Garzik {
1306c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1307c6fd2807SJeff Garzik 
130845a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1309c6fd2807SJeff Garzik 
1310c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1311c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1312c6fd2807SJeff Garzik }
1313c6fd2807SJeff Garzik 
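/*
 * Usage sketch (my_pio_poll is hypothetical, shown for illustration):
 *
 *	static void my_pio_poll(struct work_struct *work);
 *
 *	... in the issue path, at most one task outstanding per port:
 *	ata_port_queue_task(ap, my_pio_poll, qc, msecs_to_jiffies(10));
 *
 *	... and on teardown or before error handling:
 *	ata_port_flush_task(ap);	my_pio_poll can no longer run
 */
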
13147102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1315c6fd2807SJeff Garzik {
1316c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1317c6fd2807SJeff Garzik 
1318c6fd2807SJeff Garzik 	complete(waiting);
1319c6fd2807SJeff Garzik }
1320c6fd2807SJeff Garzik 
1321c6fd2807SJeff Garzik /**
13222432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1323c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1324c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1325c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1326c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
13272432697bSTejun Heo  *	@sg: sg list for the data buffer of the command
13282432697bSTejun Heo  *	@n_elem: Number of sg entries
1329c6fd2807SJeff Garzik  *
1330c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1331c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1332c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1333c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1334c6fd2807SJeff Garzik  *	clean up after timeout.
1335c6fd2807SJeff Garzik  *
1336c6fd2807SJeff Garzik  *	LOCKING:
1337c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1338c6fd2807SJeff Garzik  *
1339c6fd2807SJeff Garzik  *	RETURNS:
1340c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1341c6fd2807SJeff Garzik  */
13422432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1343c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
13442432697bSTejun Heo 			      int dma_dir, struct scatterlist *sg,
13452432697bSTejun Heo 			      unsigned int n_elem)
1346c6fd2807SJeff Garzik {
1347c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1348c6fd2807SJeff Garzik 	u8 command = tf->command;
1349c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1350c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1351c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1352c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1353c6fd2807SJeff Garzik 	unsigned long flags;
1354c6fd2807SJeff Garzik 	unsigned int err_mask;
1355c6fd2807SJeff Garzik 	int rc;
1356c6fd2807SJeff Garzik 
1357c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1358c6fd2807SJeff Garzik 
1359c6fd2807SJeff Garzik 	/* no internal command while frozen */
1360c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1361c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1362c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1363c6fd2807SJeff Garzik 	}
1364c6fd2807SJeff Garzik 
1365c6fd2807SJeff Garzik 	/* initialize internal qc */
1366c6fd2807SJeff Garzik 
1367c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1368c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1369c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1370c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1371c6fd2807SJeff Garzik 	 */
1372c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1373c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1374c6fd2807SJeff Garzik 	else
1375c6fd2807SJeff Garzik 		tag = 0;
1376c6fd2807SJeff Garzik 
1377c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1378c6fd2807SJeff Garzik 		BUG();
1379c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1380c6fd2807SJeff Garzik 
1381c6fd2807SJeff Garzik 	qc->tag = tag;
1382c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1383c6fd2807SJeff Garzik 	qc->ap = ap;
1384c6fd2807SJeff Garzik 	qc->dev = dev;
1385c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1386c6fd2807SJeff Garzik 
1387c6fd2807SJeff Garzik 	preempted_tag = ap->active_tag;
1388c6fd2807SJeff Garzik 	preempted_sactive = ap->sactive;
1389c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1390c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
1391c6fd2807SJeff Garzik 	ap->sactive = 0;
1392c6fd2807SJeff Garzik 	ap->qc_active = 0;
1393c6fd2807SJeff Garzik 
1394c6fd2807SJeff Garzik 	/* prepare & issue qc */
1395c6fd2807SJeff Garzik 	qc->tf = *tf;
1396c6fd2807SJeff Garzik 	if (cdb)
1397c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1398c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1399c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1400c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
14012432697bSTejun Heo 		unsigned int i, buflen = 0;
14022432697bSTejun Heo 
14032432697bSTejun Heo 		for (i = 0; i < n_elem; i++)
14042432697bSTejun Heo 			buflen += sg[i].length;
14052432697bSTejun Heo 
14062432697bSTejun Heo 		ata_sg_init(qc, sg, n_elem);
140749c80429SBrian King 		qc->nbytes = buflen;
1408c6fd2807SJeff Garzik 	}
1409c6fd2807SJeff Garzik 
1410c6fd2807SJeff Garzik 	qc->private_data = &wait;
1411c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1412c6fd2807SJeff Garzik 
1413c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1414c6fd2807SJeff Garzik 
1415c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1416c6fd2807SJeff Garzik 
1417c6fd2807SJeff Garzik 	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1418c6fd2807SJeff Garzik 
1419c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1420c6fd2807SJeff Garzik 
1421c6fd2807SJeff Garzik 	if (!rc) {
1422c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1423c6fd2807SJeff Garzik 
1424c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1425c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1426c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1427c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1428c6fd2807SJeff Garzik 		 */
1429c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1430c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1431c6fd2807SJeff Garzik 
1432c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1433c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1434c6fd2807SJeff Garzik 			else
1435c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1436c6fd2807SJeff Garzik 
1437c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1438c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1439c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1440c6fd2807SJeff Garzik 		}
1441c6fd2807SJeff Garzik 
1442c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1443c6fd2807SJeff Garzik 	}
1444c6fd2807SJeff Garzik 
1445c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1446c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1447c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1448c6fd2807SJeff Garzik 
1449a51d644aSTejun Heo 	/* perform minimal error analysis */
1450a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1451a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1452a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1453a51d644aSTejun Heo 
1454a51d644aSTejun Heo 		if (!qc->err_mask)
1455c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1456a51d644aSTejun Heo 
1457a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1458a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1459c6fd2807SJeff Garzik 	}
1460c6fd2807SJeff Garzik 
1461c6fd2807SJeff Garzik 	/* finish up */
1462c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1463c6fd2807SJeff Garzik 
1464c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1465c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1466c6fd2807SJeff Garzik 
1467c6fd2807SJeff Garzik 	ata_qc_free(qc);
1468c6fd2807SJeff Garzik 	ap->active_tag = preempted_tag;
1469c6fd2807SJeff Garzik 	ap->sactive = preempted_sactive;
1470c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1471c6fd2807SJeff Garzik 
1472c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1473c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1474c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1475c6fd2807SJeff Garzik 	 * port.
1476c6fd2807SJeff Garzik 	 *
1477c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1478c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1479c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1480c6fd2807SJeff Garzik 	 *
1481c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1482c6fd2807SJeff Garzik 	 */
1483c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1484c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1485c6fd2807SJeff Garzik 		ata_port_probe(ap);
1486c6fd2807SJeff Garzik 	}
1487c6fd2807SJeff Garzik 
1488c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1489c6fd2807SJeff Garzik 
1490c6fd2807SJeff Garzik 	return err_mask;
1491c6fd2807SJeff Garzik }
1492c6fd2807SJeff Garzik 
1493c6fd2807SJeff Garzik /**
149433480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
14952432697bSTejun Heo  *	@dev: Device to which the command is sent
14962432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
14972432697bSTejun Heo  *	@cdb: CDB for packet command
14982432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
14992432697bSTejun Heo  *	@buf: Data buffer of the command
15002432697bSTejun Heo  *	@buflen: Length of data buffer
15012432697bSTejun Heo  *
15022432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
15032432697bSTejun Heo  *	buffer instead of sg list.
15042432697bSTejun Heo  *
15052432697bSTejun Heo  *	LOCKING:
15062432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
15072432697bSTejun Heo  *
15082432697bSTejun Heo  *	RETURNS:
15092432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
15102432697bSTejun Heo  */
15112432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
15122432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
15132432697bSTejun Heo 			   int dma_dir, void *buf, unsigned int buflen)
15142432697bSTejun Heo {
151533480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
151633480a0eSTejun Heo 	unsigned int n_elem = 0;
15172432697bSTejun Heo 
151833480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
151933480a0eSTejun Heo 		WARN_ON(!buf);
15202432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
152133480a0eSTejun Heo 		psg = &sg;
152233480a0eSTejun Heo 		n_elem++;
152333480a0eSTejun Heo 	}
15242432697bSTejun Heo 
152533480a0eSTejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
15262432697bSTejun Heo }
15272432697bSTejun Heo 
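/*
 * Usage sketch (simplified version of what ata_dev_read_id() does
 * further below): a data-in command with a flat kernel buffer.
 *
 *	struct ata_taskfile tf;
 *	u16 id[ATA_ID_WORDS];
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, sizeof(id));
 *
 * A non-zero return is an AC_ERR_* mask and the result taskfile is
 * left in tf for the caller to inspect.
 */
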
15282432697bSTejun Heo /**
1529c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1530c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1531c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1532c6fd2807SJeff Garzik  *
1533c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1534c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1535c6fd2807SJeff Garzik  *
1536c6fd2807SJeff Garzik  *	LOCKING:
1537c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1538c6fd2807SJeff Garzik  *
1539c6fd2807SJeff Garzik  *	RETURNS:
1540c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1541c6fd2807SJeff Garzik  */
1542c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1543c6fd2807SJeff Garzik {
1544c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1545c6fd2807SJeff Garzik 
1546c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1547c6fd2807SJeff Garzik 
1548c6fd2807SJeff Garzik 	tf.command = cmd;
1549c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1550c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1551c6fd2807SJeff Garzik 
1552c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1553c6fd2807SJeff Garzik }
1554c6fd2807SJeff Garzik 
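/*
 * Example (illustrative): register-only commands such as flushing the
 * write cache or spinning the drive down fit this helper directly,
 * e.g.
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *
 * Commands that need LBA, count or feature values must build a full
 * taskfile and use ata_exec_internal() instead.
 */
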
1555c6fd2807SJeff Garzik /**
1556c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1557c6fd2807SJeff Garzik  *	@adev: ATA device
1558c6fd2807SJeff Garzik  *
1559c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1560c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1561c6fd2807SJeff Garzik  */
1562c6fd2807SJeff Garzik 
1563c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1564c6fd2807SJeff Garzik {
1565432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check
1566432729f0SAlan Cox 	   as the caller should know this */
1567432729f0SAlan Cox 	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
1568c6fd2807SJeff Garzik 		return 0;
1569432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1570432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1571c6fd2807SJeff Garzik 		return 1;
1572432729f0SAlan Cox 	/* We turn it on when possible */
1573432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1574432729f0SAlan Cox 		return 1;
1575432729f0SAlan Cox 	return 0;
1576432729f0SAlan Cox }
1577c6fd2807SJeff Garzik 
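/*
 * Usage sketch (MY_CHIP_IORDY_EN is a made-up register bit, shown for
 * illustration only): a PATA driver's ->set_piomode() would typically
 * consult this helper before programming its timing registers:
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing |= MY_CHIP_IORDY_EN;
 */
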
1578432729f0SAlan Cox /**
1579432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1580432729f0SAlan Cox  *	@adev: ATA device
1581432729f0SAlan Cox  *
1582432729f0SAlan Cox  *	Compute the highest mode possible if we are not using iordy. Return
1583432729f0SAlan Cox  *	-1 if no iordy mode is available.
1584432729f0SAlan Cox  */
1585432729f0SAlan Cox 
1586432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1587432729f0SAlan Cox {
1588c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1589c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1590432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1591c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY? */
1592c6fd2807SJeff Garzik 		if (pio) {
1593c6fd2807SJeff Garzik 			/* These are cycle times, not frequencies - watch the logic! */
1594c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240ns per cycle */
1595432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1596432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1597c6fd2807SJeff Garzik 		}
1598c6fd2807SJeff Garzik 	}
1599432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1600c6fd2807SJeff Garzik }
1601c6fd2807SJeff Garzik 
1602c6fd2807SJeff Garzik /**
1603c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1604c6fd2807SJeff Garzik  *	@dev: target device
1605c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1606bff04647STejun Heo  *	@flags: ATA_READID_* flags
1607c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1608c6fd2807SJeff Garzik  *
1609c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1610c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1611c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1612c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1613c6fd2807SJeff Garzik  *
1614c6fd2807SJeff Garzik  *	LOCKING:
1615c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1616c6fd2807SJeff Garzik  *
1617c6fd2807SJeff Garzik  *	RETURNS:
1618c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1619c6fd2807SJeff Garzik  */
1620c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1621bff04647STejun Heo 		    unsigned int flags, u16 *id)
1622c6fd2807SJeff Garzik {
1623c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1624c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1625c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1626c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1627c6fd2807SJeff Garzik 	const char *reason;
162854936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1629c6fd2807SJeff Garzik 	int rc;
1630c6fd2807SJeff Garzik 
1631c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
163244877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1633c6fd2807SJeff Garzik 
1634c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1635c6fd2807SJeff Garzik  retry:
1636c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1637c6fd2807SJeff Garzik 
1638c6fd2807SJeff Garzik 	switch (class) {
1639c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1640c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1641c6fd2807SJeff Garzik 		break;
1642c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1643c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1644c6fd2807SJeff Garzik 		break;
1645c6fd2807SJeff Garzik 	default:
1646c6fd2807SJeff Garzik 		rc = -ENODEV;
1647c6fd2807SJeff Garzik 		reason = "unsupported class";
1648c6fd2807SJeff Garzik 		goto err_out;
1649c6fd2807SJeff Garzik 	}
1650c6fd2807SJeff Garzik 
1651c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
165281afe893STejun Heo 
165381afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
165481afe893STejun Heo 	 * sure those are properly initialized.
165581afe893STejun Heo 	 */
165681afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
165781afe893STejun Heo 
165881afe893STejun Heo 	/* Device presence detection is unreliable on some
165981afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
166081afe893STejun Heo 	 */
166181afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1662c6fd2807SJeff Garzik 
1663c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1664c6fd2807SJeff Garzik 				     id, sizeof(id[0]) * ATA_ID_WORDS);
1665c6fd2807SJeff Garzik 	if (err_mask) {
1666800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
166755a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
166844877b4eSTejun Heo 				ap->print_id, dev->devno);
166955a8e2c8STejun Heo 			return -ENOENT;
167055a8e2c8STejun Heo 		}
167155a8e2c8STejun Heo 
167254936f8bSTejun Heo 		/* Device or controller might have reported the wrong
167354936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
167454936f8bSTejun Heo 		 * the current one is aborted by the device.
167554936f8bSTejun Heo 		 */
167654936f8bSTejun Heo 		if (may_fallback &&
167754936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
167854936f8bSTejun Heo 			may_fallback = 0;
167954936f8bSTejun Heo 
168054936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
168154936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
168254936f8bSTejun Heo 			else
168354936f8bSTejun Heo 				class = ATA_DEV_ATA;
168454936f8bSTejun Heo 			goto retry;
168554936f8bSTejun Heo 		}
168654936f8bSTejun Heo 
1687c6fd2807SJeff Garzik 		rc = -EIO;
1688c6fd2807SJeff Garzik 		reason = "I/O error";
1689c6fd2807SJeff Garzik 		goto err_out;
1690c6fd2807SJeff Garzik 	}
1691c6fd2807SJeff Garzik 
169254936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
169354936f8bSTejun Heo 	 * successfully at least once.
169454936f8bSTejun Heo 	 */
169554936f8bSTejun Heo 	may_fallback = 0;
169654936f8bSTejun Heo 
1697c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1698c6fd2807SJeff Garzik 
1699c6fd2807SJeff Garzik 	/* sanity check */
1700c6fd2807SJeff Garzik 	rc = -EINVAL;
17016070068bSAlan Cox 	reason = "device reports invalid type";
17024a3381feSJeff Garzik 
17034a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
17044a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
17054a3381feSJeff Garzik 			goto err_out;
17064a3381feSJeff Garzik 	} else {
17074a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1708c6fd2807SJeff Garzik 			goto err_out;
1709c6fd2807SJeff Garzik 	}
1710c6fd2807SJeff Garzik 
1711169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1712169439c2SMark Lord 		tried_spinup = 1;
1713169439c2SMark Lord 		/*
1714169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1715169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1716169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1717169439c2SMark Lord 		 */
1718169439c2SMark Lord 		ata_tf_init(dev, &tf);
1719169439c2SMark Lord 		tf.command = ATA_CMD_SET_FEATURES;
1720169439c2SMark Lord 		tf.feature = SETFEATURES_SPINUP;
1721169439c2SMark Lord 		tf.protocol = ATA_PROT_NODATA;
1722169439c2SMark Lord 		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1723169439c2SMark Lord 		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1724169439c2SMark Lord 		if (err_mask) {
1725169439c2SMark Lord 			rc = -EIO;
1726169439c2SMark Lord 			reason = "SPINUP failed";
1727169439c2SMark Lord 			goto err_out;
1728169439c2SMark Lord 		}
1729169439c2SMark Lord 		/*
1730169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1731169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1732169439c2SMark Lord 		 */
1733169439c2SMark Lord 		if (id[2] == 0x37c8)
1734169439c2SMark Lord 			goto retry;
1735169439c2SMark Lord 	}
1736169439c2SMark Lord 
1737bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1738c6fd2807SJeff Garzik 		/*
1739c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1740c6fd2807SJeff Garzik 		 * SRST RESET
1741c6fd2807SJeff Garzik 		 * IDENTIFY
1742c6fd2807SJeff Garzik 		 * INITIALIZE DEVICE PARAMETERS
1743c6fd2807SJeff Garzik 		 * anything else..
1744c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
1745c6fd2807SJeff Garzik 		 */
1746c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1747c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1748c6fd2807SJeff Garzik 			if (err_mask) {
1749c6fd2807SJeff Garzik 				rc = -EIO;
1750c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1751c6fd2807SJeff Garzik 				goto err_out;
1752c6fd2807SJeff Garzik 			}
1753c6fd2807SJeff Garzik 
1754c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1755c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1756c6fd2807SJeff Garzik 			 */
1757bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1758c6fd2807SJeff Garzik 			goto retry;
1759c6fd2807SJeff Garzik 		}
1760c6fd2807SJeff Garzik 	}
1761c6fd2807SJeff Garzik 
1762c6fd2807SJeff Garzik 	*p_class = class;
1763c6fd2807SJeff Garzik 
1764c6fd2807SJeff Garzik 	return 0;
1765c6fd2807SJeff Garzik 
1766c6fd2807SJeff Garzik  err_out:
1767c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1768c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1769c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1770c6fd2807SJeff Garzik 	return rc;
1771c6fd2807SJeff Garzik }
1772c6fd2807SJeff Garzik 
1773c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
1774c6fd2807SJeff Garzik {
1775c6fd2807SJeff Garzik 	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1776c6fd2807SJeff Garzik }
1777c6fd2807SJeff Garzik 
1778c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
1779c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
1780c6fd2807SJeff Garzik {
1781c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1782c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1783c6fd2807SJeff Garzik 
1784c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
1785c6fd2807SJeff Garzik 		desc[0] = '\0';
1786c6fd2807SJeff Garzik 		return;
1787c6fd2807SJeff Garzik 	}
178875683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
17896919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
17906919a0a6SAlan Cox 		return;
17916919a0a6SAlan Cox 	}
1792c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
1793cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1794c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
1795c6fd2807SJeff Garzik 	}
1796c6fd2807SJeff Garzik 
1797c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
1798c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1799c6fd2807SJeff Garzik 	else
1800c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1801c6fd2807SJeff Garzik }
1802c6fd2807SJeff Garzik 
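/*
 * Worked example (illustrative): a drive advertising queue depth 32 on
 * a host whose scsi_host->can_queue is 31 ends up with hdepth == 31
 * and ddepth == 32, so the probe message reads "NCQ (depth 31/32)".
 * A device with ATA_HORKAGE_NONCQ set reports "NCQ (not used)" and
 * never gets ATA_DFLAG_NCQ.
 */
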
1803c6fd2807SJeff Garzik /**
1804c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1805c6fd2807SJeff Garzik  *	@dev: Target device to configure
1806c6fd2807SJeff Garzik  *
1807c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
1808c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
1809c6fd2807SJeff Garzik  *
1810c6fd2807SJeff Garzik  *	LOCKING:
1811c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1812c6fd2807SJeff Garzik  *
1813c6fd2807SJeff Garzik  *	RETURNS:
1814c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1815c6fd2807SJeff Garzik  */
1816efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
1817c6fd2807SJeff Garzik {
1818c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
18196746544cSTejun Heo 	struct ata_eh_context *ehc = &ap->eh_context;
18206746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1821c6fd2807SJeff Garzik 	const u16 *id = dev->id;
1822c6fd2807SJeff Garzik 	unsigned int xfer_mask;
1823b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
18243f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
18253f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
1826c6fd2807SJeff Garzik 	int rc;
1827c6fd2807SJeff Garzik 
1828c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
182944877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
183044877b4eSTejun Heo 			       __FUNCTION__);
1831c6fd2807SJeff Garzik 		return 0;
1832c6fd2807SJeff Garzik 	}
1833c6fd2807SJeff Garzik 
1834c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
183544877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1836c6fd2807SJeff Garzik 
183775683fe7STejun Heo 	/* set horkage */
183875683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
183975683fe7STejun Heo 
18406746544cSTejun Heo 	/* let ACPI work its magic */
18416746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
18426746544cSTejun Heo 	if (rc)
18436746544cSTejun Heo 		return rc;
184408573a86SKristen Carlson Accardi 
1845c6fd2807SJeff Garzik 	/* print device capabilities */
1846c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1847c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
1848c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1849c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1850c6fd2807SJeff Garzik 			       __FUNCTION__,
1851c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
1852c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
1853c6fd2807SJeff Garzik 
1854c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
1855c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1856c6fd2807SJeff Garzik 	dev->max_sectors = 0;
1857c6fd2807SJeff Garzik 	dev->cdb_len = 0;
1858c6fd2807SJeff Garzik 	dev->n_sectors = 0;
1859c6fd2807SJeff Garzik 	dev->cylinders = 0;
1860c6fd2807SJeff Garzik 	dev->heads = 0;
1861c6fd2807SJeff Garzik 	dev->sectors = 0;
1862c6fd2807SJeff Garzik 
1863c6fd2807SJeff Garzik 	/*
1864c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
1865c6fd2807SJeff Garzik 	 */
1866c6fd2807SJeff Garzik 
1867c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
1868c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
1869c6fd2807SJeff Garzik 
1870c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1871c6fd2807SJeff Garzik 		ata_dump_id(id);
1872c6fd2807SJeff Garzik 
1873ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1874ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1875ef143d57SAlbert Lee 			sizeof(fwrevbuf));
1876ef143d57SAlbert Lee 
1877ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1878ef143d57SAlbert Lee 			sizeof(modelbuf));
1879ef143d57SAlbert Lee 
1880c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
1881c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
1882b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
1883b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
188444877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
188544877b4eSTejun Heo 					       "supports DRM functions and may "
188644877b4eSTejun Heo 					       "not be fully accessible.\n");
1887b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
1888b352e57dSAlan Cox 		}
1889b352e57dSAlan Cox 		else
1890b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
1891b352e57dSAlan Cox 
1892c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
1893c6fd2807SJeff Garzik 
18943f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
18953f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
18963f64f565SEric D. Mudama 
1897c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
1898c6fd2807SJeff Garzik 			const char *lba_desc;
1899c6fd2807SJeff Garzik 			char ncq_desc[20];
1900c6fd2807SJeff Garzik 
1901c6fd2807SJeff Garzik 			lba_desc = "LBA";
1902c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
1903c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
1904c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
1905c6fd2807SJeff Garzik 				lba_desc = "LBA48";
19066fc49adbSTejun Heo 
19076fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
19086fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
19096fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1910c6fd2807SJeff Garzik 			}
1911c6fd2807SJeff Garzik 
19121e999736SAlan Cox 			if (ata_id_hpa_enabled(dev->id))
19131e999736SAlan Cox 				dev->n_sectors = ata_hpa_resize(dev);
19141e999736SAlan Cox 
1915c6fd2807SJeff Garzik 			/* config NCQ */
1916c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1917c6fd2807SJeff Garzik 
1918c6fd2807SJeff Garzik 			/* print device info to dmesg */
19193f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
19203f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19213f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19223f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
19233f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19243f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19253f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
1926c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
19273f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
19283f64f565SEric D. Mudama 			}
1929c6fd2807SJeff Garzik 		} else {
1930c6fd2807SJeff Garzik 			/* CHS */
1931c6fd2807SJeff Garzik 
1932c6fd2807SJeff Garzik 			/* Default translation */
1933c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
1934c6fd2807SJeff Garzik 			dev->heads	= id[3];
1935c6fd2807SJeff Garzik 			dev->sectors	= id[6];
1936c6fd2807SJeff Garzik 
1937c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
1938c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
1939c6fd2807SJeff Garzik 				dev->cylinders = id[54];
1940c6fd2807SJeff Garzik 				dev->heads     = id[55];
1941c6fd2807SJeff Garzik 				dev->sectors   = id[56];
1942c6fd2807SJeff Garzik 			}
1943c6fd2807SJeff Garzik 
1944c6fd2807SJeff Garzik 			/* print device info to dmesg */
19453f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
1946c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
19473f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19483f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
19493f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19503f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19513f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
19523f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
19533f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
19543f64f565SEric D. Mudama 					dev->heads, dev->sectors);
19553f64f565SEric D. Mudama 			}
1956c6fd2807SJeff Garzik 		}
1957c6fd2807SJeff Garzik 
1958c6fd2807SJeff Garzik 		dev->cdb_len = 16;
1959c6fd2807SJeff Garzik 	}
1960c6fd2807SJeff Garzik 
1961c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
1962c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
1963c6fd2807SJeff Garzik 		char *cdb_intr_string = "";
1964c6fd2807SJeff Garzik 
1965c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
1966c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1967c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1968c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1969c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
1970c6fd2807SJeff Garzik 			rc = -EINVAL;
1971c6fd2807SJeff Garzik 			goto err_out_nosup;
1972c6fd2807SJeff Garzik 		}
1973c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
1974c6fd2807SJeff Garzik 
1975c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
1976c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
1977c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
1978c6fd2807SJeff Garzik 		}
1979c6fd2807SJeff Garzik 
1980c6fd2807SJeff Garzik 		/* print device info to dmesg */
1981c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
1982ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
1983ef143d57SAlbert Lee 				       "ATAPI: %s, %s, max %s%s\n",
1984ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
1985c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
1986c6fd2807SJeff Garzik 				       cdb_intr_string);
1987c6fd2807SJeff Garzik 	}
1988c6fd2807SJeff Garzik 
1989914ed354STejun Heo 	/* determine max_sectors */
1990914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
1991914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
1992914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1993914ed354STejun Heo 
199493590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
199593590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
199693590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
199793590859SAlan Cox 		   idiot */
199893590859SAlan Cox 		if (print_info) {
199993590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
200093590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
200193590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
200293590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
200393590859SAlan Cox 		}
200493590859SAlan Cox 	}
200593590859SAlan Cox 
2006c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2007c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2008c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2009c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2010c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2011c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2012c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2013c6fd2807SJeff Garzik 	}
2014c6fd2807SJeff Garzik 
201575683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
201603ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
201703ec52deSTejun Heo 					 dev->max_sectors);
201818d6e9d5SAlbert Lee 
2019c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2020cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2021c6fd2807SJeff Garzik 
2022c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2023c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2024c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2025c6fd2807SJeff Garzik 	return 0;
2026c6fd2807SJeff Garzik 
2027c6fd2807SJeff Garzik err_out_nosup:
2028c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2029c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2030c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2031c6fd2807SJeff Garzik 	return rc;
2032c6fd2807SJeff Garzik }
2033c6fd2807SJeff Garzik 
2034c6fd2807SJeff Garzik /**
20352e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2036be0d18dfSAlan Cox  *	@ap: port
2037be0d18dfSAlan Cox  *
20382e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2039be0d18dfSAlan Cox  *	detection.
2040be0d18dfSAlan Cox  */
2041be0d18dfSAlan Cox 
2042be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2043be0d18dfSAlan Cox {
2044be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2045be0d18dfSAlan Cox }
2046be0d18dfSAlan Cox 
2047be0d18dfSAlan Cox /**
20482e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2049be0d18dfSAlan Cox  *	@ap: port
2050be0d18dfSAlan Cox  *
20512e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2052be0d18dfSAlan Cox  *	detection.
2053be0d18dfSAlan Cox  */
2054be0d18dfSAlan Cox 
2055be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2056be0d18dfSAlan Cox {
2057be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2058be0d18dfSAlan Cox }
2059be0d18dfSAlan Cox 
2060be0d18dfSAlan Cox /**
2061be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2062be0d18dfSAlan Cox  *	@ap: port
2063be0d18dfSAlan Cox  *
2064be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2065be0d18dfSAlan Cox  */
2066be0d18dfSAlan Cox 
2067be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2068be0d18dfSAlan Cox {
2069be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2070be0d18dfSAlan Cox }
2071be0d18dfSAlan Cox 
2072be0d18dfSAlan Cox /**
2073be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2074be0d18dfSAlan Cox  *	@ap: port
2075be0d18dfSAlan Cox  *
2076be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables.
2077be0d18dfSAlan Cox  */
2078be0d18dfSAlan Cox 
2079be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2080be0d18dfSAlan Cox {
2081be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2082be0d18dfSAlan Cox }
2083be0d18dfSAlan Cox 
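/*
 * Usage sketch (my_pata_ops is hypothetical): a host driver publishes
 * one of the helpers above through its port operations so that probing
 * can fill in ap->cbl once PDIAG- has been released:
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		...
 *		.cable_detect	= ata_cable_40wire,
 *		...
 *	};
 */
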
2084be0d18dfSAlan Cox /**
2085c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2086c6fd2807SJeff Garzik  *	@ap: Bus to probe
2087c6fd2807SJeff Garzik  *
2088c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2089c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2090c6fd2807SJeff Garzik  *	the bus.
2091c6fd2807SJeff Garzik  *
2092c6fd2807SJeff Garzik  *	LOCKING:
2093c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2094c6fd2807SJeff Garzik  *
2095c6fd2807SJeff Garzik  *	RETURNS:
2096c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2097c6fd2807SJeff Garzik  */
2098c6fd2807SJeff Garzik 
2099c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2100c6fd2807SJeff Garzik {
2101c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2102c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
21034ae72a1eSTejun Heo 	int i, rc;
2104c6fd2807SJeff Garzik 	struct ata_device *dev;
2105c6fd2807SJeff Garzik 
2106c6fd2807SJeff Garzik 	ata_port_probe(ap);
2107c6fd2807SJeff Garzik 
2108c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2109c6fd2807SJeff Garzik 		tries[i] = ATA_PROBE_MAX_TRIES;
2110c6fd2807SJeff Garzik 
2111c6fd2807SJeff Garzik  retry:
2112c6fd2807SJeff Garzik 	/* reset and determine device classes */
2113c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2114c6fd2807SJeff Garzik 
2115c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2116c6fd2807SJeff Garzik 		dev = &ap->device[i];
2117c6fd2807SJeff Garzik 
2118c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2119c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2120c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2121c6fd2807SJeff Garzik 		else
2122c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2123c6fd2807SJeff Garzik 
2124c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2125c6fd2807SJeff Garzik 	}
2126c6fd2807SJeff Garzik 
2127c6fd2807SJeff Garzik 	ata_port_probe(ap);
2128c6fd2807SJeff Garzik 
2129c6fd2807SJeff Garzik 	/* after the reset the device state is PIO 0 and the controller
2130c6fd2807SJeff Garzik 	   state is undefined. Record the mode */
2131c6fd2807SJeff Garzik 
2132c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2133c6fd2807SJeff Garzik 		ap->device[i].pio_mode = XFER_PIO_0;
2134c6fd2807SJeff Garzik 
2135f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2136f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2137f31f0cc2SJeff Garzik 	   the slave device */
2138f31f0cc2SJeff Garzik 
2139f31f0cc2SJeff Garzik 	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
2140c6fd2807SJeff Garzik 		dev = &ap->device[i];
2141c6fd2807SJeff Garzik 
2142c6fd2807SJeff Garzik 		if (tries[i])
2143c6fd2807SJeff Garzik 			dev->class = classes[i];
2144c6fd2807SJeff Garzik 
2145c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2146c6fd2807SJeff Garzik 			continue;
2147c6fd2807SJeff Garzik 
2148bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2149bff04647STejun Heo 				     dev->id);
2150c6fd2807SJeff Garzik 		if (rc)
2151c6fd2807SJeff Garzik 			goto fail;
2152f31f0cc2SJeff Garzik 	}
2153f31f0cc2SJeff Garzik 
2154be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2155be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2156be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2157be0d18dfSAlan Cox 
2158f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2159f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2160f31f0cc2SJeff Garzik 
2161f31f0cc2SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2162f31f0cc2SJeff Garzik 		dev = &ap->device[i];
2163f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2164f31f0cc2SJeff Garzik 			continue;
2165c6fd2807SJeff Garzik 
2166efdaedc4STejun Heo 		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2167efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
2168efdaedc4STejun Heo 		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2169c6fd2807SJeff Garzik 		if (rc)
2170c6fd2807SJeff Garzik 			goto fail;
2171c6fd2807SJeff Garzik 	}
2172c6fd2807SJeff Garzik 
2173c6fd2807SJeff Garzik 	/* configure transfer mode */
2174c6fd2807SJeff Garzik 	rc = ata_set_mode(ap, &dev);
21754ae72a1eSTejun Heo 	if (rc)
2176c6fd2807SJeff Garzik 		goto fail;
2177c6fd2807SJeff Garzik 
2178c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2179c6fd2807SJeff Garzik 		if (ata_dev_enabled(&ap->device[i]))
2180c6fd2807SJeff Garzik 			return 0;
2181c6fd2807SJeff Garzik 
2182c6fd2807SJeff Garzik 	/* no device present, disable port */
2183c6fd2807SJeff Garzik 	ata_port_disable(ap);
2184c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
2185c6fd2807SJeff Garzik 	return -ENODEV;
2186c6fd2807SJeff Garzik 
2187c6fd2807SJeff Garzik  fail:
21884ae72a1eSTejun Heo 	tries[dev->devno]--;
21894ae72a1eSTejun Heo 
2190c6fd2807SJeff Garzik 	switch (rc) {
2191c6fd2807SJeff Garzik 	case -EINVAL:
21924ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2193c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2194c6fd2807SJeff Garzik 		break;
21954ae72a1eSTejun Heo 
21964ae72a1eSTejun Heo 	case -ENODEV:
21974ae72a1eSTejun Heo 		/* give it just one more chance */
21984ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
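		/* fall through */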
2199c6fd2807SJeff Garzik 	case -EIO:
22004ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
22014ae72a1eSTejun Heo 			/* This is the last chance, better to slow
22024ae72a1eSTejun Heo 			 * down than lose it.
22034ae72a1eSTejun Heo 			 */
2204c6fd2807SJeff Garzik 			sata_down_spd_limit(ap);
22054ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
22064ae72a1eSTejun Heo 		}
2207c6fd2807SJeff Garzik 	}
2208c6fd2807SJeff Garzik 
22094ae72a1eSTejun Heo 	if (!tries[dev->devno])
2210c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2211c6fd2807SJeff Garzik 
2212c6fd2807SJeff Garzik 	goto retry;
2213c6fd2807SJeff Garzik }
2214c6fd2807SJeff Garzik 
2215c6fd2807SJeff Garzik /**
2216c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2217c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2218c6fd2807SJeff Garzik  *
2219c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2220c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2221c6fd2807SJeff Garzik  *
2222cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2223c6fd2807SJeff Garzik  *	serialization.
2224c6fd2807SJeff Garzik  */
2225c6fd2807SJeff Garzik 
2226c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2227c6fd2807SJeff Garzik {
2228c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2229c6fd2807SJeff Garzik }
2230c6fd2807SJeff Garzik 
2231c6fd2807SJeff Garzik /**
2232c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2233c6fd2807SJeff Garzik  *	@ap: SATA port to printk link status about
2234c6fd2807SJeff Garzik  *
2235c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2236c6fd2807SJeff Garzik  *
2237c6fd2807SJeff Garzik  *	LOCKING:
2238c6fd2807SJeff Garzik  *	None.
2239c6fd2807SJeff Garzik  */
224043727fbcSJeff Garzik void sata_print_link_status(struct ata_port *ap)
2241c6fd2807SJeff Garzik {
2242c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2243c6fd2807SJeff Garzik 
2244c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
2245c6fd2807SJeff Garzik 		return;
2246c6fd2807SJeff Garzik 	sata_scr_read(ap, SCR_CONTROL, &scontrol);
2247c6fd2807SJeff Garzik 
2248c6fd2807SJeff Garzik 	if (ata_port_online(ap)) {
2249c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2250c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2251c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2252c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2253c6fd2807SJeff Garzik 	} else {
2254c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2255c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2256c6fd2807SJeff Garzik 				sstatus, scontrol);
2257c6fd2807SJeff Garzik 	}
2258c6fd2807SJeff Garzik }
2259c6fd2807SJeff Garzik 
2260c6fd2807SJeff Garzik /**
2261c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2262c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2263c6fd2807SJeff Garzik  *
2264c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2265c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2266c6fd2807SJeff Garzik  *	clear any reset condition.
2267c6fd2807SJeff Garzik  *
2268c6fd2807SJeff Garzik  *	LOCKING:
2269c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2270c6fd2807SJeff Garzik  *
2271c6fd2807SJeff Garzik  */
2272c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2273c6fd2807SJeff Garzik {
2274c6fd2807SJeff Garzik 	u32 sstatus;
2275c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2276c6fd2807SJeff Garzik 
2277c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2278c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2279c6fd2807SJeff Garzik 		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2280c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2281c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2282c6fd2807SJeff Garzik 		mdelay(1);
2283c6fd2807SJeff Garzik 	}
2284c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2285c6fd2807SJeff Garzik 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2286c6fd2807SJeff Garzik 
2287c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2288c6fd2807SJeff Garzik 	do {
2289c6fd2807SJeff Garzik 		msleep(200);
2290c6fd2807SJeff Garzik 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2291c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2292c6fd2807SJeff Garzik 			break;
2293c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2294c6fd2807SJeff Garzik 
2295c6fd2807SJeff Garzik 	/* print link status */
2296c6fd2807SJeff Garzik 	sata_print_link_status(ap);
2297c6fd2807SJeff Garzik 
2298c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2299c6fd2807SJeff Garzik 	if (!ata_port_offline(ap))
2300c6fd2807SJeff Garzik 		ata_port_probe(ap);
2301c6fd2807SJeff Garzik 	else
2302c6fd2807SJeff Garzik 		ata_port_disable(ap);
2303c6fd2807SJeff Garzik 
2304c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2305c6fd2807SJeff Garzik 		return;
2306c6fd2807SJeff Garzik 
2307c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2308c6fd2807SJeff Garzik 		ata_port_disable(ap);
2309c6fd2807SJeff Garzik 		return;
2310c6fd2807SJeff Garzik 	}
2311c6fd2807SJeff Garzik 
2312c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2313c6fd2807SJeff Garzik }
2314c6fd2807SJeff Garzik 
2315c6fd2807SJeff Garzik /**
2316c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2317c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2318c6fd2807SJeff Garzik  *
2319c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2320c6fd2807SJeff Garzik  *	the bus for devices.
2321c6fd2807SJeff Garzik  *
2322c6fd2807SJeff Garzik  *	LOCKING:
2323c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2324c6fd2807SJeff Garzik  *
2325c6fd2807SJeff Garzik  */
2326c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2327c6fd2807SJeff Garzik {
2328c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2329c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2330c6fd2807SJeff Garzik 		return;
2331c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2332c6fd2807SJeff Garzik }
2333c6fd2807SJeff Garzik 
2334c6fd2807SJeff Garzik /**
2335c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2336c6fd2807SJeff Garzik  *	@adev: device
2337c6fd2807SJeff Garzik  *
2338c6fd2807SJeff Garzik  *	Obtain the other device on the same cable; if none is
2339c6fd2807SJeff Garzik  *	present, NULL is returned.
2340c6fd2807SJeff Garzik  */
2341c6fd2807SJeff Garzik 
2342c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2343c6fd2807SJeff Garzik {
2344c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
2345c6fd2807SJeff Garzik 	struct ata_device *pair = &ap->device[1 - adev->devno];
2346c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2347c6fd2807SJeff Garzik 		return NULL;
2348c6fd2807SJeff Garzik 	return pair;
2349c6fd2807SJeff Garzik }
2350c6fd2807SJeff Garzik 
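/*
 * Usage sketch (illustrative): PATA timing code often has to account
 * for both devices sharing the cable, e.g.
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *
 *	if (pair && pair->pio_mode < adev->pio_mode)
 *		... derate the shared timings to the slower device ...
 */
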
2351c6fd2807SJeff Garzik /**
2352c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2353c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2354c6fd2807SJeff Garzik  *
2355c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2356c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2357c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2358c6fd2807SJeff Garzik  *	on this port.
2359c6fd2807SJeff Garzik  *
2360cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2361c6fd2807SJeff Garzik  *	serialization.
2362c6fd2807SJeff Garzik  */
2363c6fd2807SJeff Garzik 
2364c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2365c6fd2807SJeff Garzik {
2366c6fd2807SJeff Garzik 	ap->device[0].class = ATA_DEV_NONE;
2367c6fd2807SJeff Garzik 	ap->device[1].class = ATA_DEV_NONE;
2368c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2369c6fd2807SJeff Garzik }
2370c6fd2807SJeff Garzik 
2371c6fd2807SJeff Garzik /**
2372c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2373c6fd2807SJeff Garzik  *	@ap: Port to adjust SATA spd limit for
2374c6fd2807SJeff Garzik  *
2375c6fd2807SJeff Garzik  *	Adjust SATA spd limit of @ap downward.  Note that this
2376c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2377c6fd2807SJeff Garzik  *	using sata_set_spd().
2378c6fd2807SJeff Garzik  *
2379c6fd2807SJeff Garzik  *	LOCKING:
2380c6fd2807SJeff Garzik  *	Inherited from caller.
2381c6fd2807SJeff Garzik  *
2382c6fd2807SJeff Garzik  *	RETURNS:
2383c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2384c6fd2807SJeff Garzik  */
2385c6fd2807SJeff Garzik int sata_down_spd_limit(struct ata_port *ap)
2386c6fd2807SJeff Garzik {
2387c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2388c6fd2807SJeff Garzik 	int rc, highbit;
2389c6fd2807SJeff Garzik 
2390c6fd2807SJeff Garzik 	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2391c6fd2807SJeff Garzik 	if (rc)
2392c6fd2807SJeff Garzik 		return rc;
2393c6fd2807SJeff Garzik 
2394c6fd2807SJeff Garzik 	mask = ap->sata_spd_limit;
2395c6fd2807SJeff Garzik 	if (mask <= 1)
2396c6fd2807SJeff Garzik 		return -EINVAL;
2397c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2398c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2399c6fd2807SJeff Garzik 
2400c6fd2807SJeff Garzik 	spd = (sstatus >> 4) & 0xf;
2401c6fd2807SJeff Garzik 	if (spd <= 1)
2402c6fd2807SJeff Garzik 		return -EINVAL;
2403c6fd2807SJeff Garzik 	spd--;
2404c6fd2807SJeff Garzik 	mask &= (1 << spd) - 1;
2405c6fd2807SJeff Garzik 	if (!mask)
2406c6fd2807SJeff Garzik 		return -EINVAL;
2407c6fd2807SJeff Garzik 
2408c6fd2807SJeff Garzik 	ap->sata_spd_limit = mask;
2409c6fd2807SJeff Garzik 
2410c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2411c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2412c6fd2807SJeff Garzik 
2413c6fd2807SJeff Garzik 	return 0;
2414c6fd2807SJeff Garzik }
2415c6fd2807SJeff Garzik 
2416c6fd2807SJeff Garzik static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2417c6fd2807SJeff Garzik {
2418c6fd2807SJeff Garzik 	u32 spd, limit;
2419c6fd2807SJeff Garzik 
2420c6fd2807SJeff Garzik 	if (ap->sata_spd_limit == UINT_MAX)
2421c6fd2807SJeff Garzik 		limit = 0;
2422c6fd2807SJeff Garzik 	else
2423c6fd2807SJeff Garzik 		limit = fls(ap->sata_spd_limit);
2424c6fd2807SJeff Garzik 
2425c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
2426c6fd2807SJeff Garzik 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2427c6fd2807SJeff Garzik 
2428c6fd2807SJeff Garzik 	return spd != limit;
2429c6fd2807SJeff Garzik }
2430c6fd2807SJeff Garzik 
2431c6fd2807SJeff Garzik /**
2432c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2433c6fd2807SJeff Garzik  *	@ap: Port in question
2434c6fd2807SJeff Garzik  *
2435c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2436c6fd2807SJeff Garzik  *	@ap->sata_spd_limit.  This function is used to determine
2437c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2438c6fd2807SJeff Garzik  *	configuration.
2439c6fd2807SJeff Garzik  *
2440c6fd2807SJeff Garzik  *	LOCKING:
2441c6fd2807SJeff Garzik  *	Inherited from caller.
2442c6fd2807SJeff Garzik  *
2443c6fd2807SJeff Garzik  *	RETURNS:
2444c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2445c6fd2807SJeff Garzik  */
2446c6fd2807SJeff Garzik int sata_set_spd_needed(struct ata_port *ap)
2447c6fd2807SJeff Garzik {
2448c6fd2807SJeff Garzik 	u32 scontrol;
2449c6fd2807SJeff Garzik 
2450c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2451c6fd2807SJeff Garzik 		return 0;
2452c6fd2807SJeff Garzik 
2453c6fd2807SJeff Garzik 	return __sata_set_spd_needed(ap, &scontrol);
2454c6fd2807SJeff Garzik }
2455c6fd2807SJeff Garzik 
2456c6fd2807SJeff Garzik /**
2457c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2458c6fd2807SJeff Garzik  *	@ap: Port to set SATA spd for
2459c6fd2807SJeff Garzik  *
2460c6fd2807SJeff Garzik  *	Set SATA spd of @ap according to sata_spd_limit.
2461c6fd2807SJeff Garzik  *
2462c6fd2807SJeff Garzik  *	LOCKING:
2463c6fd2807SJeff Garzik  *	Inherited from caller.
2464c6fd2807SJeff Garzik  *
2465c6fd2807SJeff Garzik  *	RETURNS:
2466c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2467c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2468c6fd2807SJeff Garzik  */
2469c6fd2807SJeff Garzik int sata_set_spd(struct ata_port *ap)
2470c6fd2807SJeff Garzik {
2471c6fd2807SJeff Garzik 	u32 scontrol;
2472c6fd2807SJeff Garzik 	int rc;
2473c6fd2807SJeff Garzik 
2474c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2475c6fd2807SJeff Garzik 		return rc;
2476c6fd2807SJeff Garzik 
2477c6fd2807SJeff Garzik 	if (!__sata_set_spd_needed(ap, &scontrol))
2478c6fd2807SJeff Garzik 		return 0;
2479c6fd2807SJeff Garzik 
2480c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2481c6fd2807SJeff Garzik 		return rc;
2482c6fd2807SJeff Garzik 
2483c6fd2807SJeff Garzik 	return 1;
2484c6fd2807SJeff Garzik }
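/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * lowering the link speed is a two-step operation.  sata_down_spd_limit()
 * only shrinks ap->sata_spd_limit; the new ceiling takes effect when
 * sata_set_spd() programs SControl, and the link then has to be reset
 * (typically hardreset) so the PHY renegotiates at the lower speed.
 *
 *	if (sata_down_spd_limit(ap) == 0 && sata_set_spd(ap) == 1)
 *		;	// follow up with a hardreset to renegotiate
 */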
2485c6fd2807SJeff Garzik 
2486c6fd2807SJeff Garzik /*
2487c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2488c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2489c6fd2807SJeff Garzik  */
2490c6fd2807SJeff Garzik /*
2491b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2492c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2493b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2494b352e57dSAlan Cox  *
2495b352e57dSAlan Cox  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2496c6fd2807SJeff Garzik  */
2497c6fd2807SJeff Garzik 
2498c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2499c6fd2807SJeff Garzik 
2500c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2501c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2502c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2503c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2504c6fd2807SJeff Garzik 
2505b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2506b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2507c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2508c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2509c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2510c6fd2807SJeff Garzik 
2511c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2512c6fd2807SJeff Garzik 
2513c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2514c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2515c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2516c6fd2807SJeff Garzik 
2517c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2518c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2519c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2520c6fd2807SJeff Garzik 
2521b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2522b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2523c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2524c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2525c6fd2807SJeff Garzik 
2526c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2527c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2528c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2529c6fd2807SJeff Garzik 
2530c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2531c6fd2807SJeff Garzik 
2532c6fd2807SJeff Garzik 	{ 0xFF }
2533c6fd2807SJeff Garzik };
2534c6fd2807SJeff Garzik 
2535c6fd2807SJeff Garzik #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
2536c6fd2807SJeff Garzik #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
2537c6fd2807SJeff Garzik 
2538c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2539c6fd2807SJeff Garzik {
2540c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2541c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2542c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2543c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2544c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2545c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2546c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2547c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2548c6fd2807SJeff Garzik }
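/*
 * Worked example (editor's note): ENOUGH() is a ceiling division and EZ()
 * preserves zero ("not specified") fields.  Assuming T is passed in the
 * same scaled units as t->setup * 1000, a PIO-0 setup time of 70 ns
 * quantized against a clock period T of 30000 (i.e. 30 ns) gives
 *
 *	EZ(70 * 1000, 30000) = ENOUGH(70000, 30000)
 *	                     = (70000 - 1) / 30000 + 1 = 3 clocks
 *
 * while an unspecified field stays zero: EZ(0, 30000) = 0.
 */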
2549c6fd2807SJeff Garzik 
2550c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2551c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2552c6fd2807SJeff Garzik {
2553c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2554c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2555c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2556c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2557c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2558c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2559c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2560c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2561c6fd2807SJeff Garzik }
2562c6fd2807SJeff Garzik 
2563c6fd2807SJeff Garzik static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2564c6fd2807SJeff Garzik {
2565c6fd2807SJeff Garzik 	const struct ata_timing *t;
2566c6fd2807SJeff Garzik 
2567c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2568c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2569c6fd2807SJeff Garzik 			return NULL;
2570c6fd2807SJeff Garzik 	return t;
2571c6fd2807SJeff Garzik }
2572c6fd2807SJeff Garzik 
2573c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2574c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2575c6fd2807SJeff Garzik {
2576c6fd2807SJeff Garzik 	const struct ata_timing *s;
2577c6fd2807SJeff Garzik 	struct ata_timing p;
2578c6fd2807SJeff Garzik 
2579c6fd2807SJeff Garzik 	/*
2580c6fd2807SJeff Garzik 	 * Find the mode.
2581c6fd2807SJeff Garzik 	 */
2582c6fd2807SJeff Garzik 
2583c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2584c6fd2807SJeff Garzik 		return -EINVAL;
2585c6fd2807SJeff Garzik 
2586c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2587c6fd2807SJeff Garzik 
2588c6fd2807SJeff Garzik 	/*
2589c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2590c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2591c6fd2807SJeff Garzik 	 */
2592c6fd2807SJeff Garzik 
2593c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2594c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2595c6fd2807SJeff Garzik 		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2596c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2597c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2598c6fd2807SJeff Garzik 		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2599c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2600c6fd2807SJeff Garzik 		}
2601c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2602c6fd2807SJeff Garzik 	}
2603c6fd2807SJeff Garzik 
2604c6fd2807SJeff Garzik 	/*
2605c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2606c6fd2807SJeff Garzik 	 */
2607c6fd2807SJeff Garzik 
2608c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2609c6fd2807SJeff Garzik 
2610c6fd2807SJeff Garzik 	/*
2611c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2612c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2613c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2614c6fd2807SJeff Garzik 	 */
2615c6fd2807SJeff Garzik 
2616fd3367afSAlan 	if (speed > XFER_PIO_6) {
2617c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2618c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2619c6fd2807SJeff Garzik 	}
2620c6fd2807SJeff Garzik 
2621c6fd2807SJeff Garzik 	/*
2622c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2623c6fd2807SJeff Garzik 	 */
2624c6fd2807SJeff Garzik 
2625c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2626c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2627c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2628c6fd2807SJeff Garzik 	}
2629c6fd2807SJeff Garzik 
2630c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2631c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2632c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2633c6fd2807SJeff Garzik 	}
26344f701d1eSAlan Cox 
26354f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
26364f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
26374f701d1eSAlan Cox 	   if so, we must correct this. */
26384f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
26394f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2640c6fd2807SJeff Garzik 
2641c6fd2807SJeff Garzik 	return 0;
2642c6fd2807SJeff Garzik }
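/*
 * Worked example (editor's note) for the lengthening step above: with
 * quantized act8b = 2, rec8b = 3 and cyc8b = 8, the deficit is 3 clocks,
 * so act8b grows by 3 / 2 = 1 to 3 and rec8b becomes 8 - 3 = 5, restoring
 * act8b + rec8b == cyc8b.  Conversely, if rounding made active + recover
 * exceed cycle, the final fixup stretches t->cycle to match their sum.
 */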
2643c6fd2807SJeff Garzik 
2644c6fd2807SJeff Garzik /**
2645c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2646c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2647458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2648c6fd2807SJeff Garzik  *
2649c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2650c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2651c6fd2807SJeff Garzik  *	will apply the limit.
2652c6fd2807SJeff Garzik  *
2653c6fd2807SJeff Garzik  *	LOCKING:
2654c6fd2807SJeff Garzik  *	Inherited from caller.
2655c6fd2807SJeff Garzik  *
2656c6fd2807SJeff Garzik  *	RETURNS:
2657c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2658c6fd2807SJeff Garzik  */
2659458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2660c6fd2807SJeff Garzik {
2661458337dbSTejun Heo 	char buf[32];
2662458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2663458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2664458337dbSTejun Heo 	int quiet, highbit;
2665c6fd2807SJeff Garzik 
2666458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2667458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2668458337dbSTejun Heo 
2669458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2670458337dbSTejun Heo 						  dev->mwdma_mask,
2671c6fd2807SJeff Garzik 						  dev->udma_mask);
2672458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2673c6fd2807SJeff Garzik 
2674458337dbSTejun Heo 	switch (sel) {
2675458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2676458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2677458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2678458337dbSTejun Heo 		break;
2679458337dbSTejun Heo 
2680458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2681458337dbSTejun Heo 		if (udma_mask) {
2682458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2683458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2684458337dbSTejun Heo 			if (!udma_mask)
2685458337dbSTejun Heo 				return -ENOENT;
2686458337dbSTejun Heo 		} else if (mwdma_mask) {
2687458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2688458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2689458337dbSTejun Heo 			if (!mwdma_mask)
2690458337dbSTejun Heo 				return -ENOENT;
2691458337dbSTejun Heo 		}
2692458337dbSTejun Heo 		break;
2693458337dbSTejun Heo 
2694458337dbSTejun Heo 	case ATA_DNXFER_40C:
2695458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2696458337dbSTejun Heo 		break;
2697458337dbSTejun Heo 
2698458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2699458337dbSTejun Heo 		pio_mask &= 1;
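		/* fall through: also clear the DMA masks below */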
2700458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2701458337dbSTejun Heo 		mwdma_mask = 0;
2702458337dbSTejun Heo 		udma_mask = 0;
2703458337dbSTejun Heo 		break;
2704458337dbSTejun Heo 
2705458337dbSTejun Heo 	default:
2706458337dbSTejun Heo 		BUG();
2707458337dbSTejun Heo 	}
2708458337dbSTejun Heo 
2709458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2710458337dbSTejun Heo 
2711458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2712458337dbSTejun Heo 		return -ENOENT;
2713458337dbSTejun Heo 
2714458337dbSTejun Heo 	if (!quiet) {
2715458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2716458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
2717458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
2718458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2719458337dbSTejun Heo 		else
2720458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
2721458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
2722458337dbSTejun Heo 
2723458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
2724458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
2725458337dbSTejun Heo 	}
2726c6fd2807SJeff Garzik 
2727c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2728c6fd2807SJeff Garzik 			    &dev->udma_mask);
2729c6fd2807SJeff Garzik 
2730c6fd2807SJeff Garzik 	return 0;
2731c6fd2807SJeff Garzik }
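/*
 * Illustrative usage sketch (editor's note, not part of the driver): as
 * with the SATA link speed limit above, ata_down_xfermask_limit() only
 * records the new ceiling; a subsequent ata_set_mode() call reprograms
 * the controller and the device.  "failed_dev" is a hypothetical local.
 *
 *	struct ata_device *failed_dev;
 *
 *	if (ata_down_xfermask_limit(dev, ATA_DNXFER_PIO) == 0)
 *		ata_set_mode(dev->ap, &failed_dev);
 */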
2732c6fd2807SJeff Garzik 
2733c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
2734c6fd2807SJeff Garzik {
2735baa1e78aSTejun Heo 	struct ata_eh_context *ehc = &dev->ap->eh_context;
2736c6fd2807SJeff Garzik 	unsigned int err_mask;
2737c6fd2807SJeff Garzik 	int rc;
2738c6fd2807SJeff Garzik 
2739c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
2740c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2741c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
2742c6fd2807SJeff Garzik 
2743c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
274411750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
274511750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
274611750a40SAlan 		err_mask &= ~AC_ERR_DEV;
274711750a40SAlan 
2748c6fd2807SJeff Garzik 	if (err_mask) {
2749c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2750c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
2751c6fd2807SJeff Garzik 		return -EIO;
2752c6fd2807SJeff Garzik 	}
2753c6fd2807SJeff Garzik 
2754baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2755c6fd2807SJeff Garzik 	rc = ata_dev_revalidate(dev, 0);
2756baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2757c6fd2807SJeff Garzik 	if (rc)
2758c6fd2807SJeff Garzik 		return rc;
2759c6fd2807SJeff Garzik 
2760c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2761c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
2762c6fd2807SJeff Garzik 
2763c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2764c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2765c6fd2807SJeff Garzik 	return 0;
2766c6fd2807SJeff Garzik }
2767c6fd2807SJeff Garzik 
2768c6fd2807SJeff Garzik /**
276904351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2770c6fd2807SJeff Garzik  *	@ap: port on which timings will be programmed
2771c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
2772c6fd2807SJeff Garzik  *
277304351821SAlan  *	Standard implementation of the function used to tune and set
277404351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
277504351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
2776c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
2777c6fd2807SJeff Garzik  *
2778c6fd2807SJeff Garzik  *	LOCKING:
2779c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2780c6fd2807SJeff Garzik  *
2781c6fd2807SJeff Garzik  *	RETURNS:
2782c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
2783c6fd2807SJeff Garzik  */
278404351821SAlan 
278504351821SAlan int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2786c6fd2807SJeff Garzik {
2787c6fd2807SJeff Garzik 	struct ata_device *dev;
2788c6fd2807SJeff Garzik 	int i, rc = 0, used_dma = 0, found = 0;
2789c6fd2807SJeff Garzik 
2790c6fd2807SJeff Garzik 
2791c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
2792c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2793c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
2794c6fd2807SJeff Garzik 
2795c6fd2807SJeff Garzik 		dev = &ap->device[i];
2796c6fd2807SJeff Garzik 
2797c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2798c6fd2807SJeff Garzik 			continue;
2799c6fd2807SJeff Garzik 
2800c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
2801c6fd2807SJeff Garzik 
2802c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2803c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2804c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2805c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2806c6fd2807SJeff Garzik 
2807c6fd2807SJeff Garzik 		found = 1;
2808c6fd2807SJeff Garzik 		if (dev->dma_mode)
2809c6fd2807SJeff Garzik 			used_dma = 1;
2810c6fd2807SJeff Garzik 	}
2811c6fd2807SJeff Garzik 	if (!found)
2812c6fd2807SJeff Garzik 		goto out;
2813c6fd2807SJeff Garzik 
2814c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
2815c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2816c6fd2807SJeff Garzik 		dev = &ap->device[i];
2817c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2818c6fd2807SJeff Garzik 			continue;
2819c6fd2807SJeff Garzik 
2820c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
2821c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2822c6fd2807SJeff Garzik 			rc = -EINVAL;
2823c6fd2807SJeff Garzik 			goto out;
2824c6fd2807SJeff Garzik 		}
2825c6fd2807SJeff Garzik 
2826c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
2827c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
2828c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
2829c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
2830c6fd2807SJeff Garzik 	}
2831c6fd2807SJeff Garzik 
2832c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
2833c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2834c6fd2807SJeff Garzik 		dev = &ap->device[i];
2835c6fd2807SJeff Garzik 
2836c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2837c6fd2807SJeff Garzik 			continue;
2838c6fd2807SJeff Garzik 
2839c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
2840c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2841c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
2842c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
2843c6fd2807SJeff Garzik 	}
2844c6fd2807SJeff Garzik 
2845c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
2846c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2847c6fd2807SJeff Garzik 		dev = &ap->device[i];
2848c6fd2807SJeff Garzik 
285018d90debSAlan 		/* don't update disabled devices' xfer mode */
28509666f400STejun Heo 		if (!ata_dev_enabled(dev))
2851c6fd2807SJeff Garzik 			continue;
2852c6fd2807SJeff Garzik 
2853c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
2854c6fd2807SJeff Garzik 		if (rc)
2855c6fd2807SJeff Garzik 			goto out;
2856c6fd2807SJeff Garzik 	}
2857c6fd2807SJeff Garzik 
2858c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
2859c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
2860c6fd2807SJeff Garzik 	 */
2861cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2862032af1ceSAlan 		ap->host->simplex_claimed = ap;
2863c6fd2807SJeff Garzik 
2864c6fd2807SJeff Garzik  out:
2865c6fd2807SJeff Garzik 	if (rc)
2866c6fd2807SJeff Garzik 		*r_failed_dev = dev;
2867c6fd2807SJeff Garzik 	return rc;
2868c6fd2807SJeff Garzik }
2869c6fd2807SJeff Garzik 
2870c6fd2807SJeff Garzik /**
287104351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
287204351821SAlan  *	@ap: port on which timings will be programmed
287304351821SAlan  *	@r_failed_dev: out parameter for failed device
287404351821SAlan  *
287504351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
287604351821SAlan  *	ata_set_mode() fails, pointer to the failing device is
287704351821SAlan  *	returned in @r_failed_dev.
287804351821SAlan  *
287904351821SAlan  *	LOCKING:
288004351821SAlan  *	PCI/etc. bus probe sem.
288104351821SAlan  *
288204351821SAlan  *	RETURNS:
288304351821SAlan  *	0 on success, negative errno otherwise
288404351821SAlan  */
288504351821SAlan int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
288604351821SAlan {
288704351821SAlan 	/* has private set_mode? */
288804351821SAlan 	if (ap->ops->set_mode)
288904351821SAlan 		return ap->ops->set_mode(ap, r_failed_dev);
289004351821SAlan 	return ata_do_set_mode(ap, r_failed_dev);
289104351821SAlan }
289204351821SAlan 
289304351821SAlan /**
2894c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
2895c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
2896c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
2897c6fd2807SJeff Garzik  *
2898c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
2899c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
2900c6fd2807SJeff Garzik  *	other threads.
2901c6fd2807SJeff Garzik  *
2902c6fd2807SJeff Garzik  *	LOCKING:
2903cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
2904c6fd2807SJeff Garzik  */
2905c6fd2807SJeff Garzik 
2906c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
2907c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
2908c6fd2807SJeff Garzik {
2909c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
2910c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
2911c6fd2807SJeff Garzik }
2912c6fd2807SJeff Garzik 
2913c6fd2807SJeff Garzik /**
2914c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
2915c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
2916c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
2917c6fd2807SJeff Garzik  *	@tmout: overall timeout
2918c6fd2807SJeff Garzik  *
2919c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
2920c6fd2807SJeff Garzik  *	or a timeout occurs.
2921c6fd2807SJeff Garzik  *
2922d1adc1bbSTejun Heo  *	LOCKING:
2923d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
2924d1adc1bbSTejun Heo  *
2925d1adc1bbSTejun Heo  *	RETURNS:
2926d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
2927c6fd2807SJeff Garzik  */
2928d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
2929c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
2930c6fd2807SJeff Garzik {
2931c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
2932c6fd2807SJeff Garzik 	u8 status;
2933c6fd2807SJeff Garzik 
2934c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
2935c6fd2807SJeff Garzik 	timer_start = jiffies;
2936c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
2937d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2938d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2939c6fd2807SJeff Garzik 		msleep(50);
2940c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
2941c6fd2807SJeff Garzik 	}
2942c6fd2807SJeff Garzik 
2943d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
2944c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
294535aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
294635aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
2947c6fd2807SJeff Garzik 
2948c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
2949d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2950d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2951c6fd2807SJeff Garzik 		msleep(50);
2952c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
2953c6fd2807SJeff Garzik 	}
2954c6fd2807SJeff Garzik 
2955d1adc1bbSTejun Heo 	if (status == 0xff)
2956d1adc1bbSTejun Heo 		return -ENODEV;
2957d1adc1bbSTejun Heo 
2958c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
2959c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
296035aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
296135aa7a43SJeff Garzik 				tmout / HZ, status);
2962d1adc1bbSTejun Heo 		return -EBUSY;
2963c6fd2807SJeff Garzik 	}
2964c6fd2807SJeff Garzik 
2965c6fd2807SJeff Garzik 	return 0;
2966c6fd2807SJeff Garzik }
2967c6fd2807SJeff Garzik 
2968d4b2bab4STejun Heo /**
2969d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
2970d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
2971d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
2972d4b2bab4STejun Heo  *
2973d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
2974d4b2bab4STejun Heo  *	occurs.
2975d4b2bab4STejun Heo  *
2976d4b2bab4STejun Heo  *	LOCKING:
2977d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
2978d4b2bab4STejun Heo  *
2979d4b2bab4STejun Heo  *	RETURNS:
2980d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
2981d4b2bab4STejun Heo  */
2982d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
2983d4b2bab4STejun Heo {
2984d4b2bab4STejun Heo 	unsigned long start = jiffies;
2985d4b2bab4STejun Heo 	int warned = 0;
2986d4b2bab4STejun Heo 
2987d4b2bab4STejun Heo 	while (1) {
2988d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
2989d4b2bab4STejun Heo 		unsigned long now = jiffies;
2990d4b2bab4STejun Heo 
2991d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
2992d4b2bab4STejun Heo 			return 0;
2993fd7fe701STejun Heo 		if (!ata_port_online(ap) && status == 0xff)
2994d4b2bab4STejun Heo 			return -ENODEV;
2995d4b2bab4STejun Heo 		if (time_after(now, deadline))
2996d4b2bab4STejun Heo 			return -EBUSY;
2997d4b2bab4STejun Heo 
2998d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
2999d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3000d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3001d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3002d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3003d4b2bab4STejun Heo 			warned = 1;
3004d4b2bab4STejun Heo 		}
3005d4b2bab4STejun Heo 
3006d4b2bab4STejun Heo 		msleep(50);
3007d4b2bab4STejun Heo 	}
3008d4b2bab4STejun Heo }
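/*
 * Illustrative usage sketch (editor's note, not part of the driver):
 * deadlines are absolute jiffies values, so a caller computes one up
 * front and may pass the same deadline through several wait stages.
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(5000);
 *	int rc = ata_wait_ready(ap, deadline);
 *
 *	if (rc)		// -ENODEV or -EBUSY, BSY never cleared in time
 *		return rc;
 */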
3009d4b2bab4STejun Heo 
3010d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3011d4b2bab4STejun Heo 			      unsigned long deadline)
3012c6fd2807SJeff Garzik {
3013c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3014c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3015c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
30169b89391cSTejun Heo 	int rc, ret = 0;
3017c6fd2807SJeff Garzik 
3018c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3019c6fd2807SJeff Garzik 	 * BSY bit to clear
3020c6fd2807SJeff Garzik 	 */
3021d4b2bab4STejun Heo 	if (dev0) {
3022d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30239b89391cSTejun Heo 		if (rc) {
30249b89391cSTejun Heo 			if (rc != -ENODEV)
3025d4b2bab4STejun Heo 				return rc;
30269b89391cSTejun Heo 			ret = rc;
30279b89391cSTejun Heo 		}
3028d4b2bab4STejun Heo 	}
3029c6fd2807SJeff Garzik 
3030e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3031e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3032c6fd2807SJeff Garzik 	 */
3033e141d999STejun Heo 	if (dev1) {
3034e141d999STejun Heo 		int i;
3035c6fd2807SJeff Garzik 
3036c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3037e141d999STejun Heo 
3038e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3039e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3040e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3041e141d999STejun Heo 		 */
3042e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3043e141d999STejun Heo 			u8 nsect, lbal;
3044e141d999STejun Heo 
30450d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
30460d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3047c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3048c6fd2807SJeff Garzik 				break;
3049c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3050c6fd2807SJeff Garzik 		}
3051e141d999STejun Heo 
3052d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30539b89391cSTejun Heo 		if (rc) {
30549b89391cSTejun Heo 			if (rc != -ENODEV)
3055d4b2bab4STejun Heo 				return rc;
30569b89391cSTejun Heo 			ret = rc;
30579b89391cSTejun Heo 		}
3058d4b2bab4STejun Heo 	}
3059c6fd2807SJeff Garzik 
3060c6fd2807SJeff Garzik 	/* is all this really necessary? */
3061c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3062c6fd2807SJeff Garzik 	if (dev1)
3063c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3064c6fd2807SJeff Garzik 	if (dev0)
3065c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3066d4b2bab4STejun Heo 
30679b89391cSTejun Heo 	return ret;
3068c6fd2807SJeff Garzik }
3069c6fd2807SJeff Garzik 
3070d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3071d4b2bab4STejun Heo 			     unsigned long deadline)
3072c6fd2807SJeff Garzik {
3073c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3074c6fd2807SJeff Garzik 
307544877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3076c6fd2807SJeff Garzik 
3077c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
30780d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3079c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
30800d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3081c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
30820d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3083c6fd2807SJeff Garzik 
3084c6fd2807SJeff Garzik 	/* spec mandates ">= 2ms" before checking status.
3085c6fd2807SJeff Garzik 	 * We wait 150ms, because that was the magic delay used for
3086c6fd2807SJeff Garzik 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3087c6fd2807SJeff Garzik 	 * between when the ATA command register is written, and then
3088c6fd2807SJeff Garzik 	 * status is checked.  Because waiting for "a while" before
3089c6fd2807SJeff Garzik 	 * checking status is fine, post SRST, we perform this magic
3090c6fd2807SJeff Garzik 	 * delay here as well.
3091c6fd2807SJeff Garzik 	 *
3092c6fd2807SJeff Garzik 	 * Old drivers/ide uses the 2 ms rule and then waits for ready
3093c6fd2807SJeff Garzik 	 */
3094c6fd2807SJeff Garzik 	msleep(150);
3095c6fd2807SJeff Garzik 
3096c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3097c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3098c6fd2807SJeff Garzik 	 * pulldown resistor.
3099c6fd2807SJeff Garzik 	 */
3100d1adc1bbSTejun Heo 	if (ata_check_status(ap) == 0xFF)
31019b89391cSTejun Heo 		return -ENODEV;
3102c6fd2807SJeff Garzik 
3103d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3104c6fd2807SJeff Garzik }
3105c6fd2807SJeff Garzik 
3106c6fd2807SJeff Garzik /**
3107c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3108c6fd2807SJeff Garzik  *	@ap: port to reset
3109c6fd2807SJeff Garzik  *
3110c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3111c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3112c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3113c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3114c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3115c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3116c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3117c6fd2807SJeff Garzik  *
3118c6fd2807SJeff Garzik  *	LOCKING:
3119c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3120cca3974eSJeff Garzik  *	Obtains host lock.
3121c6fd2807SJeff Garzik  *
3122c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3123c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3124c6fd2807SJeff Garzik  */
3125c6fd2807SJeff Garzik 
3126c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3127c6fd2807SJeff Garzik {
3128c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3129c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3130c6fd2807SJeff Garzik 	u8 err;
3131c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
31329b89391cSTejun Heo 	int rc;
3133c6fd2807SJeff Garzik 
313444877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3135c6fd2807SJeff Garzik 
3136c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3137c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3138c6fd2807SJeff Garzik 		dev0 = 1;
3139c6fd2807SJeff Garzik 	else {
3140c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3141c6fd2807SJeff Garzik 		if (slave_possible)
3142c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3143c6fd2807SJeff Garzik 	}
3144c6fd2807SJeff Garzik 
3145c6fd2807SJeff Garzik 	if (dev0)
3146c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3147c6fd2807SJeff Garzik 	if (dev1)
3148c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3149c6fd2807SJeff Garzik 
3150c6fd2807SJeff Garzik 	/* select device 0 again */
3151c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3152c6fd2807SJeff Garzik 
3153c6fd2807SJeff Garzik 	/* issue bus reset */
31549b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
31559b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
31569b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3157c6fd2807SJeff Garzik 			goto err_out;
31589b89391cSTejun Heo 	}
3159c6fd2807SJeff Garzik 
3160c6fd2807SJeff Garzik 	/*
3161c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3162c6fd2807SJeff Garzik 	 */
3163c6fd2807SJeff Garzik 	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
3164c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
3165c6fd2807SJeff Garzik 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
3166c6fd2807SJeff Garzik 
3167c6fd2807SJeff Garzik 	/* re-enable interrupts */
316883625006SAkira Iguchi 	ap->ops->irq_on(ap);
3169c6fd2807SJeff Garzik 
3170c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3171c6fd2807SJeff Garzik 	if (ap->device[1].class != ATA_DEV_NONE)
3172c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3173c6fd2807SJeff Garzik 	if (ap->device[0].class != ATA_DEV_NONE)
3174c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3175c6fd2807SJeff Garzik 
3176c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
3177c6fd2807SJeff Garzik 	if ((ap->device[0].class == ATA_DEV_NONE) &&
3178c6fd2807SJeff Garzik 	    (ap->device[1].class == ATA_DEV_NONE))
3179c6fd2807SJeff Garzik 		goto err_out;
3180c6fd2807SJeff Garzik 
3181c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3182c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
31830d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3184c6fd2807SJeff Garzik 	}
3185c6fd2807SJeff Garzik 
3186c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3187c6fd2807SJeff Garzik 	return;
3188c6fd2807SJeff Garzik 
3189c6fd2807SJeff Garzik err_out:
3190c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3191c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
3192c6fd2807SJeff Garzik 
3193c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3194c6fd2807SJeff Garzik }
3195c6fd2807SJeff Garzik 
3196c6fd2807SJeff Garzik /**
3197c6fd2807SJeff Garzik  *	sata_phy_debounce - debounce SATA phy status
3198c6fd2807SJeff Garzik  *	@ap: ATA port to debounce SATA phy status for
3199c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3200d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3201c6fd2807SJeff Garzik  *
3202c6fd2807SJeff Garzik  *	Make sure SStatus of @ap reaches stable state, determined by
3203c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3204c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3205d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3206d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3207c6fd2807SJeff Garzik  *	until timeout and then returns 0 if DET is stable at 1.
3208c6fd2807SJeff Garzik  *
3209d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3210d4b2bab4STejun Heo  *	two is used.
3211d4b2bab4STejun Heo  *
3212c6fd2807SJeff Garzik  *	LOCKING:
3213c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3214c6fd2807SJeff Garzik  *
3215c6fd2807SJeff Garzik  *	RETURNS:
3216c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3217c6fd2807SJeff Garzik  */
3218d4b2bab4STejun Heo int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3219d4b2bab4STejun Heo 		      unsigned long deadline)
3220c6fd2807SJeff Garzik {
3221c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3222d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3223d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3224c6fd2807SJeff Garzik 	u32 last, cur;
3225c6fd2807SJeff Garzik 	int rc;
3226c6fd2807SJeff Garzik 
3227d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3228d4b2bab4STejun Heo 	if (time_before(t, deadline))
3229d4b2bab4STejun Heo 		deadline = t;
3230d4b2bab4STejun Heo 
3231c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3232c6fd2807SJeff Garzik 		return rc;
3233c6fd2807SJeff Garzik 	cur &= 0xf;
3234c6fd2807SJeff Garzik 
3235c6fd2807SJeff Garzik 	last = cur;
3236c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3237c6fd2807SJeff Garzik 
3238c6fd2807SJeff Garzik 	while (1) {
3239c6fd2807SJeff Garzik 		msleep(interval_msec);
3240c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3241c6fd2807SJeff Garzik 			return rc;
3242c6fd2807SJeff Garzik 		cur &= 0xf;
3243c6fd2807SJeff Garzik 
3244c6fd2807SJeff Garzik 		/* DET stable? */
3245c6fd2807SJeff Garzik 		if (cur == last) {
3246d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3247c6fd2807SJeff Garzik 				continue;
3248c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3249c6fd2807SJeff Garzik 				return 0;
3250c6fd2807SJeff Garzik 			continue;
3251c6fd2807SJeff Garzik 		}
3252c6fd2807SJeff Garzik 
3253c6fd2807SJeff Garzik 		/* unstable, start over */
3254c6fd2807SJeff Garzik 		last = cur;
3255c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3256c6fd2807SJeff Garzik 
3257d4b2bab4STejun Heo 		/* check deadline */
3258d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3259c6fd2807SJeff Garzik 			return -EBUSY;
3260c6fd2807SJeff Garzik 	}
3261c6fd2807SJeff Garzik }
3262c6fd2807SJeff Garzik 
3263c6fd2807SJeff Garzik /**
3264c6fd2807SJeff Garzik  *	sata_phy_resume - resume SATA phy
3265c6fd2807SJeff Garzik  *	@ap: ATA port to resume SATA phy for
3266c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3267d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3268c6fd2807SJeff Garzik  *
3269c6fd2807SJeff Garzik  *	Resume SATA phy of @ap and debounce it.
3270c6fd2807SJeff Garzik  *
3271c6fd2807SJeff Garzik  *	LOCKING:
3272c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3273c6fd2807SJeff Garzik  *
3274c6fd2807SJeff Garzik  *	RETURNS:
3275c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3276c6fd2807SJeff Garzik  */
3277d4b2bab4STejun Heo int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3278d4b2bab4STejun Heo 		    unsigned long deadline)
3279c6fd2807SJeff Garzik {
3280c6fd2807SJeff Garzik 	u32 scontrol;
3281c6fd2807SJeff Garzik 	int rc;
3282c6fd2807SJeff Garzik 
3283c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3284c6fd2807SJeff Garzik 		return rc;
3285c6fd2807SJeff Garzik 
3286c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3287c6fd2807SJeff Garzik 
3288c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3289c6fd2807SJeff Garzik 		return rc;
3290c6fd2807SJeff Garzik 
3291c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3292c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3293c6fd2807SJeff Garzik 	 */
3294c6fd2807SJeff Garzik 	msleep(200);
3295c6fd2807SJeff Garzik 
3296d4b2bab4STejun Heo 	return sata_phy_debounce(ap, params, deadline);
3297c6fd2807SJeff Garzik }
3298c6fd2807SJeff Garzik 
3299c6fd2807SJeff Garzik /**
3300c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3301c6fd2807SJeff Garzik  *	@ap: ATA port to be reset
3302d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3303c6fd2807SJeff Garzik  *
3304b8cffc6aSTejun Heo  *	@ap is about to be reset.  Initialize it.  Failure from
3305b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give
3306b8cffc6aSTejun Heo  *	up that port, so prereset should be best-effort.  It does its
3307b8cffc6aSTejun Heo  *	best to prepare for the reset sequence, but if things go wrong,
3308b8cffc6aSTejun Heo  *	it should just whine, not fail.
3309c6fd2807SJeff Garzik  *
3310c6fd2807SJeff Garzik  *	LOCKING:
3311c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3312c6fd2807SJeff Garzik  *
3313c6fd2807SJeff Garzik  *	RETURNS:
3314c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3315c6fd2807SJeff Garzik  */
3316d4b2bab4STejun Heo int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3317c6fd2807SJeff Garzik {
3318c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
3319c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3320c6fd2807SJeff Garzik 	int rc;
3321c6fd2807SJeff Garzik 
332231daabdaSTejun Heo 	/* handle link resume */
3323c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3324c6fd2807SJeff Garzik 	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3325c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3326c6fd2807SJeff Garzik 
3327c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3328c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3329c6fd2807SJeff Garzik 		return 0;
3330c6fd2807SJeff Garzik 
3331c6fd2807SJeff Garzik 	/* if SATA, resume phy */
3332a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3333d4b2bab4STejun Heo 		rc = sata_phy_resume(ap, timing, deadline);
3334b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3335b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3336c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_WARNING, "failed to resume "
3337c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3338c6fd2807SJeff Garzik 	}
3339c6fd2807SJeff Garzik 
3340c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3341c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3342c6fd2807SJeff Garzik 	 */
3343b8cffc6aSTejun Heo 	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3344b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
33456dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3346b8cffc6aSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "device not ready "
3347b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3348b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3349b8cffc6aSTejun Heo 		}
3350b8cffc6aSTejun Heo 	}
3351c6fd2807SJeff Garzik 
3352c6fd2807SJeff Garzik 	return 0;
3353c6fd2807SJeff Garzik }
3354c6fd2807SJeff Garzik 
3355c6fd2807SJeff Garzik /**
3356c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3357c6fd2807SJeff Garzik  *	@ap: port to reset
3358c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3359d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3360c6fd2807SJeff Garzik  *
3361c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3362c6fd2807SJeff Garzik  *
3363c6fd2807SJeff Garzik  *	LOCKING:
3364c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3365c6fd2807SJeff Garzik  *
3366c6fd2807SJeff Garzik  *	RETURNS:
3367c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3368c6fd2807SJeff Garzik  */
3369d4b2bab4STejun Heo int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3370d4b2bab4STejun Heo 		      unsigned long deadline)
3371c6fd2807SJeff Garzik {
3372c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3373d4b2bab4STejun Heo 	unsigned int devmask = 0;
3374d4b2bab4STejun Heo 	int rc;
3375c6fd2807SJeff Garzik 	u8 err;
3376c6fd2807SJeff Garzik 
3377c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3378c6fd2807SJeff Garzik 
3379c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3380c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3381c6fd2807SJeff Garzik 		goto out;
3382c6fd2807SJeff Garzik 	}
3383c6fd2807SJeff Garzik 
3384c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3385c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3386c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3387c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3388c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3389c6fd2807SJeff Garzik 
3390c6fd2807SJeff Garzik 	/* select device 0 again */
3391c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3392c6fd2807SJeff Garzik 
3393c6fd2807SJeff Garzik 	/* issue bus reset */
3394c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3395d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
33969b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
33979b89391cSTejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
3398d4b2bab4STejun Heo 		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3399d4b2bab4STejun Heo 		return rc;
3400c6fd2807SJeff Garzik 	}
3401c6fd2807SJeff Garzik 
3402c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
3403c6fd2807SJeff Garzik 	classes[0] = ata_dev_try_classify(ap, 0, &err);
3404c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
3405c6fd2807SJeff Garzik 		classes[1] = ata_dev_try_classify(ap, 1, &err);
3406c6fd2807SJeff Garzik 
3407c6fd2807SJeff Garzik  out:
3408c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3409c6fd2807SJeff Garzik 	return 0;
3410c6fd2807SJeff Garzik }
3411c6fd2807SJeff Garzik 
3412c6fd2807SJeff Garzik /**
3413b6103f6dSTejun Heo  *	sata_port_hardreset - reset port via SATA phy reset
3414c6fd2807SJeff Garzik  *	@ap: port to reset
3415b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3416d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3417c6fd2807SJeff Garzik  *
3418c6fd2807SJeff Garzik  *	SATA phy-reset host port using DET bits of SControl register.
3419c6fd2807SJeff Garzik  *
3420c6fd2807SJeff Garzik  *	LOCKING:
3421c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3422c6fd2807SJeff Garzik  *
3423c6fd2807SJeff Garzik  *	RETURNS:
3424c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3425c6fd2807SJeff Garzik  */
3426d4b2bab4STejun Heo int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3427d4b2bab4STejun Heo 			unsigned long deadline)
3428c6fd2807SJeff Garzik {
3429c6fd2807SJeff Garzik 	u32 scontrol;
3430c6fd2807SJeff Garzik 	int rc;
3431c6fd2807SJeff Garzik 
3432c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3433c6fd2807SJeff Garzik 
3434c6fd2807SJeff Garzik 	if (sata_set_spd_needed(ap)) {
3435c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3436c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3437c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3438c6fd2807SJeff Garzik 		 * and Sil3124.
3439c6fd2807SJeff Garzik 		 */
3440c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3441b6103f6dSTejun Heo 			goto out;
3442c6fd2807SJeff Garzik 
3443cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3444c6fd2807SJeff Garzik 
3445c6fd2807SJeff Garzik 		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3446b6103f6dSTejun Heo 			goto out;
3447c6fd2807SJeff Garzik 
3448c6fd2807SJeff Garzik 		sata_set_spd(ap);
3449c6fd2807SJeff Garzik 	}
3450c6fd2807SJeff Garzik 
3451c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3452c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3453b6103f6dSTejun Heo 		goto out;
3454c6fd2807SJeff Garzik 
3455c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3456c6fd2807SJeff Garzik 
3457c6fd2807SJeff Garzik 	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3458b6103f6dSTejun Heo 		goto out;
3459c6fd2807SJeff Garzik 
3460c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3461c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3462c6fd2807SJeff Garzik 	 */
3463c6fd2807SJeff Garzik 	msleep(1);
3464c6fd2807SJeff Garzik 
3465c6fd2807SJeff Garzik 	/* bring phy back */
3466d4b2bab4STejun Heo 	rc = sata_phy_resume(ap, timing, deadline);
3467b6103f6dSTejun Heo  out:
3468b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3469b6103f6dSTejun Heo 	return rc;
3470b6103f6dSTejun Heo }
3471b6103f6dSTejun Heo 
3472b6103f6dSTejun Heo /**
3473b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3474b6103f6dSTejun Heo  *	@ap: port to reset
3475b6103f6dSTejun Heo  *	@class: resulting class of attached device
3476d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3477b6103f6dSTejun Heo  *
3478b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3479b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3480b6103f6dSTejun Heo  *
3481b6103f6dSTejun Heo  *	LOCKING:
3482b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3483b6103f6dSTejun Heo  *
3484b6103f6dSTejun Heo  *	RETURNS:
3485b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3486b6103f6dSTejun Heo  */
3487d4b2bab4STejun Heo int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3488d4b2bab4STejun Heo 		       unsigned long deadline)
3489b6103f6dSTejun Heo {
3490b6103f6dSTejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3491b6103f6dSTejun Heo 	int rc;
3492b6103f6dSTejun Heo 
3493b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3494b6103f6dSTejun Heo 
3495b6103f6dSTejun Heo 	/* do hardreset */
3496d4b2bab4STejun Heo 	rc = sata_port_hardreset(ap, timing, deadline);
3497b6103f6dSTejun Heo 	if (rc) {
3498b6103f6dSTejun Heo 		ata_port_printk(ap, KERN_ERR,
3499b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3500b6103f6dSTejun Heo 		return rc;
3501b6103f6dSTejun Heo 	}
3502c6fd2807SJeff Garzik 
3503c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3504c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3505c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3506c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3507c6fd2807SJeff Garzik 		return 0;
3508c6fd2807SJeff Garzik 	}
3509c6fd2807SJeff Garzik 
351034fee227STejun Heo 	/* wait a while before checking status, see SRST for more info */
351134fee227STejun Heo 	msleep(150);
351234fee227STejun Heo 
3513d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
35149b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
35159b89391cSTejun Heo 	if (rc) {
3516c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR,
3517d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3518d4b2bab4STejun Heo 		return rc;
3519c6fd2807SJeff Garzik 	}
3520c6fd2807SJeff Garzik 
3521c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3522c6fd2807SJeff Garzik 
3523c6fd2807SJeff Garzik 	*class = ata_dev_try_classify(ap, 0, NULL);
3524c6fd2807SJeff Garzik 
3525c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3526c6fd2807SJeff Garzik 	return 0;
3527c6fd2807SJeff Garzik }
3528c6fd2807SJeff Garzik 
3529c6fd2807SJeff Garzik /**
3530c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3531c6fd2807SJeff Garzik  *	@ap: the target ata_port
3532c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3533c6fd2807SJeff Garzik  *
3534c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3535c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3536c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3537c6fd2807SJeff Garzik  *
3538c6fd2807SJeff Garzik  *	LOCKING:
3539c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3540c6fd2807SJeff Garzik  */
3541c6fd2807SJeff Garzik void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3542c6fd2807SJeff Garzik {
3543c6fd2807SJeff Garzik 	u32 serror;
3544c6fd2807SJeff Garzik 
3545c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3546c6fd2807SJeff Garzik 
3547c6fd2807SJeff Garzik 	/* print link status */
3548c6fd2807SJeff Garzik 	sata_print_link_status(ap);
3549c6fd2807SJeff Garzik 
3550c6fd2807SJeff Garzik 	/* clear SError */
3551c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3552c6fd2807SJeff Garzik 		sata_scr_write(ap, SCR_ERROR, serror);
3553c6fd2807SJeff Garzik 
3554c6fd2807SJeff Garzik 	/* re-enable interrupts */
355583625006SAkira Iguchi 	if (!ap->ops->error_handler)
355683625006SAkira Iguchi 		ap->ops->irq_on(ap);
3557c6fd2807SJeff Garzik 
3558c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3559c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3560c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3561c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3562c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3563c6fd2807SJeff Garzik 
3564c6fd2807SJeff Garzik 	/* bail out if no device is present */
3565c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3566c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3567c6fd2807SJeff Garzik 		return;
3568c6fd2807SJeff Garzik 	}
3569c6fd2807SJeff Garzik 
3570c6fd2807SJeff Garzik 	/* set up device control */
35710d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
35720d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3573c6fd2807SJeff Garzik 
3574c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3575c6fd2807SJeff Garzik }
3576c6fd2807SJeff Garzik 
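/*
 * Illustrative sketch (not part of libata): how a hypothetical SATA LLDD's
 * ->error_handler could hand the standard reset chain, including
 * sata_std_hardreset() and ata_std_postreset() above, to the EH core.
 * ata_do_eh() and the standard prereset/softreset helpers are assumed to
 * be available from <linux/libata.h> as in other drivers of this vintage.
 */
static void foo_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}
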
3577c6fd2807SJeff Garzik /**
3578c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3579c6fd2807SJeff Garzik  *	@dev: device to compare against
3580c6fd2807SJeff Garzik  *	@new_class: class of the new device
3581c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3582c6fd2807SJeff Garzik  *
3583c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3584c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3585c6fd2807SJeff Garzik  *	@new_id.
3586c6fd2807SJeff Garzik  *
3587c6fd2807SJeff Garzik  *	LOCKING:
3588c6fd2807SJeff Garzik  *	None.
3589c6fd2807SJeff Garzik  *
3590c6fd2807SJeff Garzik  *	RETURNS:
3591c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3592c6fd2807SJeff Garzik  */
3593c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3594c6fd2807SJeff Garzik 			       const u16 *new_id)
3595c6fd2807SJeff Garzik {
3596c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3597a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3598a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3599c6fd2807SJeff Garzik 
3600c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3601c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3602c6fd2807SJeff Garzik 			       dev->class, new_class);
3603c6fd2807SJeff Garzik 		return 0;
3604c6fd2807SJeff Garzik 	}
3605c6fd2807SJeff Garzik 
3606a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3607a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3608a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3609a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3610c6fd2807SJeff Garzik 
3611c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3612c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3613c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3614c6fd2807SJeff Garzik 		return 0;
3615c6fd2807SJeff Garzik 	}
3616c6fd2807SJeff Garzik 
3617c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3618c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3619c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3620c6fd2807SJeff Garzik 		return 0;
3621c6fd2807SJeff Garzik 	}
3622c6fd2807SJeff Garzik 
3623c6fd2807SJeff Garzik 	return 1;
3624c6fd2807SJeff Garzik }
3625c6fd2807SJeff Garzik 
3626c6fd2807SJeff Garzik /**
3627fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
36283fae450cSHenrik Kretzschmar  *	@dev: target ATA device
3629bff04647STejun Heo  *	@readid_flags: read ID flags
3630c6fd2807SJeff Garzik  *
3631c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
3632c6fd2807SJeff Garzik  *	the port.
3633c6fd2807SJeff Garzik  *
3634c6fd2807SJeff Garzik  *	LOCKING:
3635c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3636c6fd2807SJeff Garzik  *
3637c6fd2807SJeff Garzik  *	RETURNS:
3638c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3639c6fd2807SJeff Garzik  */
3640fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3641c6fd2807SJeff Garzik {
3642c6fd2807SJeff Garzik 	unsigned int class = dev->class;
3643c6fd2807SJeff Garzik 	u16 *id = (void *)dev->ap->sector_buf;
3644c6fd2807SJeff Garzik 	int rc;
3645c6fd2807SJeff Garzik 
3646c6fd2807SJeff Garzik 	/* read ID data */
3647bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3648c6fd2807SJeff Garzik 	if (rc)
3649fe30911bSTejun Heo 		return rc;
3650c6fd2807SJeff Garzik 
3651c6fd2807SJeff Garzik 	/* is the device still there? */
3652fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
3653fe30911bSTejun Heo 		return -ENODEV;
3654c6fd2807SJeff Garzik 
3655c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3656fe30911bSTejun Heo 	return 0;
3657fe30911bSTejun Heo }
3658fe30911bSTejun Heo 
3659fe30911bSTejun Heo /**
3660fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
3661fe30911bSTejun Heo  *	@dev: device to revalidate
3662fe30911bSTejun Heo  *	@readid_flags: read ID flags
3663fe30911bSTejun Heo  *
3664fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3665fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
3666fe30911bSTejun Heo  *
3667fe30911bSTejun Heo  *	LOCKING:
3668fe30911bSTejun Heo  *	Kernel thread context (may sleep)
3669fe30911bSTejun Heo  *
3670fe30911bSTejun Heo  *	RETURNS:
3671fe30911bSTejun Heo  *	0 on success, negative errno otherwise
3672fe30911bSTejun Heo  */
3673fe30911bSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3674fe30911bSTejun Heo {
36756ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
3676fe30911bSTejun Heo 	int rc;
3677fe30911bSTejun Heo 
3678fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
3679fe30911bSTejun Heo 		return -ENODEV;
3680fe30911bSTejun Heo 
3681fe30911bSTejun Heo 	/* re-read ID */
3682fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
3683fe30911bSTejun Heo 	if (rc)
3684fe30911bSTejun Heo 		goto fail;
3685c6fd2807SJeff Garzik 
3686c6fd2807SJeff Garzik 	/* configure device according to the new ID */
3687efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
36886ddcd3b0STejun Heo 	if (rc)
36896ddcd3b0STejun Heo 		goto fail;
36906ddcd3b0STejun Heo 
36916ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
36926ddcd3b0STejun Heo 	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
36936ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
36946ddcd3b0STejun Heo 			       "%llu != %llu\n",
36956ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
36966ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
36976ddcd3b0STejun Heo 		rc = -ENODEV;
36986ddcd3b0STejun Heo 		goto fail;
36996ddcd3b0STejun Heo 	}
37006ddcd3b0STejun Heo 
3701c6fd2807SJeff Garzik 	return 0;
3702c6fd2807SJeff Garzik 
3703c6fd2807SJeff Garzik  fail:
3704c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3705c6fd2807SJeff Garzik 	return rc;
3706c6fd2807SJeff Garzik }
3707c6fd2807SJeff Garzik 
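/*
 * Illustrative sketch (not part of libata): a hypothetical recovery path
 * could call ata_dev_revalidate() after a reset to confirm that the same
 * disk is still behind the port before resuming I/O.  A readid_flags value
 * of 0 requests plain IDENTIFY behaviour; EH passes additional flags here.
 */
static int foo_recheck_device(struct ata_device *dev)
{
	int rc;

	rc = ata_dev_revalidate(dev, 0);
	if (rc)
		ata_dev_printk(dev, KERN_WARNING,
			       "device changed or vanished (errno=%d)\n", rc);
	return rc;
}
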
37086919a0a6SAlan Cox struct ata_blacklist_entry {
37096919a0a6SAlan Cox 	const char *model_num;
37106919a0a6SAlan Cox 	const char *model_rev;
37116919a0a6SAlan Cox 	unsigned long horkage;
37126919a0a6SAlan Cox };
37136919a0a6SAlan Cox 
37146919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
37156919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
37166919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
37176919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
37186919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
37196919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
37206919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
37216919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
37226919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
37236919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
37246919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
37256919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
37266919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
37276919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
37286919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
37296919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
37306919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
37316919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
37326919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
37336919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
37346919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
37356919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
37366919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
37376919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
37386919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
37396919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
37406919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
37416919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
37426919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
37436919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
37446919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
374539f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
37465acd50f6STejun Heo 	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
374739ce7128STejun Heo 	{ "IOMEGA  ZIP 250       ATAPI       Floppy",
374839ce7128STejun Heo 				NULL,		ATA_HORKAGE_NODMA },
37496919a0a6SAlan Cox 
375018d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
375140a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
375218d6e9d5SAlbert Lee 
37536919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
37546919a0a6SAlan Cox 
37556919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
37566919a0a6SAlan Cox 	/* NCQ is slow */
37576919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
375809125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
375909125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
37607acfaf30SPaul Rolland 	/* NCQ is broken */
37617acfaf30SPaul Rolland 	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
3762471e44b2SJeff Garzik 	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
376396442925SJens Axboe 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
376496442925SJens Axboe 	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
376536e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
376636e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
376736e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
376836e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
376936e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3770bd9c5a39STejun Heo 	/* Drives which do spurious command completion */
3771bd9c5a39STejun Heo 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
37722f8fcebbSTejun Heo 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
3773e14cbfa6STejun Heo 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
37742f8fcebbSTejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3775a520f261STejun Heo 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
37766919a0a6SAlan Cox 
37776919a0a6SAlan Cox 	/* Devices with NCQ limits */
37786919a0a6SAlan Cox 
37796919a0a6SAlan Cox 	/* End Marker */
37806919a0a6SAlan Cox 	{ }
3781c6fd2807SJeff Garzik };
3782c6fd2807SJeff Garzik 
378375683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3784c6fd2807SJeff Garzik {
37858bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
37868bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
37876919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3788c6fd2807SJeff Garzik 
37898bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
37908bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3791c6fd2807SJeff Garzik 
37926919a0a6SAlan Cox 	while (ad->model_num) {
37938bfa79fcSTejun Heo 		if (!strcmp(ad->model_num, model_num)) {
37946919a0a6SAlan Cox 			if (ad->model_rev == NULL)
37956919a0a6SAlan Cox 				return ad->horkage;
37968bfa79fcSTejun Heo 			if (!strcmp(ad->model_rev, model_rev))
37976919a0a6SAlan Cox 				return ad->horkage;
3798c6fd2807SJeff Garzik 		}
37996919a0a6SAlan Cox 		ad++;
3800c6fd2807SJeff Garzik 	}
3801c6fd2807SJeff Garzik 	return 0;
3802c6fd2807SJeff Garzik }
3803c6fd2807SJeff Garzik 
38046919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
38056919a0a6SAlan Cox {
38066919a0a6SAlan Cox 	/* We don't support polling DMA.
38076919a0a6SAlan Cox 	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with
38086919a0a6SAlan Cox 	 * CDB-intr if the LLDD handles interrupts only in the HSM_ST_LAST state.
38096919a0a6SAlan Cox 	 */
38106919a0a6SAlan Cox 	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
38116919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
38126919a0a6SAlan Cox 		return 1;
381375683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
38146919a0a6SAlan Cox }
38156919a0a6SAlan Cox 
3816c6fd2807SJeff Garzik /**
3817c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
3818c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
3819c6fd2807SJeff Garzik  *
3820c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
3821c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
3822c6fd2807SJeff Garzik  *	known limits including host controller limits, device
3823c6fd2807SJeff Garzik  *	blacklist, etc...
3824c6fd2807SJeff Garzik  *
3825c6fd2807SJeff Garzik  *	LOCKING:
3826c6fd2807SJeff Garzik  *	None.
3827c6fd2807SJeff Garzik  */
3828c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
3829c6fd2807SJeff Garzik {
3830c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
3831cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
3832c6fd2807SJeff Garzik 	unsigned long xfer_mask;
3833c6fd2807SJeff Garzik 
3834c6fd2807SJeff Garzik 	/* controller modes available */
3835c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
3836c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
3837c6fd2807SJeff Garzik 
38388343f889SRobert Hancock 	/* drive modes available */
3839c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3840c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
3841c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
3842c6fd2807SJeff Garzik 
3843b352e57dSAlan Cox 	/*
3844b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
3845b352e57dSAlan Cox 	 *	cable
3846b352e57dSAlan Cox 	 */
3847b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
3848b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
3849b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3850b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
3851b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3852b352e57dSAlan Cox 	}
3853b352e57dSAlan Cox 
3854c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
3855c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3856c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
3857c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
3858c6fd2807SJeff Garzik 	}
3859c6fd2807SJeff Garzik 
386014d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
386114d66ab7SPetr Vandrovec             host->simplex_claimed && host->simplex_claimed != ap) {
3862c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3863c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3864c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
3865c6fd2807SJeff Garzik 	}
3866c6fd2807SJeff Garzik 
3867e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
3868e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
3869e424675fSJeff Garzik 
3870c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
3871a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3872c6fd2807SJeff Garzik 
38738343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
38748343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
38758343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
38768343f889SRobert Hancock 	 * solely limited by the cable.
38778343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
38788343f889SRobert Hancock 	 * drive side as well. Cases where we know a 40wire cable
38798343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
38808343f889SRobert Hancock 	 */
38818343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
38828343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
38838343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
38848343f889SRobert Hancock 		    (ata_drive_40wire(dev->id) &&
38858343f889SRobert Hancock 		     (ap->cbl == ATA_CBL_PATA_UNK ||
38868343f889SRobert Hancock 		      ap->cbl == ATA_CBL_PATA80))) {
38878343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
38888343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
38898343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
38908343f889SRobert Hancock 		}
38918343f889SRobert Hancock 
3892c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3893c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
3894c6fd2807SJeff Garzik }
3895c6fd2807SJeff Garzik 
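/*
 * Illustrative sketch (not part of libata): ata_dev_xfermask() gives the
 * LLDD a final veto through the ->mode_filter hook called above.  A
 * hypothetical controller that cannot do UDMA to ATAPI devices might
 * filter like this; the signature matches the call site above.
 */
static unsigned long foo_mode_filter(struct ata_device *dev,
				     unsigned long xfer_mask)
{
	/* hypothetical restriction: no UDMA for ATAPI devices */
	if (dev->class == ATA_DEV_ATAPI)
		xfer_mask &= ~ATA_MASK_UDMA;
	return xfer_mask;
}
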
3896c6fd2807SJeff Garzik /**
3897c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3898c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3899c6fd2807SJeff Garzik  *
3900c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
3901c6fd2807SJeff Garzik  *	on its port.
3902c6fd2807SJeff Garzik  *
3903c6fd2807SJeff Garzik  *	LOCKING:
3904c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3905c6fd2807SJeff Garzik  *
3906c6fd2807SJeff Garzik  *	RETURNS:
3907c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3908c6fd2807SJeff Garzik  */
3909c6fd2807SJeff Garzik 
3910c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3911c6fd2807SJeff Garzik {
3912c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3913c6fd2807SJeff Garzik 	unsigned int err_mask;
3914c6fd2807SJeff Garzik 
3915c6fd2807SJeff Garzik 	/* set up set-features taskfile */
3916c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
3917c6fd2807SJeff Garzik 
3918464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
3919464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
3920464cf177STejun Heo 	 */
3921c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3922c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
3923c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
3924464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3925c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3926c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
3927c6fd2807SJeff Garzik 
3928c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3929c6fd2807SJeff Garzik 
3930c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3931c6fd2807SJeff Garzik 	return err_mask;
3932c6fd2807SJeff Garzik }
3933c6fd2807SJeff Garzik 
3934c6fd2807SJeff Garzik /**
3935c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
3936c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3937c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
3938c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
3939c6fd2807SJeff Garzik  *
3940c6fd2807SJeff Garzik  *	LOCKING:
3941c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3942c6fd2807SJeff Garzik  *
3943c6fd2807SJeff Garzik  *	RETURNS:
3944c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3945c6fd2807SJeff Garzik  */
3946c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
3947c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
3948c6fd2807SJeff Garzik {
3949c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3950c6fd2807SJeff Garzik 	unsigned int err_mask;
3951c6fd2807SJeff Garzik 
3952c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
3953c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3954c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
3955c6fd2807SJeff Garzik 
3956c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
3957c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
3958c6fd2807SJeff Garzik 
3959c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3960c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
3961c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3962c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3963c6fd2807SJeff Garzik 	tf.nsect = sectors;
3964c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3965c6fd2807SJeff Garzik 
3966c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3967c6fd2807SJeff Garzik 
3968c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3969c6fd2807SJeff Garzik 	return err_mask;
3970c6fd2807SJeff Garzik }
3971c6fd2807SJeff Garzik 
3972c6fd2807SJeff Garzik /**
3973c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
3974c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
3975c6fd2807SJeff Garzik  *
3976c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
3977c6fd2807SJeff Garzik  *
3978c6fd2807SJeff Garzik  *	LOCKING:
3979cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3980c6fd2807SJeff Garzik  */
398170e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
3982c6fd2807SJeff Garzik {
3983c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
3984c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
3985c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
3986c6fd2807SJeff Garzik 	void *pad_buf = NULL;
3987c6fd2807SJeff Garzik 
3988c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3989c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
3990c6fd2807SJeff Garzik 
3991c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
3992c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
3993c6fd2807SJeff Garzik 
3994c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3995c6fd2807SJeff Garzik 
3996c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
3997c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
3998c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
3999c6fd2807SJeff Garzik 	 */
4000c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4001c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4002c6fd2807SJeff Garzik 
4003c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4004c6fd2807SJeff Garzik 		if (qc->n_elem)
4005c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4006c6fd2807SJeff Garzik 		/* restore last sg */
4007c6fd2807SJeff Garzik 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
4008c6fd2807SJeff Garzik 		if (pad_buf) {
4009c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
4010c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4011c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4012c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4013c6fd2807SJeff Garzik 		}
4014c6fd2807SJeff Garzik 	} else {
4015c6fd2807SJeff Garzik 		if (qc->n_elem)
4016c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4017c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4018c6fd2807SJeff Garzik 				dir);
4019c6fd2807SJeff Garzik 		/* restore sg */
4020c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4021c6fd2807SJeff Garzik 		if (pad_buf)
4022c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4023c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4024c6fd2807SJeff Garzik 	}
4025c6fd2807SJeff Garzik 
4026c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4027c6fd2807SJeff Garzik 	qc->__sg = NULL;
4028c6fd2807SJeff Garzik }
4029c6fd2807SJeff Garzik 
4030c6fd2807SJeff Garzik /**
4031c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4032c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4033c6fd2807SJeff Garzik  *
4034c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4035c6fd2807SJeff Garzik  *	associated with the current disk command.
4036c6fd2807SJeff Garzik  *
4037c6fd2807SJeff Garzik  *	LOCKING:
4038cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4039c6fd2807SJeff Garzik  *
4040c6fd2807SJeff Garzik  */
4041c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4042c6fd2807SJeff Garzik {
4043c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4044c6fd2807SJeff Garzik 	struct scatterlist *sg;
4045c6fd2807SJeff Garzik 	unsigned int idx;
4046c6fd2807SJeff Garzik 
4047c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4048c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4049c6fd2807SJeff Garzik 
4050c6fd2807SJeff Garzik 	idx = 0;
4051c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4052c6fd2807SJeff Garzik 		u32 addr, offset;
4053c6fd2807SJeff Garzik 		u32 sg_len, len;
4054c6fd2807SJeff Garzik 
4055c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4056c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4057c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4058c6fd2807SJeff Garzik 		 */
4059c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4060c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4061c6fd2807SJeff Garzik 
4062c6fd2807SJeff Garzik 		while (sg_len) {
4063c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4064c6fd2807SJeff Garzik 			len = sg_len;
4065c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4066c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4067c6fd2807SJeff Garzik 
4068c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4069c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4070c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4071c6fd2807SJeff Garzik 
4072c6fd2807SJeff Garzik 			idx++;
4073c6fd2807SJeff Garzik 			sg_len -= len;
4074c6fd2807SJeff Garzik 			addr += len;
4075c6fd2807SJeff Garzik 		}
4076c6fd2807SJeff Garzik 	}
4077c6fd2807SJeff Garzik 
4078c6fd2807SJeff Garzik 	if (idx)
4079c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4080c6fd2807SJeff Garzik }
4081b9a4197eSTejun Heo 
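/*
 * Worked example (not part of libata) of the 64K split performed above:
 * a 0x300-byte segment at bus address 0x0001ff80 crosses a 64K boundary,
 * so ata_fill_sg() emits two PRD entries for it:
 *
 *	addr   = 0x0001ff80, sg_len = 0x300
 *	offset = addr & 0xffff = 0xff80
 *	PRD[n]   : addr 0x0001ff80, len 0x10000 - 0xff80 = 0x0080
 *	PRD[n+1] : addr 0x00020000, len 0x0300 - 0x0080  = 0x0280
 */
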
4082c6fd2807SJeff Garzik /**
4083d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4084d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4085d26fc955SAlan Cox  *
4086d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4087d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4088d26fc955SAlan Cox  *	so that we avoid writing any length 64K records for
4089d26fc955SAlan Cox  *	controllers that don't follow the spec.
4090d26fc955SAlan Cox  *
4091d26fc955SAlan Cox  *	LOCKING:
4092d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4093d26fc955SAlan Cox  *
4094d26fc955SAlan Cox  */
4095d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4096d26fc955SAlan Cox {
4097d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4098d26fc955SAlan Cox 	struct scatterlist *sg;
4099d26fc955SAlan Cox 	unsigned int idx;
4100d26fc955SAlan Cox 
4101d26fc955SAlan Cox 	WARN_ON(qc->__sg == NULL);
4102d26fc955SAlan Cox 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4103d26fc955SAlan Cox 
4104d26fc955SAlan Cox 	idx = 0;
4105d26fc955SAlan Cox 	ata_for_each_sg(sg, qc) {
4106d26fc955SAlan Cox 		u32 addr, offset;
4107d26fc955SAlan Cox 		u32 sg_len, len, blen;
4108d26fc955SAlan Cox 
4109d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4110d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4111d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4112d26fc955SAlan Cox 		 */
4113d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4114d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4115d26fc955SAlan Cox 
4116d26fc955SAlan Cox 		while (sg_len) {
4117d26fc955SAlan Cox 			offset = addr & 0xffff;
4118d26fc955SAlan Cox 			len = sg_len;
4119d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4120d26fc955SAlan Cox 				len = 0x10000 - offset;
4121d26fc955SAlan Cox 
4122d26fc955SAlan Cox 			blen = len & 0xffff;
4123d26fc955SAlan Cox 			ap->prd[idx].addr = cpu_to_le32(addr);
4124d26fc955SAlan Cox 			if (blen == 0) {
4125d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4126d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4127d26fc955SAlan Cox 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4128d26fc955SAlan Cox 				blen = 0x8000;
4129d26fc955SAlan Cox 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4130d26fc955SAlan Cox 			}
4131d26fc955SAlan Cox 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4132d26fc955SAlan Cox 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4133d26fc955SAlan Cox 
4134d26fc955SAlan Cox 			idx++;
4135d26fc955SAlan Cox 			sg_len -= len;
4136d26fc955SAlan Cox 			addr += len;
4137d26fc955SAlan Cox 		}
4138d26fc955SAlan Cox 	}
4139d26fc955SAlan Cox 
4140d26fc955SAlan Cox 	if (idx)
4141d26fc955SAlan Cox 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4142d26fc955SAlan Cox }
4143d26fc955SAlan Cox 
4144d26fc955SAlan Cox /**
4145c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4146c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4147c6fd2807SJeff Garzik  *
4148c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4149c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4150c6fd2807SJeff Garzik  *	supplied PACKET command.
4151c6fd2807SJeff Garzik  *
4152c6fd2807SJeff Garzik  *	LOCKING:
4153cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4154c6fd2807SJeff Garzik  *
4155c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4156c6fd2807SJeff Garzik  *               nonzero otherwise
4157c6fd2807SJeff Garzik  */
4158c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4159c6fd2807SJeff Garzik {
4160c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4161c6fd2807SJeff Garzik 
4162b9a4197eSTejun Heo 	/* Don't allow DMA if the transfer isn't a multiple of 16 bytes.
4163b9a4197eSTejun Heo 	 * Quite a few ATAPI devices choke on such DMA requests.
4164b9a4197eSTejun Heo 	 */
4165b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
41666f23a31dSAlbert Lee 		return 1;
41676f23a31dSAlbert Lee 
4168c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4169b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4170c6fd2807SJeff Garzik 
4171b9a4197eSTejun Heo 	return 0;
4172c6fd2807SJeff Garzik }
4173b9a4197eSTejun Heo 
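/*
 * Illustrative sketch (not part of libata): a hypothetical controller whose
 * ATAPI DMA engine only handles whole 32-byte bursts could veto other
 * requests through the ->check_atapi_dma hook consulted above (a nonzero
 * return means "fall back to PIO for this command").
 */
static int foo_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* hypothetical hardware restriction: DMA only for 32-byte multiples */
	return (qc->nbytes % 32) ? 1 : 0;
}
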
4174c6fd2807SJeff Garzik /**
4175c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4176c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4177c6fd2807SJeff Garzik  *
4178c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4179c6fd2807SJeff Garzik  *
4180c6fd2807SJeff Garzik  *	LOCKING:
4181cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4182c6fd2807SJeff Garzik  */
4183c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4184c6fd2807SJeff Garzik {
4185c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4186c6fd2807SJeff Garzik 		return;
4187c6fd2807SJeff Garzik 
4188c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4189c6fd2807SJeff Garzik }
4190c6fd2807SJeff Garzik 
4191d26fc955SAlan Cox /**
4192d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4193d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4194d26fc955SAlan Cox  *
4195d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4196d26fc955SAlan Cox  *
4197d26fc955SAlan Cox  *	LOCKING:
4198d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4199d26fc955SAlan Cox  */
4200d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4201d26fc955SAlan Cox {
4202d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4203d26fc955SAlan Cox 		return;
4204d26fc955SAlan Cox 
4205d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4206d26fc955SAlan Cox }
4207d26fc955SAlan Cox 
4208c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4209c6fd2807SJeff Garzik 
4210c6fd2807SJeff Garzik /**
4211c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4212c6fd2807SJeff Garzik  *	@qc: Command to be associated
4213c6fd2807SJeff Garzik  *	@buf: Memory buffer
4214c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4215c6fd2807SJeff Garzik  *
4216c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4217c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4218c6fd2807SJeff Garzik  *
4219c6fd2807SJeff Garzik  *	LOCKING:
4220cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4221c6fd2807SJeff Garzik  */
4222c6fd2807SJeff Garzik 
4223c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4224c6fd2807SJeff Garzik {
4225c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4226c6fd2807SJeff Garzik 
4227c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4228c6fd2807SJeff Garzik 	qc->n_elem = 1;
4229c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4230c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4231c6fd2807SJeff Garzik 	qc->nbytes = buflen;
4232c6fd2807SJeff Garzik 
423361c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4234c6fd2807SJeff Garzik }
4235c6fd2807SJeff Garzik 
4236c6fd2807SJeff Garzik /**
4237c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4238c6fd2807SJeff Garzik  *	@qc: Command to be associated
4239c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4240c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4241c6fd2807SJeff Garzik  *
4242c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4243c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4244c6fd2807SJeff Garzik  *	elements.
4245c6fd2807SJeff Garzik  *
4246c6fd2807SJeff Garzik  *	LOCKING:
4247cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4248c6fd2807SJeff Garzik  */
4249c6fd2807SJeff Garzik 
4250c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4251c6fd2807SJeff Garzik 		 unsigned int n_elem)
4252c6fd2807SJeff Garzik {
4253c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4254c6fd2807SJeff Garzik 	qc->__sg = sg;
4255c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4256c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
4257c6fd2807SJeff Garzik }
4258c6fd2807SJeff Garzik 
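/*
 * Illustrative sketch (not part of libata): how a command-issue path could
 * associate its data with a queued command before mapping.  A single flat
 * buffer goes through ata_sg_init_one(); a caller that already owns a
 * scatterlist would use ata_sg_init() instead.  Setting dma_dir is the
 * caller's job; DMA_FROM_DEVICE below simply assumes a read command.
 */
static void foo_attach_buffer(struct ata_queued_cmd *qc,
			      void *buf, unsigned int buflen)
{
	ata_sg_init_one(qc, buf, buflen);
	qc->dma_dir = DMA_FROM_DEVICE;
}
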
4259c6fd2807SJeff Garzik /**
4260c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4261c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4262c6fd2807SJeff Garzik  *
4263c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4264c6fd2807SJeff Garzik  *
4265c6fd2807SJeff Garzik  *	LOCKING:
4266cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4267c6fd2807SJeff Garzik  *
4268c6fd2807SJeff Garzik  *	RETURNS:
4269c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4270c6fd2807SJeff Garzik  */
4271c6fd2807SJeff Garzik 
4272c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4273c6fd2807SJeff Garzik {
4274c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4275c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4276c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4277c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4278c6fd2807SJeff Garzik 	int trim_sg = 0;
4279c6fd2807SJeff Garzik 
4280c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4281c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4282c6fd2807SJeff Garzik 	if (qc->pad_len) {
4283c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4284c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4285c6fd2807SJeff Garzik 
4286c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4287c6fd2807SJeff Garzik 
4288c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4289c6fd2807SJeff Garzik 
4290c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4291c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4292c6fd2807SJeff Garzik 			       qc->pad_len);
4293c6fd2807SJeff Garzik 
4294c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4295c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4296c6fd2807SJeff Garzik 		/* trim sg */
4297c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4298c6fd2807SJeff Garzik 		if (sg->length == 0)
4299c6fd2807SJeff Garzik 			trim_sg = 1;
4300c6fd2807SJeff Garzik 
4301c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4302c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4303c6fd2807SJeff Garzik 	}
4304c6fd2807SJeff Garzik 
4305c6fd2807SJeff Garzik 	if (trim_sg) {
4306c6fd2807SJeff Garzik 		qc->n_elem--;
4307c6fd2807SJeff Garzik 		goto skip_map;
4308c6fd2807SJeff Garzik 	}
4309c6fd2807SJeff Garzik 
4310c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4311c6fd2807SJeff Garzik 				     sg->length, dir);
4312c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4313c6fd2807SJeff Garzik 		/* restore sg */
4314c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4315c6fd2807SJeff Garzik 		return -1;
4316c6fd2807SJeff Garzik 	}
4317c6fd2807SJeff Garzik 
4318c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4319c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4320c6fd2807SJeff Garzik 
4321c6fd2807SJeff Garzik skip_map:
4322c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4323c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4324c6fd2807SJeff Garzik 
4325c6fd2807SJeff Garzik 	return 0;
4326c6fd2807SJeff Garzik }
4327c6fd2807SJeff Garzik 
4328c6fd2807SJeff Garzik /**
4329c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4330c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4331c6fd2807SJeff Garzik  *
4332c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4333c6fd2807SJeff Garzik  *
4334c6fd2807SJeff Garzik  *	LOCKING:
4335cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4336c6fd2807SJeff Garzik  *
4337c6fd2807SJeff Garzik  *	RETURNS:
4338c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4339c6fd2807SJeff Garzik  *
4340c6fd2807SJeff Garzik  */
4341c6fd2807SJeff Garzik 
4342c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4343c6fd2807SJeff Garzik {
4344c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4345c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4346c6fd2807SJeff Garzik 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
4347c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4348c6fd2807SJeff Garzik 
434944877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4350c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4351c6fd2807SJeff Garzik 
4352c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4353c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4354c6fd2807SJeff Garzik 	if (qc->pad_len) {
4355c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4356c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4357c6fd2807SJeff Garzik 		unsigned int offset;
4358c6fd2807SJeff Garzik 
4359c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4360c6fd2807SJeff Garzik 
4361c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4362c6fd2807SJeff Garzik 
4363c6fd2807SJeff Garzik 		/*
4364c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4365c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4366c6fd2807SJeff Garzik 		 */
4367c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4368c6fd2807SJeff Garzik 		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4369c6fd2807SJeff Garzik 		psg->offset = offset_in_page(offset);
4370c6fd2807SJeff Garzik 
4371c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4372c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4373c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4374c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4375c6fd2807SJeff Garzik 		}
4376c6fd2807SJeff Garzik 
4377c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4378c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4379c6fd2807SJeff Garzik 		/* trim last sg */
4380c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4381c6fd2807SJeff Garzik 		if (lsg->length == 0)
4382c6fd2807SJeff Garzik 			trim_sg = 1;
4383c6fd2807SJeff Garzik 
4384c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4385c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4386c6fd2807SJeff Garzik 	}
4387c6fd2807SJeff Garzik 
4388c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4389c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4390c6fd2807SJeff Garzik 		pre_n_elem--;
4391c6fd2807SJeff Garzik 
4392c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4393c6fd2807SJeff Garzik 		n_elem = 0;
4394c6fd2807SJeff Garzik 		goto skip_map;
4395c6fd2807SJeff Garzik 	}
4396c6fd2807SJeff Garzik 
4397c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4398c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4399c6fd2807SJeff Garzik 	if (n_elem < 1) {
4400c6fd2807SJeff Garzik 		/* restore last sg */
4401c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4402c6fd2807SJeff Garzik 		return -1;
4403c6fd2807SJeff Garzik 	}
4404c6fd2807SJeff Garzik 
4405c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4406c6fd2807SJeff Garzik 
4407c6fd2807SJeff Garzik skip_map:
4408c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4409c6fd2807SJeff Garzik 
4410c6fd2807SJeff Garzik 	return 0;
4411c6fd2807SJeff Garzik }
4412c6fd2807SJeff Garzik 
4413c6fd2807SJeff Garzik /**
4414c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4415c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4416c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4417c6fd2807SJeff Garzik  *
4418c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4419c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4420c6fd2807SJeff Garzik  *	vice-versa.
4421c6fd2807SJeff Garzik  *
4422c6fd2807SJeff Garzik  *	LOCKING:
4423c6fd2807SJeff Garzik  *	Inherited from caller.
4424c6fd2807SJeff Garzik  */
4425c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4426c6fd2807SJeff Garzik {
4427c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4428c6fd2807SJeff Garzik 	unsigned int i;
4429c6fd2807SJeff Garzik 
4430c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4431c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4432c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4433c6fd2807SJeff Garzik }
4434c6fd2807SJeff Garzik 
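/*
 * Illustrative sketch (not part of libata): IDENTIFY data arrives from the
 * device as little-endian 16-bit words, so a hypothetical PIO path would
 * fix the byte order in place before parsing it; on little-endian hosts
 * swap_buf_le16() compiles to nothing.  ATA_ID_WORDS is the standard
 * 256-word IDENTIFY size used elsewhere in this file.
 */
static void foo_fixup_id(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}
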
4435c6fd2807SJeff Garzik /**
44360d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4437c6fd2807SJeff Garzik  *	@adev: device to target
4438c6fd2807SJeff Garzik  *	@buf: data buffer
4439c6fd2807SJeff Garzik  *	@buflen: buffer length
4440c6fd2807SJeff Garzik  *	@write_data: read/write
4441c6fd2807SJeff Garzik  *
4442c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4443c6fd2807SJeff Garzik  *
4444c6fd2807SJeff Garzik  *	LOCKING:
4445c6fd2807SJeff Garzik  *	Inherited from caller.
4446c6fd2807SJeff Garzik  */
44470d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4448c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4449c6fd2807SJeff Garzik {
4450c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
4451c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4452c6fd2807SJeff Garzik 
4453c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4454c6fd2807SJeff Garzik 	if (write_data)
44550d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4456c6fd2807SJeff Garzik 	else
44570d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4458c6fd2807SJeff Garzik 
4459c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4460c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4461c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4462c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4463c6fd2807SJeff Garzik 
4464c6fd2807SJeff Garzik 		if (write_data) {
4465c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
44660d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4467c6fd2807SJeff Garzik 		} else {
44680d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4469c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4470c6fd2807SJeff Garzik 		}
4471c6fd2807SJeff Garzik 	}
4472c6fd2807SJeff Garzik }
4473c6fd2807SJeff Garzik 
4474c6fd2807SJeff Garzik /**
44750d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4476c6fd2807SJeff Garzik  *	@adev: device to target
4477c6fd2807SJeff Garzik  *	@buf: data buffer
4478c6fd2807SJeff Garzik  *	@buflen: buffer length
4479c6fd2807SJeff Garzik  *	@write_data: read/write
4480c6fd2807SJeff Garzik  *
4481c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4482c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4483c6fd2807SJeff Garzik  *
4484c6fd2807SJeff Garzik  *	LOCKING:
4485c6fd2807SJeff Garzik  *	Inherited from caller.
4486c6fd2807SJeff Garzik  */
44870d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4488c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
4489c6fd2807SJeff Garzik {
4490c6fd2807SJeff Garzik 	unsigned long flags;
4491c6fd2807SJeff Garzik 	local_irq_save(flags);
44920d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
4493c6fd2807SJeff Garzik 	local_irq_restore(flags);
4494c6fd2807SJeff Garzik }
4495c6fd2807SJeff Garzik 
4496c6fd2807SJeff Garzik 
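/*
 * Illustrative sketch (not part of libata): a hypothetical PATA driver for
 * a chipset that mishandles 64K PRD entries and dislikes interrupts during
 * PIO could plug the helpers above into its ops; every other mandatory
 * ata_port_operations member is omitted here for brevity.
 */
static const struct ata_port_operations foo_port_ops = {
	.qc_prep	= ata_dumb_qc_prep,	/* avoid full 64K PRD entries */
	.data_xfer	= ata_data_xfer_noirq,	/* PIO with local IRQs masked */
	/* ... remaining callbacks ... */
};
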
4497c6fd2807SJeff Garzik /**
44985a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
4499c6fd2807SJeff Garzik  *	@qc: Command on going
4500c6fd2807SJeff Garzik  *
45015a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4502c6fd2807SJeff Garzik  *
4503c6fd2807SJeff Garzik  *	LOCKING:
4504c6fd2807SJeff Garzik  *	Inherited from caller.
4505c6fd2807SJeff Garzik  */
4506c6fd2807SJeff Garzik 
4507c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
4508c6fd2807SJeff Garzik {
4509c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4510c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4511c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4512c6fd2807SJeff Garzik 	struct page *page;
4513c6fd2807SJeff Garzik 	unsigned int offset;
4514c6fd2807SJeff Garzik 	unsigned char *buf;
4515c6fd2807SJeff Garzik 
45165a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4517c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4518c6fd2807SJeff Garzik 
4519c6fd2807SJeff Garzik 	page = sg[qc->cursg].page;
4520726f0785STejun Heo 	offset = sg[qc->cursg].offset + qc->cursg_ofs;
4521c6fd2807SJeff Garzik 
4522c6fd2807SJeff Garzik 	/* get the current page and offset */
4523c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4524c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4525c6fd2807SJeff Garzik 
4526c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4527c6fd2807SJeff Garzik 
4528c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4529c6fd2807SJeff Garzik 		unsigned long flags;
4530c6fd2807SJeff Garzik 
4531c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
4532c6fd2807SJeff Garzik 		local_irq_save(flags);
4533c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4534c6fd2807SJeff Garzik 
4535c6fd2807SJeff Garzik 		/* do the actual data transfer */
45365a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4537c6fd2807SJeff Garzik 
4538c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4539c6fd2807SJeff Garzik 		local_irq_restore(flags);
4540c6fd2807SJeff Garzik 	} else {
4541c6fd2807SJeff Garzik 		buf = page_address(page);
45425a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4543c6fd2807SJeff Garzik 	}
4544c6fd2807SJeff Garzik 
45455a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
45465a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
4547c6fd2807SJeff Garzik 
4548726f0785STejun Heo 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4549c6fd2807SJeff Garzik 		qc->cursg++;
4550c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4551c6fd2807SJeff Garzik 	}
4552c6fd2807SJeff Garzik }
4553c6fd2807SJeff Garzik 
4554c6fd2807SJeff Garzik /**
45555a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
4556c6fd2807SJeff Garzik  *	@qc: Command on going
4557c6fd2807SJeff Garzik  *
45585a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
4559c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
4560c6fd2807SJeff Garzik  *
4561c6fd2807SJeff Garzik  *	LOCKING:
4562c6fd2807SJeff Garzik  *	Inherited from caller.
4563c6fd2807SJeff Garzik  */
4564c6fd2807SJeff Garzik 
4565c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
4566c6fd2807SJeff Garzik {
4567c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
4568c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
4569c6fd2807SJeff Garzik 		unsigned int nsect;
4570c6fd2807SJeff Garzik 
4571c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
4572c6fd2807SJeff Garzik 
45735a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4574726f0785STejun Heo 			    qc->dev->multi_count);
4575c6fd2807SJeff Garzik 		while (nsect--)
4576c6fd2807SJeff Garzik 			ata_pio_sector(qc);
4577c6fd2807SJeff Garzik 	} else
4578c6fd2807SJeff Garzik 		ata_pio_sector(qc);
4579c6fd2807SJeff Garzik }
4580c6fd2807SJeff Garzik 
4581c6fd2807SJeff Garzik /**
4582c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
4583c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
4584c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
4585c6fd2807SJeff Garzik  *
4586c6fd2807SJeff Garzik  *	When device has indicated its readiness to accept
4587c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
4588c6fd2807SJeff Garzik  *
4589c6fd2807SJeff Garzik  *	LOCKING:
4590c6fd2807SJeff Garzik  *	caller.
4591c6fd2807SJeff Garzik  */
4592c6fd2807SJeff Garzik 
4593c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4594c6fd2807SJeff Garzik {
4595c6fd2807SJeff Garzik 	/* send SCSI cdb */
4596c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
4597c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
4598c6fd2807SJeff Garzik 
4599c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4600c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4601c6fd2807SJeff Garzik 
4602c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
4603c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
4604c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
4605c6fd2807SJeff Garzik 		break;
4606c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
4607c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4608c6fd2807SJeff Garzik 		break;
4609c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
4610c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4611c6fd2807SJeff Garzik 		/* initiate bmdma */
4612c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
4613c6fd2807SJeff Garzik 		break;
4614c6fd2807SJeff Garzik 	}
4615c6fd2807SJeff Garzik }
4616c6fd2807SJeff Garzik 
4617c6fd2807SJeff Garzik /**
4618c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4619c6fd2807SJeff Garzik  *	@qc: Command on going
4620c6fd2807SJeff Garzik  *	@bytes: number of bytes
4621c6fd2807SJeff Garzik  *
4622c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4623c6fd2807SJeff Garzik  *
4624c6fd2807SJeff Garzik  *	LOCKING:
4625c6fd2807SJeff Garzik  *	Inherited from caller.
4626c6fd2807SJeff Garzik  *
4627c6fd2807SJeff Garzik  */
4628c6fd2807SJeff Garzik 
4629c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4630c6fd2807SJeff Garzik {
4631c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4632c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4633c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4634c6fd2807SJeff Garzik 	struct page *page;
4635c6fd2807SJeff Garzik 	unsigned char *buf;
4636c6fd2807SJeff Garzik 	unsigned int offset, count;
4637c6fd2807SJeff Garzik 
4638c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
4639c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4640c6fd2807SJeff Garzik 
4641c6fd2807SJeff Garzik next_sg:
4642c6fd2807SJeff Garzik 	if (unlikely(qc->cursg >= qc->n_elem)) {
4643c6fd2807SJeff Garzik 		/*
4644c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
4645c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
4646c6fd2807SJeff Garzik 		 * and fulfill length specified in the byte count register,
4647c6fd2807SJeff Garzik 		 *    - for read case, discard trailing data from the device
4648c6fd2807SJeff Garzik 		 *    - for write case, pad zero data to the device
4649c6fd2807SJeff Garzik 		 */
4650c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
4651c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
4652c6fd2807SJeff Garzik 		unsigned int i;
4653c6fd2807SJeff Garzik 
4654c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
4655c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
4656c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
4657c6fd2807SJeff Garzik 
4658c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
4659c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4660c6fd2807SJeff Garzik 
4661c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4662c6fd2807SJeff Garzik 		return;
4663c6fd2807SJeff Garzik 	}
4664c6fd2807SJeff Garzik 
4665c6fd2807SJeff Garzik 	sg = &qc->__sg[qc->cursg];
4666c6fd2807SJeff Garzik 
4667c6fd2807SJeff Garzik 	page = sg->page;
4668c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
4669c6fd2807SJeff Garzik 
4670c6fd2807SJeff Garzik 	/* get the current page and offset */
4671c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4672c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4673c6fd2807SJeff Garzik 
4674c6fd2807SJeff Garzik 	/* don't overrun current sg */
4675c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
4676c6fd2807SJeff Garzik 
4677c6fd2807SJeff Garzik 	/* don't cross page boundaries */
4678c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
4679c6fd2807SJeff Garzik 
4680c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4681c6fd2807SJeff Garzik 
4682c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4683c6fd2807SJeff Garzik 		unsigned long flags;
4684c6fd2807SJeff Garzik 
4685c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
4686c6fd2807SJeff Garzik 		local_irq_save(flags);
4687c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4688c6fd2807SJeff Garzik 
4689c6fd2807SJeff Garzik 		/* do the actual data transfer */
4690c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4691c6fd2807SJeff Garzik 
4692c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4693c6fd2807SJeff Garzik 		local_irq_restore(flags);
4694c6fd2807SJeff Garzik 	} else {
4695c6fd2807SJeff Garzik 		buf = page_address(page);
4696c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4697c6fd2807SJeff Garzik 	}
4698c6fd2807SJeff Garzik 
4699c6fd2807SJeff Garzik 	bytes -= count;
4700c6fd2807SJeff Garzik 	qc->curbytes += count;
4701c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
4702c6fd2807SJeff Garzik 
4703c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
4704c6fd2807SJeff Garzik 		qc->cursg++;
4705c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4706c6fd2807SJeff Garzik 	}
4707c6fd2807SJeff Garzik 
4708c6fd2807SJeff Garzik 	if (bytes)
4709c6fd2807SJeff Garzik 		goto next_sg;
4710c6fd2807SJeff Garzik }
4711c6fd2807SJeff Garzik 
4712c6fd2807SJeff Garzik /**
4713c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
4714c6fd2807SJeff Garzik  *	@qc: Command in progress
4715c6fd2807SJeff Garzik  *
4716c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4717c6fd2807SJeff Garzik  *
4718c6fd2807SJeff Garzik  *	LOCKING:
4719c6fd2807SJeff Garzik  *	Inherited from caller.
4720c6fd2807SJeff Garzik  */
4721c6fd2807SJeff Garzik 
4722c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4723c6fd2807SJeff Garzik {
4724c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4725c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
4726c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
4727c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4728c6fd2807SJeff Garzik 
4729c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
4730c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
4731c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
4732c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
4733c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
4734c6fd2807SJeff Garzik 	 */
4735c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
4736c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
4737c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
4738c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
4739c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
4740c6fd2807SJeff Garzik 
4741c6fd2807SJeff Garzik 	/* CoD (bit 0) shall be cleared to zero, indicating transfer of data */
4742c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
4743c6fd2807SJeff Garzik 		goto err_out;
4744c6fd2807SJeff Garzik 
4745c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
4746c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4747c6fd2807SJeff Garzik 	if (do_write != i_write)
4748c6fd2807SJeff Garzik 		goto err_out;
4749c6fd2807SJeff Garzik 
475044877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4751c6fd2807SJeff Garzik 
4752c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
4753c6fd2807SJeff Garzik 
4754c6fd2807SJeff Garzik 	return;
4755c6fd2807SJeff Garzik 
4756c6fd2807SJeff Garzik err_out:
4757c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4758c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
4759c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
4760c6fd2807SJeff Garzik }
4761c6fd2807SJeff Garzik 
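/*
 * A short worked example of the register decode above (values are
 * illustrative): if the device reports lbam = 0x00 and lbah = 0x02, the
 * byte count for this DRQ service is (0x02 << 8) | 0x00 = 512 bytes.
 * ireason comes from the sector count register; bit 0 (CoD) must be 0
 * for a data phase and bit 1 (IO) gives the direction, so ireason = 0x2
 * means "data, device to host" and is rejected above when the command
 * is a write.
 */
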
4762c6fd2807SJeff Garzik /**
4763c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4764c6fd2807SJeff Garzik  *	@ap: the target ata_port
4765c6fd2807SJeff Garzik  *	@qc: qc in progress
4766c6fd2807SJeff Garzik  *
4767c6fd2807SJeff Garzik  *	RETURNS:
4768c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
4769c6fd2807SJeff Garzik  */
4770c6fd2807SJeff Garzik 
4771c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4772c6fd2807SJeff Garzik {
4773c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
4774c6fd2807SJeff Garzik 		return 1;
4775c6fd2807SJeff Garzik 
4776c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
4777c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
4778c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
4779c6fd2807SJeff Garzik 		    return 1;
4780c6fd2807SJeff Garzik 
4781c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
4782c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4783c6fd2807SJeff Garzik 			return 1;
4784c6fd2807SJeff Garzik 	}
4785c6fd2807SJeff Garzik 
4786c6fd2807SJeff Garzik 	return 0;
4787c6fd2807SJeff Garzik }
4788c6fd2807SJeff Garzik 
4789c6fd2807SJeff Garzik /**
4790c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
4791c6fd2807SJeff Garzik  *	@qc: Command to complete
4792c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4793c6fd2807SJeff Garzik  *
4794c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
4795c6fd2807SJeff Garzik  *
4796c6fd2807SJeff Garzik  *	LOCKING:
4797cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
4798c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
4799c6fd2807SJeff Garzik  */
4800c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4801c6fd2807SJeff Garzik {
4802c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4803c6fd2807SJeff Garzik 	unsigned long flags;
4804c6fd2807SJeff Garzik 
4805c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
4806c6fd2807SJeff Garzik 		if (in_wq) {
4807c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4808c6fd2807SJeff Garzik 
4809cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
4810cca3974eSJeff Garzik 			 * released.
4811c6fd2807SJeff Garzik 			 */
4812c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
4813c6fd2807SJeff Garzik 			if (qc) {
4814c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
481583625006SAkira Iguchi 					ap->ops->irq_on(ap);
4816c6fd2807SJeff Garzik 					ata_qc_complete(qc);
4817c6fd2807SJeff Garzik 				} else
4818c6fd2807SJeff Garzik 					ata_port_freeze(ap);
4819c6fd2807SJeff Garzik 			}
4820c6fd2807SJeff Garzik 
4821c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4822c6fd2807SJeff Garzik 		} else {
4823c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
4824c6fd2807SJeff Garzik 				ata_qc_complete(qc);
4825c6fd2807SJeff Garzik 			else
4826c6fd2807SJeff Garzik 				ata_port_freeze(ap);
4827c6fd2807SJeff Garzik 		}
4828c6fd2807SJeff Garzik 	} else {
4829c6fd2807SJeff Garzik 		if (in_wq) {
4830c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
483183625006SAkira Iguchi 			ap->ops->irq_on(ap);
4832c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4833c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4834c6fd2807SJeff Garzik 		} else
4835c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4836c6fd2807SJeff Garzik 	}
4837c6fd2807SJeff Garzik }
4838c6fd2807SJeff Garzik 
4839c6fd2807SJeff Garzik /**
4840c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
4841c6fd2807SJeff Garzik  *	@ap: the target ata_port
4842c6fd2807SJeff Garzik  *	@qc: qc in progress
4843c6fd2807SJeff Garzik  *	@status: current device status
4844c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4845c6fd2807SJeff Garzik  *
4846c6fd2807SJeff Garzik  *	RETURNS:
4847c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
4848c6fd2807SJeff Garzik  */
4849c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4850c6fd2807SJeff Garzik 		 u8 status, int in_wq)
4851c6fd2807SJeff Garzik {
4852c6fd2807SJeff Garzik 	unsigned long flags = 0;
4853c6fd2807SJeff Garzik 	int poll_next;
4854c6fd2807SJeff Garzik 
4855c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4856c6fd2807SJeff Garzik 
4857c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
4858c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
4859c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4860c6fd2807SJeff Garzik 	 */
4861c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4862c6fd2807SJeff Garzik 
4863c6fd2807SJeff Garzik fsm_start:
4864c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
486544877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4866c6fd2807SJeff Garzik 
4867c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
4868c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
4869c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
4870c6fd2807SJeff Garzik 
4871c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
4872c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
4873c6fd2807SJeff Garzik 		 * takes over after sending the data.
4874c6fd2807SJeff Garzik 		 */
4875c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4876c6fd2807SJeff Garzik 
4877c6fd2807SJeff Garzik 		/* check device status */
4878c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
4879c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
4880c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
4881c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
4882c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
4883c6fd2807SJeff Garzik 			else
4884c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
4885c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4886c6fd2807SJeff Garzik 
4887c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4888c6fd2807SJeff Garzik 			goto fsm_start;
4889c6fd2807SJeff Garzik 		}
4890c6fd2807SJeff Garzik 
4891c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
4892c6fd2807SJeff Garzik 		 * when it finds something wrong.
4893c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
4894c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
4895c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
4896c6fd2807SJeff Garzik 		 */
4897c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
489844877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
489944877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
4900c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
4901c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4902c6fd2807SJeff Garzik 			goto fsm_start;
4903c6fd2807SJeff Garzik 		}
4904c6fd2807SJeff Garzik 
4905c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
4906c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
4907c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
4908c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
4909c6fd2807SJeff Garzik 		 */
4910c6fd2807SJeff Garzik 		if (in_wq)
4911c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4912c6fd2807SJeff Garzik 
4913c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
4914c6fd2807SJeff Garzik 			/* PIO data out protocol.
4915c6fd2807SJeff Garzik 			 * send first data block.
4916c6fd2807SJeff Garzik 			 */
4917c6fd2807SJeff Garzik 
4918c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
4919c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
4920c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
4921c6fd2807SJeff Garzik 			 */
4922c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
4923c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
4924c6fd2807SJeff Garzik 			ata_altstatus(ap); /* flush */
4925c6fd2807SJeff Garzik 		} else
4926c6fd2807SJeff Garzik 			/* send CDB */
4927c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
4928c6fd2807SJeff Garzik 
4929c6fd2807SJeff Garzik 		if (in_wq)
4930c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4931c6fd2807SJeff Garzik 
4932c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
4933c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
4934c6fd2807SJeff Garzik 		 */
4935c6fd2807SJeff Garzik 		break;
4936c6fd2807SJeff Garzik 
4937c6fd2807SJeff Garzik 	case HSM_ST:
4938c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
4939c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
4940c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
4941c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
4942c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
4943c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
4944c6fd2807SJeff Garzik 				 */
4945c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
4946c6fd2807SJeff Garzik 				goto fsm_start;
4947c6fd2807SJeff Garzik 			}
4948c6fd2807SJeff Garzik 
4949c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
4950c6fd2807SJeff Garzik 			 * when it finds something wrong.
4951c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
4952c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
4953c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
4954c6fd2807SJeff Garzik 			 */
4955c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
495644877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
495744877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
495844877b4eSTejun Heo 						status);
4959c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4960c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4961c6fd2807SJeff Garzik 				goto fsm_start;
4962c6fd2807SJeff Garzik 			}
4963c6fd2807SJeff Garzik 
4964c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
4965c6fd2807SJeff Garzik 
4966c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4967c6fd2807SJeff Garzik 				/* bad ireason reported by device */
4968c6fd2807SJeff Garzik 				goto fsm_start;
4969c6fd2807SJeff Garzik 
4970c6fd2807SJeff Garzik 		} else {
4971c6fd2807SJeff Garzik 			/* ATA PIO protocol */
4972c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
4973c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
4974c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
4975c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
4976c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
4977c6fd2807SJeff Garzik 				else
497855a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
497955a8e2c8STejun Heo 					 * Phantom devices also trigger this
498055a8e2c8STejun Heo 					 * condition.  Mark hint.
498155a8e2c8STejun Heo 					 */
498255a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
498355a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
4984c6fd2807SJeff Garzik 
4985c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4986c6fd2807SJeff Garzik 				goto fsm_start;
4987c6fd2807SJeff Garzik 			}
4988c6fd2807SJeff Garzik 
4989c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
4990c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
4991c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
4992c6fd2807SJeff Garzik 			 * block of junk data before changing the
4993c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
4994c6fd2807SJeff Garzik 			 *
4995c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
4996c6fd2807SJeff Garzik 			 * sense since the data block has been
4997c6fd2807SJeff Garzik 			 * transferred to the device.
4998c6fd2807SJeff Garzik 			 */
4999c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5000c6fd2807SJeff Garzik 				/* data might be corrupted */
5001c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5002c6fd2807SJeff Garzik 
5003c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5004c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5005c6fd2807SJeff Garzik 					ata_altstatus(ap);
5006c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5007c6fd2807SJeff Garzik 				}
5008c6fd2807SJeff Garzik 
5009c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5010c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5011c6fd2807SJeff Garzik 
5012c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5013c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5014c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5015c6fd2807SJeff Garzik 				 */
5016c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5017c6fd2807SJeff Garzik 				goto fsm_start;
5018c6fd2807SJeff Garzik 			}
5019c6fd2807SJeff Garzik 
5020c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5021c6fd2807SJeff Garzik 
5022c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5023c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5024c6fd2807SJeff Garzik 				/* all data read */
5025c6fd2807SJeff Garzik 				ata_altstatus(ap);
5026c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5027c6fd2807SJeff Garzik 				goto fsm_start;
5028c6fd2807SJeff Garzik 			}
5029c6fd2807SJeff Garzik 		}
5030c6fd2807SJeff Garzik 
5031c6fd2807SJeff Garzik 		ata_altstatus(ap); /* flush */
5032c6fd2807SJeff Garzik 		poll_next = 1;
5033c6fd2807SJeff Garzik 		break;
5034c6fd2807SJeff Garzik 
5035c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5036c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5037c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5038c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5039c6fd2807SJeff Garzik 			goto fsm_start;
5040c6fd2807SJeff Garzik 		}
5041c6fd2807SJeff Garzik 
5042c6fd2807SJeff Garzik 		/* no more data to transfer */
5043c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
504444877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5045c6fd2807SJeff Garzik 
5046c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5047c6fd2807SJeff Garzik 
5048c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5049c6fd2807SJeff Garzik 
5050c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5051c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5052c6fd2807SJeff Garzik 
5053c6fd2807SJeff Garzik 		poll_next = 0;
5054c6fd2807SJeff Garzik 		break;
5055c6fd2807SJeff Garzik 
5056c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5057c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5058c6fd2807SJeff Garzik 		 * know what's wrong and recover
5059c6fd2807SJeff Garzik 		 */
5060c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5061c6fd2807SJeff Garzik 
5062c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5063c6fd2807SJeff Garzik 
5064c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5065c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5066c6fd2807SJeff Garzik 
5067c6fd2807SJeff Garzik 		poll_next = 0;
5068c6fd2807SJeff Garzik 		break;
5069c6fd2807SJeff Garzik 	default:
5070c6fd2807SJeff Garzik 		poll_next = 0;
5071c6fd2807SJeff Garzik 		BUG();
5072c6fd2807SJeff Garzik 	}
5073c6fd2807SJeff Garzik 
5074c6fd2807SJeff Garzik 	return poll_next;
5075c6fd2807SJeff Garzik }
5076c6fd2807SJeff Garzik 
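/*
 * A rough, simplified sketch of how the state machine above is walked
 * for a polled PIO data-in command (pseudocode in this comment, not
 * real calls):
 *
 *	issue taskfile, ap->hsm_task_state = HSM_ST
 *	repeat:
 *		status = ata_chk_status(ap);      wait for !BSY, DRQ
 *		ata_hsm_move(ap, qc, status, 1);  HSM_ST: read one block
 *	until hsm_task_state reaches HSM_ST_LAST
 *	ata_hsm_move(ap, qc, status, 1);          checks final status and
 *						  completes the qc
 *
 * In practice ata_pio_task() below performs this loop for polled
 * commands, while ata_host_intr() drives the same moves from the
 * interrupt handler with in_wq == 0.
 */
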
507765f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5078c6fd2807SJeff Garzik {
507965f27f38SDavid Howells 	struct ata_port *ap =
508065f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
508165f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5082c6fd2807SJeff Garzik 	u8 status;
5083c6fd2807SJeff Garzik 	int poll_next;
5084c6fd2807SJeff Garzik 
5085c6fd2807SJeff Garzik fsm_start:
5086c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5087c6fd2807SJeff Garzik 
5088c6fd2807SJeff Garzik 	/*
5089c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5090c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5091c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5092c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5093c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5094c6fd2807SJeff Garzik 	 */
5095c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5096c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5097c6fd2807SJeff Garzik 		msleep(2);
5098c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5099c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5100c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5101c6fd2807SJeff Garzik 			return;
5102c6fd2807SJeff Garzik 		}
5103c6fd2807SJeff Garzik 	}
5104c6fd2807SJeff Garzik 
5105c6fd2807SJeff Garzik 	/* move the HSM */
5106c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5107c6fd2807SJeff Garzik 
5108c6fd2807SJeff Garzik 	/* another command or interrupt handler
5109c6fd2807SJeff Garzik 	 * may be running at this point.
5110c6fd2807SJeff Garzik 	 */
5111c6fd2807SJeff Garzik 	if (poll_next)
5112c6fd2807SJeff Garzik 		goto fsm_start;
5113c6fd2807SJeff Garzik }
5114c6fd2807SJeff Garzik 
5115c6fd2807SJeff Garzik /**
5116c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5117c6fd2807SJeff Garzik  *	@ap: Port associated with device @dev
5118c6fd2807SJeff Garzik  *	@dev: Device from which we request an available command structure
5119c6fd2807SJeff Garzik  *
5120c6fd2807SJeff Garzik  *	LOCKING:
5121c6fd2807SJeff Garzik  *	None.
5122c6fd2807SJeff Garzik  */
5123c6fd2807SJeff Garzik 
5124c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5125c6fd2807SJeff Garzik {
5126c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5127c6fd2807SJeff Garzik 	unsigned int i;
5128c6fd2807SJeff Garzik 
5129c6fd2807SJeff Garzik 	/* no command while frozen */
5130c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5131c6fd2807SJeff Garzik 		return NULL;
5132c6fd2807SJeff Garzik 
5133c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5134c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5135c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5136c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5137c6fd2807SJeff Garzik 			break;
5138c6fd2807SJeff Garzik 		}
5139c6fd2807SJeff Garzik 
5140c6fd2807SJeff Garzik 	if (qc)
5141c6fd2807SJeff Garzik 		qc->tag = i;
5142c6fd2807SJeff Garzik 
5143c6fd2807SJeff Garzik 	return qc;
5144c6fd2807SJeff Garzik }
5145c6fd2807SJeff Garzik 
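/*
 * With the usual ATA_MAX_QUEUE of 32 (an assumption; see <linux/libata.h>
 * for the authoritative value), this hands out tags 0..30 on a
 * first-free basis via the qc_allocated bitmap, while the last tag stays
 * reserved for the internal command used by EH and probing.
 */
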
5146c6fd2807SJeff Garzik /**
5147c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5148c6fd2807SJeff Garzik  *	@dev: Device from which we request an available command structure
5149c6fd2807SJeff Garzik  *
5150c6fd2807SJeff Garzik  *	LOCKING:
5151c6fd2807SJeff Garzik  *	None.
5152c6fd2807SJeff Garzik  */
5153c6fd2807SJeff Garzik 
5154c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5155c6fd2807SJeff Garzik {
5156c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5157c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5158c6fd2807SJeff Garzik 
5159c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5160c6fd2807SJeff Garzik 	if (qc) {
5161c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5162c6fd2807SJeff Garzik 		qc->ap = ap;
5163c6fd2807SJeff Garzik 		qc->dev = dev;
5164c6fd2807SJeff Garzik 
5165c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5166c6fd2807SJeff Garzik 	}
5167c6fd2807SJeff Garzik 
5168c6fd2807SJeff Garzik 	return qc;
5169c6fd2807SJeff Garzik }
5170c6fd2807SJeff Garzik 
5171c6fd2807SJeff Garzik /**
5172c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5173c6fd2807SJeff Garzik  *	@qc: Command to complete
5174c6fd2807SJeff Garzik  *
5175c6fd2807SJeff Garzik  *	Designed to free an unused ata_queued_cmd object
5176c6fd2807SJeff Garzik  *	in case something prevents using it.
5177c6fd2807SJeff Garzik  *
5178c6fd2807SJeff Garzik  *	LOCKING:
5179cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5180c6fd2807SJeff Garzik  */
5181c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5182c6fd2807SJeff Garzik {
5183c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5184c6fd2807SJeff Garzik 	unsigned int tag;
5185c6fd2807SJeff Garzik 
5186c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5187c6fd2807SJeff Garzik 
5188c6fd2807SJeff Garzik 	qc->flags = 0;
5189c6fd2807SJeff Garzik 	tag = qc->tag;
5190c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5191c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5192c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5193c6fd2807SJeff Garzik 	}
5194c6fd2807SJeff Garzik }
5195c6fd2807SJeff Garzik 
5196c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5197c6fd2807SJeff Garzik {
5198c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5199c6fd2807SJeff Garzik 
5200c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5201c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5202c6fd2807SJeff Garzik 
5203c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5204c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5205c6fd2807SJeff Garzik 
5206c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5207c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ)
5208c6fd2807SJeff Garzik 		ap->sactive &= ~(1 << qc->tag);
5209c6fd2807SJeff Garzik 	else
5210c6fd2807SJeff Garzik 		ap->active_tag = ATA_TAG_POISON;
5211c6fd2807SJeff Garzik 
5212c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5213c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5214c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5215c6fd2807SJeff Garzik 	 */
5216c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5217c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5218c6fd2807SJeff Garzik 
5219c6fd2807SJeff Garzik 	/* call completion callback */
5220c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5221c6fd2807SJeff Garzik }
5222c6fd2807SJeff Garzik 
522339599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
522439599a53STejun Heo {
522539599a53STejun Heo 	struct ata_port *ap = qc->ap;
522639599a53STejun Heo 
522739599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
52284742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
522939599a53STejun Heo }
523039599a53STejun Heo 
5231c6fd2807SJeff Garzik /**
5232c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5233c6fd2807SJeff Garzik  *	@qc: Command to complete
5235c6fd2807SJeff Garzik  *
5236c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5237c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5238c6fd2807SJeff Garzik  *
5239c6fd2807SJeff Garzik  *	LOCKING:
5240cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5241c6fd2807SJeff Garzik  */
5242c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5243c6fd2807SJeff Garzik {
5244c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5245c6fd2807SJeff Garzik 
5246c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5247c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5248c6fd2807SJeff Garzik 	 *
5249c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5250c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5251c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5252c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5253c6fd2807SJeff Garzik 	 *
5254c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5255c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5256c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5257c6fd2807SJeff Garzik 	 * taken care of.
5258c6fd2807SJeff Garzik 	 */
5259c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5260c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5261c6fd2807SJeff Garzik 
5262c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5263c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5264c6fd2807SJeff Garzik 
5265c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5266c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5267c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
526839599a53STejun Heo 				fill_result_tf(qc);
5269c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5270c6fd2807SJeff Garzik 				return;
5271c6fd2807SJeff Garzik 			}
5272c6fd2807SJeff Garzik 		}
5273c6fd2807SJeff Garzik 
5274c6fd2807SJeff Garzik 		/* read result TF if requested */
5275c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
527639599a53STejun Heo 			fill_result_tf(qc);
5277c6fd2807SJeff Garzik 
5278c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5279c6fd2807SJeff Garzik 	} else {
5280c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5281c6fd2807SJeff Garzik 			return;
5282c6fd2807SJeff Garzik 
5283c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5284c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
528539599a53STejun Heo 			fill_result_tf(qc);
5286c6fd2807SJeff Garzik 
5287c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5288c6fd2807SJeff Garzik 	}
5289c6fd2807SJeff Garzik }
5290c6fd2807SJeff Garzik 
5291c6fd2807SJeff Garzik /**
5292c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5293c6fd2807SJeff Garzik  *	@ap: port in question
5294c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5295c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5296c6fd2807SJeff Garzik  *
5297c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5298c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to
5299c6fd2807SJeff Garzik  *	complete requests normally.  ap->qc_active and @qc_active are
5300c6fd2807SJeff Garzik  *	compared and commands are completed accordingly.
5301c6fd2807SJeff Garzik  *
5302c6fd2807SJeff Garzik  *	LOCKING:
5303cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5304c6fd2807SJeff Garzik  *
5305c6fd2807SJeff Garzik  *	RETURNS:
5306c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5307c6fd2807SJeff Garzik  */
5308c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5309c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5310c6fd2807SJeff Garzik {
5311c6fd2807SJeff Garzik 	int nr_done = 0;
5312c6fd2807SJeff Garzik 	u32 done_mask;
5313c6fd2807SJeff Garzik 	int i;
5314c6fd2807SJeff Garzik 
5315c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5316c6fd2807SJeff Garzik 
5317c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5318c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5319c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5320c6fd2807SJeff Garzik 		return -EINVAL;
5321c6fd2807SJeff Garzik 	}
5322c6fd2807SJeff Garzik 
5323c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5324c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5325c6fd2807SJeff Garzik 
5326c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5327c6fd2807SJeff Garzik 			continue;
5328c6fd2807SJeff Garzik 
5329c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5330c6fd2807SJeff Garzik 			if (finish_qc)
5331c6fd2807SJeff Garzik 				finish_qc(qc);
5332c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5333c6fd2807SJeff Garzik 			nr_done++;
5334c6fd2807SJeff Garzik 		}
5335c6fd2807SJeff Garzik 	}
5336c6fd2807SJeff Garzik 
5337c6fd2807SJeff Garzik 	return nr_done;
5338c6fd2807SJeff Garzik }
5339c6fd2807SJeff Garzik 
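/*
 * A minimal sketch of how an NCQ-capable LLDD interrupt path might use
 * ata_qc_complete_multiple().  my_read_active_tags() is a hypothetical
 * helper that reads the controller's "commands still outstanding"
 * bitmap; it is not a libata symbol.
 *
 *	static void my_host_intr(struct ata_port *ap)
 *	{
 *		u32 qc_active = my_read_active_tags(ap);
 *
 *		if (ata_qc_complete_multiple(ap, qc_active, NULL) < 0) {
 *			struct ata_eh_info *ehi = &ap->eh_info;
 *
 *			ehi->err_mask |= AC_ERR_HSM;
 *			ata_port_freeze(ap);
 *		}
 *	}
 *
 * Passing NULL for @finish_qc is fine when the driver has no per-qc
 * cleanup to do before completion.
 */
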
5340c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5341c6fd2807SJeff Garzik {
5342c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5343c6fd2807SJeff Garzik 
5344c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5345c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5346c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5347c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5348c6fd2807SJeff Garzik 		return 1;
5349c6fd2807SJeff Garzik 
5350c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5351c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5352c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5353c6fd2807SJeff Garzik 			return 1;
5354c6fd2807SJeff Garzik 
5355c6fd2807SJeff Garzik 		/* fall through */
5356c6fd2807SJeff Garzik 
5357c6fd2807SJeff Garzik 	default:
5358c6fd2807SJeff Garzik 		return 0;
5359c6fd2807SJeff Garzik 	}
5360c6fd2807SJeff Garzik 
5361c6fd2807SJeff Garzik 	/* never reached */
5362c6fd2807SJeff Garzik }
5363c6fd2807SJeff Garzik 
5364c6fd2807SJeff Garzik /**
5365c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5366c6fd2807SJeff Garzik  *	@qc: command to issue to device
5367c6fd2807SJeff Garzik  *
5368c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5369c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5370c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5371c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5372c6fd2807SJeff Garzik  *
5373c6fd2807SJeff Garzik  *	LOCKING:
5374cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5375c6fd2807SJeff Garzik  */
5376c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5377c6fd2807SJeff Garzik {
5378c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5379c6fd2807SJeff Garzik 
5380c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5381c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5382c6fd2807SJeff Garzik 	 * request ATAPI sense.
5383c6fd2807SJeff Garzik 	 */
5384c6fd2807SJeff Garzik 	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5385c6fd2807SJeff Garzik 
5386c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
5387c6fd2807SJeff Garzik 		WARN_ON(ap->sactive & (1 << qc->tag));
5388c6fd2807SJeff Garzik 		ap->sactive |= 1 << qc->tag;
5389c6fd2807SJeff Garzik 	} else {
5390c6fd2807SJeff Garzik 		WARN_ON(ap->sactive);
5391c6fd2807SJeff Garzik 		ap->active_tag = qc->tag;
5392c6fd2807SJeff Garzik 	}
5393c6fd2807SJeff Garzik 
5394c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5395c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5396c6fd2807SJeff Garzik 
5397c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5398c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5399c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5400c6fd2807SJeff Garzik 				goto sg_err;
5401c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5402c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5403c6fd2807SJeff Garzik 				goto sg_err;
5404c6fd2807SJeff Garzik 		}
5405c6fd2807SJeff Garzik 	} else {
5406c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5407c6fd2807SJeff Garzik 	}
5408c6fd2807SJeff Garzik 
5409c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5410c6fd2807SJeff Garzik 
5411c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5412c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5413c6fd2807SJeff Garzik 		goto err;
5414c6fd2807SJeff Garzik 	return;
5415c6fd2807SJeff Garzik 
5416c6fd2807SJeff Garzik sg_err:
5417c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5418c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5419c6fd2807SJeff Garzik err:
5420c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5421c6fd2807SJeff Garzik }
5422c6fd2807SJeff Garzik 
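/*
 * A condensed sketch of the normal submission path feeding
 * ata_qc_issue(), as seen from a hypothetical caller (error handling
 * and the SCSI glue are omitted; my_done is an assumed completion
 * callback, not a libata symbol).  Per the LOCKING note above, the
 * caller already holds the host lock.
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(dev);
 *
 *	if (qc) {
 *		qc->tf.command  = ATA_CMD_FLUSH;	// any non-data cmd
 *		qc->tf.protocol = ATA_PROT_NODATA;
 *		qc->tf.flags   |= ATA_TFLAG_DEVICE;
 *		qc->complete_fn = my_done;
 *		ata_qc_issue(qc);			// maps DMA if needed,
 *							// preps and issues
 *	}
 */
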
5423c6fd2807SJeff Garzik /**
5424c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5425c6fd2807SJeff Garzik  *	@qc: command to issue to device
5426c6fd2807SJeff Garzik  *
5427c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5428c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5429c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5430c6fd2807SJeff Garzik  *	is slightly different.
5431c6fd2807SJeff Garzik  *
5432c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5433c6fd2807SJeff Garzik  *
5434c6fd2807SJeff Garzik  *	LOCKING:
5435cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5436c6fd2807SJeff Garzik  *
5437c6fd2807SJeff Garzik  *	RETURNS:
5438c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
5439c6fd2807SJeff Garzik  */
5440c6fd2807SJeff Garzik 
5441c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5442c6fd2807SJeff Garzik {
5443c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5444c6fd2807SJeff Garzik 
5445c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
5446c6fd2807SJeff Garzik 	 * interrupt-driven PIO and ATAPI CDB interrupts.
5447c6fd2807SJeff Garzik 	 */
5448c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5449c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
5450c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
5451e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
5452c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
5453c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
5454c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
5455c6fd2807SJeff Garzik 			break;
5456c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
5457c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5458c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
5459c6fd2807SJeff Garzik 				BUG();
5460c6fd2807SJeff Garzik 			break;
5461c6fd2807SJeff Garzik 		default:
5462c6fd2807SJeff Garzik 			break;
5463c6fd2807SJeff Garzik 		}
5464c6fd2807SJeff Garzik 	}
5465c6fd2807SJeff Garzik 
5466c6fd2807SJeff Garzik 	/* select the device */
5467c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5468c6fd2807SJeff Garzik 
5469c6fd2807SJeff Garzik 	/* start the command */
5470c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5471c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
5472c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5473c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5474c6fd2807SJeff Garzik 
5475c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5476c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5477c6fd2807SJeff Garzik 
5478c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5479c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5480c6fd2807SJeff Garzik 
5481c6fd2807SJeff Garzik 		break;
5482c6fd2807SJeff Garzik 
5483c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5484c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5485c6fd2807SJeff Garzik 
5486c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5487c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5488c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5489c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5490c6fd2807SJeff Garzik 		break;
5491c6fd2807SJeff Garzik 
5492c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5493c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5494c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5495c6fd2807SJeff Garzik 
5496c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5497c6fd2807SJeff Garzik 
5498c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5499c6fd2807SJeff Garzik 			/* PIO data out protocol */
5500c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
5501c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5502c6fd2807SJeff Garzik 
5503c6fd2807SJeff Garzik 			/* always send first data block using
5504c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
5505c6fd2807SJeff Garzik 			 */
5506c6fd2807SJeff Garzik 		} else {
5507c6fd2807SJeff Garzik 			/* PIO data in protocol */
5508c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5509c6fd2807SJeff Garzik 
5510c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5511c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5512c6fd2807SJeff Garzik 
5513c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
5514c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
5515c6fd2807SJeff Garzik 			 */
5516c6fd2807SJeff Garzik 		}
5517c6fd2807SJeff Garzik 
5518c6fd2807SJeff Garzik 		break;
5519c6fd2807SJeff Garzik 
5520c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5521c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5522c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5523c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5524c6fd2807SJeff Garzik 
5525c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5526c6fd2807SJeff Garzik 
5527c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5528c6fd2807SJeff Garzik 
5529c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5530c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5531c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5532c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5533c6fd2807SJeff Garzik 		break;
5534c6fd2807SJeff Garzik 
5535c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5536c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5537c6fd2807SJeff Garzik 
5538c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5539c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5540c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5541c6fd2807SJeff Garzik 
5542c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5543c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5544c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5545c6fd2807SJeff Garzik 		break;
5546c6fd2807SJeff Garzik 
5547c6fd2807SJeff Garzik 	default:
5548c6fd2807SJeff Garzik 		WARN_ON(1);
5549c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
5550c6fd2807SJeff Garzik 	}
5551c6fd2807SJeff Garzik 
5552c6fd2807SJeff Garzik 	return 0;
5553c6fd2807SJeff Garzik }
5554c6fd2807SJeff Garzik 
5555c6fd2807SJeff Garzik /**
5556c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
5557c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
5558c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
5559c6fd2807SJeff Garzik  *
5560c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
5561c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
5562c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
5563c6fd2807SJeff Garzik  *
5564c6fd2807SJeff Garzik  *	LOCKING:
5565cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5566c6fd2807SJeff Garzik  *
5567c6fd2807SJeff Garzik  *	RETURNS:
5568c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
5569c6fd2807SJeff Garzik  */
5570c6fd2807SJeff Garzik 
5571c6fd2807SJeff Garzik inline unsigned int ata_host_intr (struct ata_port *ap,
5572c6fd2807SJeff Garzik 				   struct ata_queued_cmd *qc)
5573c6fd2807SJeff Garzik {
5574ea54763fSTejun Heo 	struct ata_eh_info *ehi = &ap->eh_info;
5575c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
5576c6fd2807SJeff Garzik 
5577c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
557844877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5579c6fd2807SJeff Garzik 
5580c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
5581c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5582c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5583c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
5584c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
5585c6fd2807SJeff Garzik 		 */
5586c6fd2807SJeff Garzik 
5587c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5588c6fd2807SJeff Garzik 		 * The flag was turned on only for atapi devices.
5589c6fd2807SJeff Garzik 		 * No need to check is_atapi_taskfile(&qc->tf) again.
5590c6fd2807SJeff Garzik 		 */
5591c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5592c6fd2807SJeff Garzik 			goto idle_irq;
5593c6fd2807SJeff Garzik 		break;
5594c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5595c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
5596c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5597c6fd2807SJeff Garzik 			/* check status of DMA engine */
5598c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
559944877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
560044877b4eSTejun Heo 				ap->print_id, host_stat);
5601c6fd2807SJeff Garzik 
5602c6fd2807SJeff Garzik 			/* if it's not our irq... */
5603c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
5604c6fd2807SJeff Garzik 				goto idle_irq;
5605c6fd2807SJeff Garzik 
5606c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
5607c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
5608c6fd2807SJeff Garzik 
5609c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
5610c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
5611c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
5612c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5613c6fd2807SJeff Garzik 			}
5614c6fd2807SJeff Garzik 		}
5615c6fd2807SJeff Garzik 		break;
5616c6fd2807SJeff Garzik 	case HSM_ST:
5617c6fd2807SJeff Garzik 		break;
5618c6fd2807SJeff Garzik 	default:
5619c6fd2807SJeff Garzik 		goto idle_irq;
5620c6fd2807SJeff Garzik 	}
5621c6fd2807SJeff Garzik 
5622c6fd2807SJeff Garzik 	/* check altstatus */
5623c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
5624c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
5625c6fd2807SJeff Garzik 		goto idle_irq;
5626c6fd2807SJeff Garzik 
5627c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
5628c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
5629c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
5630c6fd2807SJeff Garzik 		goto idle_irq;
5631c6fd2807SJeff Garzik 
5632c6fd2807SJeff Garzik 	/* ack bmdma irq events */
5633c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
5634c6fd2807SJeff Garzik 
5635c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
5636ea54763fSTejun Heo 
5637ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5638ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5639ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5640ea54763fSTejun Heo 
5641c6fd2807SJeff Garzik 	return 1;	/* irq handled */
5642c6fd2807SJeff Garzik 
5643c6fd2807SJeff Garzik idle_irq:
5644c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
5645c6fd2807SJeff Garzik 
5646c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
5647c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
564883625006SAkira Iguchi 		ap->ops->irq_ack(ap, 0); /* debug trap */
5649c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5650c6fd2807SJeff Garzik 		return 1;
5651c6fd2807SJeff Garzik 	}
5652c6fd2807SJeff Garzik #endif
5653c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
5654c6fd2807SJeff Garzik }
5655c6fd2807SJeff Garzik 
5656c6fd2807SJeff Garzik /**
5657c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
5658c6fd2807SJeff Garzik  *	@irq: irq line (unused)
5659cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
5660c6fd2807SJeff Garzik  *
5661c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
5662c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
5663c6fd2807SJeff Garzik  *
5664c6fd2807SJeff Garzik  *	LOCKING:
5665cca3974eSJeff Garzik  *	Obtains host lock during operation.
5666c6fd2807SJeff Garzik  *
5667c6fd2807SJeff Garzik  *	RETURNS:
5668c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
5669c6fd2807SJeff Garzik  */
5670c6fd2807SJeff Garzik 
56717d12e780SDavid Howells irqreturn_t ata_interrupt (int irq, void *dev_instance)
5672c6fd2807SJeff Garzik {
5673cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
5674c6fd2807SJeff Garzik 	unsigned int i;
5675c6fd2807SJeff Garzik 	unsigned int handled = 0;
5676c6fd2807SJeff Garzik 	unsigned long flags;
5677c6fd2807SJeff Garzik 
5678c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5679cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
5680c6fd2807SJeff Garzik 
5681cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5682c6fd2807SJeff Garzik 		struct ata_port *ap;
5683c6fd2807SJeff Garzik 
5684cca3974eSJeff Garzik 		ap = host->ports[i];
5685c6fd2807SJeff Garzik 		if (ap &&
5686c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
5687c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
5688c6fd2807SJeff Garzik 
5689c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, ap->active_tag);
5690c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5691c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
5692c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
5693c6fd2807SJeff Garzik 		}
5694c6fd2807SJeff Garzik 	}
5695c6fd2807SJeff Garzik 
5696cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
5697c6fd2807SJeff Garzik 
5698c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
5699c6fd2807SJeff Garzik }
5700c6fd2807SJeff Garzik 
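/*
 * A minimal sketch of how a PCI IDE style driver might wire this handler
 * up (the irq source and DRV_NAME usage are illustrative assumptions;
 * most drivers go through the libata host registration helpers rather
 * than calling request_irq() directly):
 *
 *	rc = request_irq(pdev->irq, ata_interrupt, IRQF_SHARED,
 *			 DRV_NAME, host);
 *	if (rc)
 *		return rc;
 *
 * The void *dev_instance passed back on every interrupt is the
 * struct ata_host pointer, matching the assignment at the top of
 * ata_interrupt().
 */
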
5701c6fd2807SJeff Garzik /**
5702c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
5703c6fd2807SJeff Garzik  *	@ap: ATA port to test SCR accessibility for
5704c6fd2807SJeff Garzik  *
5705c6fd2807SJeff Garzik  *	Test whether SCRs are accessible for @ap.
5706c6fd2807SJeff Garzik  *
5707c6fd2807SJeff Garzik  *	LOCKING:
5708c6fd2807SJeff Garzik  *	None.
5709c6fd2807SJeff Garzik  *
5710c6fd2807SJeff Garzik  *	RETURNS:
5711c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
5712c6fd2807SJeff Garzik  */
5713c6fd2807SJeff Garzik int sata_scr_valid(struct ata_port *ap)
5714c6fd2807SJeff Garzik {
5715a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5716c6fd2807SJeff Garzik }
5717c6fd2807SJeff Garzik 
5718c6fd2807SJeff Garzik /**
5719c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
5720c6fd2807SJeff Garzik  *	@ap: ATA port to read SCR for
5721c6fd2807SJeff Garzik  *	@reg: SCR to read
5722c6fd2807SJeff Garzik  *	@val: Place to store read value
5723c6fd2807SJeff Garzik  *
5724c6fd2807SJeff Garzik  *	Read SCR register @reg of @ap into *@val.  This function is
5725c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5726c6fd2807SJeff Garzik  *	and the port implements ->scr_read.
5727c6fd2807SJeff Garzik  *
5728c6fd2807SJeff Garzik  *	LOCKING:
5729c6fd2807SJeff Garzik  *	None.
5730c6fd2807SJeff Garzik  *
5731c6fd2807SJeff Garzik  *	RETURNS:
5732c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5733c6fd2807SJeff Garzik  */
5734c6fd2807SJeff Garzik int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5735c6fd2807SJeff Garzik {
5736c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5737c6fd2807SJeff Garzik 		*val = ap->ops->scr_read(ap, reg);
5738c6fd2807SJeff Garzik 		return 0;
5739c6fd2807SJeff Garzik 	}
5740c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5741c6fd2807SJeff Garzik }
5742c6fd2807SJeff Garzik 
5743c6fd2807SJeff Garzik /**
5744c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
5745c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5746c6fd2807SJeff Garzik  *	@reg: SCR to write
5747c6fd2807SJeff Garzik  *	@val: value to write
5748c6fd2807SJeff Garzik  *
5749c6fd2807SJeff Garzik  *	Write @val to SCR register @reg of @ap.  This function is
5750c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5751c6fd2807SJeff Garzik  *	and the port implements ->scr_write.
5752c6fd2807SJeff Garzik  *
5753c6fd2807SJeff Garzik  *	LOCKING:
5754c6fd2807SJeff Garzik  *	None.
5755c6fd2807SJeff Garzik  *
5756c6fd2807SJeff Garzik  *	RETURNS:
5757c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5758c6fd2807SJeff Garzik  */
5759c6fd2807SJeff Garzik int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5760c6fd2807SJeff Garzik {
5761c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5762c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5763c6fd2807SJeff Garzik 		return 0;
5764c6fd2807SJeff Garzik 	}
5765c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5766c6fd2807SJeff Garzik }
5767c6fd2807SJeff Garzik 
5768c6fd2807SJeff Garzik /**
5769c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
5770c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5771c6fd2807SJeff Garzik  *	@reg: SCR to write
5772c6fd2807SJeff Garzik  *	@val: value to write
5773c6fd2807SJeff Garzik  *
5774c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
5775c6fd2807SJeff Garzik  *	function performs flush after writing to the register.
5776c6fd2807SJeff Garzik  *
5777c6fd2807SJeff Garzik  *	LOCKING:
5778c6fd2807SJeff Garzik  *	None.
5779c6fd2807SJeff Garzik  *
5780c6fd2807SJeff Garzik  *	RETURNS:
5781c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5782c6fd2807SJeff Garzik  */
5783c6fd2807SJeff Garzik int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5784c6fd2807SJeff Garzik {
5785c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5786c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5787c6fd2807SJeff Garzik 		ap->ops->scr_read(ap, reg);
5788c6fd2807SJeff Garzik 		return 0;
5789c6fd2807SJeff Garzik 	}
5790c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5791c6fd2807SJeff Garzik }
5792c6fd2807SJeff Garzik 
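/*
 * Illustrative sketch (not part of the driver): an LLD or EH path can
 * use the accessors above without caring whether the port actually has
 * SCRs, e.g. to sample SStatus and clear SError.  The surrounding
 * context is hypothetical.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		sata_scr_write_flush(ap, SCR_ERROR, 0xffffffff);
 */
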
5793c6fd2807SJeff Garzik /**
5794c6fd2807SJeff Garzik  *	ata_port_online - test whether the given port is online
5795c6fd2807SJeff Garzik  *	@ap: ATA port to test
5796c6fd2807SJeff Garzik  *
5797c6fd2807SJeff Garzik  *	Test whether @ap is online.  Note that this function returns 0
5798c6fd2807SJeff Garzik  *	if online status of @ap cannot be obtained, so
5799c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5800c6fd2807SJeff Garzik  *
5801c6fd2807SJeff Garzik  *	LOCKING:
5802c6fd2807SJeff Garzik  *	None.
5803c6fd2807SJeff Garzik  *
5804c6fd2807SJeff Garzik  *	RETURNS:
5805c6fd2807SJeff Garzik  *	1 if the port online status is available and online.
5806c6fd2807SJeff Garzik  */
5807c6fd2807SJeff Garzik int ata_port_online(struct ata_port *ap)
5808c6fd2807SJeff Garzik {
5809c6fd2807SJeff Garzik 	u32 sstatus;
5810c6fd2807SJeff Garzik 
5811c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5812c6fd2807SJeff Garzik 		return 1;
5813c6fd2807SJeff Garzik 	return 0;
5814c6fd2807SJeff Garzik }
5815c6fd2807SJeff Garzik 
5816c6fd2807SJeff Garzik /**
5817c6fd2807SJeff Garzik  *	ata_port_offline - test whether the given port is offline
5818c6fd2807SJeff Garzik  *	@ap: ATA port to test
5819c6fd2807SJeff Garzik  *
5820c6fd2807SJeff Garzik  *	Test whether @ap is offline.  Note that this function returns
5821c6fd2807SJeff Garzik  *	0 if offline status of @ap cannot be obtained, so
5822c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5823c6fd2807SJeff Garzik  *
5824c6fd2807SJeff Garzik  *	LOCKING:
5825c6fd2807SJeff Garzik  *	None.
5826c6fd2807SJeff Garzik  *
5827c6fd2807SJeff Garzik  *	RETURNS:
5828c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
5829c6fd2807SJeff Garzik  */
5830c6fd2807SJeff Garzik int ata_port_offline(struct ata_port *ap)
5831c6fd2807SJeff Garzik {
5832c6fd2807SJeff Garzik 	u32 sstatus;
5833c6fd2807SJeff Garzik 
5834c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5835c6fd2807SJeff Garzik 		return 1;
5836c6fd2807SJeff Garzik 	return 0;
5837c6fd2807SJeff Garzik }
5838c6fd2807SJeff Garzik 
5839c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
5840c6fd2807SJeff Garzik {
5841c6fd2807SJeff Garzik 	unsigned int err_mask;
5842c6fd2807SJeff Garzik 	u8 cmd;
5843c6fd2807SJeff Garzik 
5844c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
5845c6fd2807SJeff Garzik 		return 0;
5846c6fd2807SJeff Garzik 
58476fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5848c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
5849c6fd2807SJeff Garzik 	else
5850c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
5851c6fd2807SJeff Garzik 
5852c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
5853c6fd2807SJeff Garzik 	if (err_mask) {
5854c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5855c6fd2807SJeff Garzik 		return -EIO;
5856c6fd2807SJeff Garzik 	}
5857c6fd2807SJeff Garzik 
5858c6fd2807SJeff Garzik 	return 0;
5859c6fd2807SJeff Garzik }
5860c6fd2807SJeff Garzik 
58616ffa01d8STejun Heo #ifdef CONFIG_PM
5862cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5863cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
5864cca3974eSJeff Garzik 			       int wait)
5865c6fd2807SJeff Garzik {
5866c6fd2807SJeff Garzik 	unsigned long flags;
5867c6fd2807SJeff Garzik 	int i, rc;
5868c6fd2807SJeff Garzik 
5869cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5870cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
5871c6fd2807SJeff Garzik 
5872c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
5873c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
5874c6fd2807SJeff Garzik 		 */
5875c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5876c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5877c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5878c6fd2807SJeff Garzik 		}
5879c6fd2807SJeff Garzik 
5880c6fd2807SJeff Garzik 		/* request PM ops to EH */
5881c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
5882c6fd2807SJeff Garzik 
5883c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
5884c6fd2807SJeff Garzik 		if (wait) {
5885c6fd2807SJeff Garzik 			rc = 0;
5886c6fd2807SJeff Garzik 			ap->pm_result = &rc;
5887c6fd2807SJeff Garzik 		}
5888c6fd2807SJeff Garzik 
5889c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5890c6fd2807SJeff Garzik 		ap->eh_info.action |= action;
5891c6fd2807SJeff Garzik 		ap->eh_info.flags |= ehi_flags;
5892c6fd2807SJeff Garzik 
5893c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
5894c6fd2807SJeff Garzik 
5895c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
5896c6fd2807SJeff Garzik 
5897c6fd2807SJeff Garzik 		/* wait and check result */
5898c6fd2807SJeff Garzik 		if (wait) {
5899c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5900c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5901c6fd2807SJeff Garzik 			if (rc)
5902c6fd2807SJeff Garzik 				return rc;
5903c6fd2807SJeff Garzik 		}
5904c6fd2807SJeff Garzik 	}
5905c6fd2807SJeff Garzik 
5906c6fd2807SJeff Garzik 	return 0;
5907c6fd2807SJeff Garzik }
5908c6fd2807SJeff Garzik 
5909c6fd2807SJeff Garzik /**
5910cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
5911cca3974eSJeff Garzik  *	@host: host to suspend
5912c6fd2807SJeff Garzik  *	@mesg: PM message
5913c6fd2807SJeff Garzik  *
5914cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
5915c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
5916c6fd2807SJeff Garzik  *	to finish.
5917c6fd2807SJeff Garzik  *
5918c6fd2807SJeff Garzik  *	LOCKING:
5919c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5920c6fd2807SJeff Garzik  *
5921c6fd2807SJeff Garzik  *	RETURNS:
5922c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
5923c6fd2807SJeff Garzik  */
5924cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5925c6fd2807SJeff Garzik {
59269666f400STejun Heo 	int rc;
5927c6fd2807SJeff Garzik 
5928cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
59299666f400STejun Heo 	if (rc == 0)
5930cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
5931c6fd2807SJeff Garzik 	return rc;
5932c6fd2807SJeff Garzik }
5933c6fd2807SJeff Garzik 
5934c6fd2807SJeff Garzik /**
5935cca3974eSJeff Garzik  *	ata_host_resume - resume host
5936cca3974eSJeff Garzik  *	@host: host to resume
5937c6fd2807SJeff Garzik  *
5938cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
5939c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
5940c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
5941c6fd2807SJeff Garzik  *
5942c6fd2807SJeff Garzik  *	LOCKING:
5943c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5944c6fd2807SJeff Garzik  */
5945cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
5946c6fd2807SJeff Garzik {
5947cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5948c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5949cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
5950c6fd2807SJeff Garzik }
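
/*
 * Illustrative sketch (hypothetical, non-PCI LLD): a driver's own PM
 * hooks typically call ata_host_suspend() before touching controller
 * state and ata_host_resume() after re-initializing it, mirroring
 * ata_pci_device_suspend()/ata_pci_device_resume() further below.
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		return ata_host_suspend(host, mesg);
 *	}
 *
 *	static int foo_resume(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		foo_hw_init(host);	(hypothetical controller re-init)
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */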
59516ffa01d8STejun Heo #endif
5952c6fd2807SJeff Garzik 
5953c6fd2807SJeff Garzik /**
5954c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
5955c6fd2807SJeff Garzik  *	@ap: Port to initialize
5956c6fd2807SJeff Garzik  *
5957c6fd2807SJeff Garzik  *	Called just after data structures for each port are
5958c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
5959c6fd2807SJeff Garzik  *
5960c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
5961c6fd2807SJeff Garzik  *
5962c6fd2807SJeff Garzik  *	LOCKING:
5963c6fd2807SJeff Garzik  *	Inherited from caller.
5964c6fd2807SJeff Garzik  */
5965c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
5966c6fd2807SJeff Garzik {
5967c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
5968c6fd2807SJeff Garzik 	int rc;
5969c6fd2807SJeff Garzik 
5970f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5971f0d36efdSTejun Heo 				      GFP_KERNEL);
5972c6fd2807SJeff Garzik 	if (!ap->prd)
5973c6fd2807SJeff Garzik 		return -ENOMEM;
5974c6fd2807SJeff Garzik 
5975c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
5976f0d36efdSTejun Heo 	if (rc)
5977c6fd2807SJeff Garzik 		return rc;
5978c6fd2807SJeff Garzik 
5979f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5980f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
5981c6fd2807SJeff Garzik 	return 0;
5982c6fd2807SJeff Garzik }
5983c6fd2807SJeff Garzik 
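/*
 * Illustrative sketch: a typical LLD simply plugs ata_port_start()
 * into its port operations table (every name except the libata symbols
 * is hypothetical).
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		...
 *		.port_start	= ata_port_start,
 *	};
 */
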
5984c6fd2807SJeff Garzik /**
5985c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
5986c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
5987c6fd2807SJeff Garzik  *
5988c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
5989c6fd2807SJeff Garzik  *
5990c6fd2807SJeff Garzik  *	LOCKING:
5991c6fd2807SJeff Garzik  *	Inherited from caller.
5992c6fd2807SJeff Garzik  */
5993c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
5994c6fd2807SJeff Garzik {
5995c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5996c6fd2807SJeff Garzik 	unsigned long flags;
5997c6fd2807SJeff Garzik 
5998c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
5999c6fd2807SJeff Garzik 	ap->sata_spd_limit = ap->hw_sata_spd_limit;
6000c6fd2807SJeff Garzik 
6001c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6002c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6003cca3974eSJeff Garzik 	 * host lock.
6004c6fd2807SJeff Garzik 	 */
6005c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6006c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
6007c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6008c6fd2807SJeff Garzik 
6009c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6010c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6011c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6012c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6013c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6014c6fd2807SJeff Garzik }
6015c6fd2807SJeff Garzik 
6016c6fd2807SJeff Garzik /**
6017f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6018f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6019c6fd2807SJeff Garzik  *
6020f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6021f3187195STejun Heo  *
6022f3187195STejun Heo  *	RETURNS:
6023f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6024c6fd2807SJeff Garzik  *
6025c6fd2807SJeff Garzik  *	LOCKING:
6026f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6027c6fd2807SJeff Garzik  */
6028f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6029c6fd2807SJeff Garzik {
6030f3187195STejun Heo 	struct ata_port *ap;
6031c6fd2807SJeff Garzik 	unsigned int i;
6032c6fd2807SJeff Garzik 
6033f3187195STejun Heo 	DPRINTK("ENTER\n");
6034f3187195STejun Heo 
6035f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6036f3187195STejun Heo 	if (!ap)
6037f3187195STejun Heo 		return NULL;
6038f3187195STejun Heo 
6039f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6040cca3974eSJeff Garzik 	ap->lock = &host->lock;
6041c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6042f3187195STejun Heo 	ap->print_id = -1;
6043c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6044cca3974eSJeff Garzik 	ap->host = host;
6045f3187195STejun Heo 	ap->dev = host->dev;
6046f3187195STejun Heo 
6047c6fd2807SJeff Garzik 	ap->hw_sata_spd_limit = UINT_MAX;
6048c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
6049c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6050c6fd2807SJeff Garzik 
6051c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6052c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6053c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6054c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6055c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6056c6fd2807SJeff Garzik #else
6057c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6058c6fd2807SJeff Garzik #endif
6059c6fd2807SJeff Garzik 
606065f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
606165f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
606265f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6063c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6064c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
6065c6fd2807SJeff Garzik 
6066c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6067c6fd2807SJeff Garzik 
6068c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
6069c6fd2807SJeff Garzik 		struct ata_device *dev = &ap->device[i];
6070c6fd2807SJeff Garzik 		dev->ap = ap;
6071c6fd2807SJeff Garzik 		dev->devno = i;
6072c6fd2807SJeff Garzik 		ata_dev_init(dev);
6073c6fd2807SJeff Garzik 	}
6074c6fd2807SJeff Garzik 
6075c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6076c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6077c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6078c6fd2807SJeff Garzik #endif
6079c6fd2807SJeff Garzik 	return ap;
6080c6fd2807SJeff Garzik }
6081c6fd2807SJeff Garzik 
6082f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6083f0d36efdSTejun Heo {
6084f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6085f0d36efdSTejun Heo 	int i;
6086f0d36efdSTejun Heo 
6087f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6088f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6089f0d36efdSTejun Heo 
6090ecef7253STejun Heo 		if (!ap)
6091ecef7253STejun Heo 			continue;
6092ecef7253STejun Heo 
6093ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6094f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6095f0d36efdSTejun Heo 	}
6096f0d36efdSTejun Heo 
6097ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6098f0d36efdSTejun Heo 		host->ops->host_stop(host);
60991aa56ccaSTejun Heo 
61001aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
61011aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
61021aa506e4STejun Heo 
61034911487aSTejun Heo 		if (!ap)
61044911487aSTejun Heo 			continue;
61054911487aSTejun Heo 
61064911487aSTejun Heo 		if (ap->scsi_host)
61071aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
61081aa506e4STejun Heo 
61094911487aSTejun Heo 		kfree(ap);
61101aa506e4STejun Heo 		host->ports[i] = NULL;
61111aa506e4STejun Heo 	}
61121aa506e4STejun Heo 
61131aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6114f0d36efdSTejun Heo }
6115f0d36efdSTejun Heo 
6116c6fd2807SJeff Garzik /**
6117f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6118f3187195STejun Heo  *	@dev: generic device this host is associated with
6119f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6120f3187195STejun Heo  *
6121f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6122f3187195STejun Heo  *	this function to allocate a host, fully initializes it and then
6123f3187195STejun Heo  *	attaches it using ata_host_register().
6124f3187195STejun Heo  *
6125f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6126f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6127f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6128f3187195STejun Heo  *	ports will be automatically freed on registration.
6129f3187195STejun Heo  *
6130f3187195STejun Heo  *	RETURNS:
6131f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6132f3187195STejun Heo  *
6133f3187195STejun Heo  *	LOCKING:
6134f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6135f3187195STejun Heo  */
6136f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6137f3187195STejun Heo {
6138f3187195STejun Heo 	struct ata_host *host;
6139f3187195STejun Heo 	size_t sz;
6140f3187195STejun Heo 	int i;
6141f3187195STejun Heo 
6142f3187195STejun Heo 	DPRINTK("ENTER\n");
6143f3187195STejun Heo 
6144f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6145f3187195STejun Heo 		return NULL;
6146f3187195STejun Heo 
6147f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6148f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6150f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6151f3187195STejun Heo 	if (!host)
6152f3187195STejun Heo 		goto err_out;
6153f3187195STejun Heo 
6154f3187195STejun Heo 	devres_add(dev, host);
6155f3187195STejun Heo 	dev_set_drvdata(dev, host);
6156f3187195STejun Heo 
6157f3187195STejun Heo 	spin_lock_init(&host->lock);
6158f3187195STejun Heo 	host->dev = dev;
6159f3187195STejun Heo 	host->n_ports = max_ports;
6160f3187195STejun Heo 
6161f3187195STejun Heo 	/* allocate ports bound to this host */
6162f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6163f3187195STejun Heo 		struct ata_port *ap;
6164f3187195STejun Heo 
6165f3187195STejun Heo 		ap = ata_port_alloc(host);
6166f3187195STejun Heo 		if (!ap)
6167f3187195STejun Heo 			goto err_out;
6168f3187195STejun Heo 
6169f3187195STejun Heo 		ap->port_no = i;
6170f3187195STejun Heo 		host->ports[i] = ap;
6171f3187195STejun Heo 	}
6172f3187195STejun Heo 
6173f3187195STejun Heo 	devres_remove_group(dev, NULL);
6174f3187195STejun Heo 	return host;
6175f3187195STejun Heo 
6176f3187195STejun Heo  err_out:
6177f3187195STejun Heo 	devres_release_group(dev, NULL);
6178f3187195STejun Heo 	return NULL;
6179f3187195STejun Heo }
6180f3187195STejun Heo 
6181f3187195STejun Heo /**
6182f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6183f5cda257STejun Heo  *	@dev: generic device this host is associated with
6184f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6185f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6186f5cda257STejun Heo  *
6187f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6188f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6189f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6190f5cda257STejun Heo  *
6191f5cda257STejun Heo  *	RETURNS:
6192f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6193f5cda257STejun Heo  *
6194f5cda257STejun Heo  *	LOCKING:
6195f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6196f5cda257STejun Heo  */
6197f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6198f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6199f5cda257STejun Heo 				      int n_ports)
6200f5cda257STejun Heo {
6201f5cda257STejun Heo 	const struct ata_port_info *pi;
6202f5cda257STejun Heo 	struct ata_host *host;
6203f5cda257STejun Heo 	int i, j;
6204f5cda257STejun Heo 
6205f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6206f5cda257STejun Heo 	if (!host)
6207f5cda257STejun Heo 		return NULL;
6208f5cda257STejun Heo 
6209f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6210f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6211f5cda257STejun Heo 
6212f5cda257STejun Heo 		if (ppi[j])
6213f5cda257STejun Heo 			pi = ppi[j++];
6214f5cda257STejun Heo 
6215f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6216f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6217f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6218f5cda257STejun Heo 		ap->flags |= pi->flags;
6219f5cda257STejun Heo 		ap->ops = pi->port_ops;
6220f5cda257STejun Heo 
6221f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6222f5cda257STejun Heo 			host->ops = pi->port_ops;
6223f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6224f5cda257STejun Heo 			host->private_data = pi->private_data;
6225f5cda257STejun Heo 	}
6226f5cda257STejun Heo 
6227f5cda257STejun Heo 	return host;
6228f5cda257STejun Heo }
6229f5cda257STejun Heo 
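/*
 * Illustrative sketch: @ppi may be NULL terminated, in which case the
 * last real entry is reused for the remaining ports.  A two-port
 * controller sharing one port_info could do (hypothetical names):
 *
 *	static const struct ata_port_info foo_port_info = { ... };
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 */
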
6230f5cda257STejun Heo /**
6231ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6232ecef7253STejun Heo  *	@host: ATA host to start ports for
6233ecef7253STejun Heo  *
6234ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6235ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6236ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6237f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6238f3187195STejun Heo  *	first non-dummy port ops.
6239ecef7253STejun Heo  *
6240ecef7253STejun Heo  *	LOCKING:
6241ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6242ecef7253STejun Heo  *
6243ecef7253STejun Heo  *	RETURNS:
6244ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6245ecef7253STejun Heo  */
6246ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6247ecef7253STejun Heo {
6248ecef7253STejun Heo 	int i, rc;
6249ecef7253STejun Heo 
6250ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6251ecef7253STejun Heo 		return 0;
6252ecef7253STejun Heo 
6253ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6254ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6255ecef7253STejun Heo 
6256f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6257f3187195STejun Heo 			host->ops = ap->ops;
6258f3187195STejun Heo 
6259ecef7253STejun Heo 		if (ap->ops->port_start) {
6260ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6261ecef7253STejun Heo 			if (rc) {
6262ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
6263ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
6264ecef7253STejun Heo 				goto err_out;
6265ecef7253STejun Heo 			}
6266ecef7253STejun Heo 		}
6267ecef7253STejun Heo 
6268ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6269ecef7253STejun Heo 	}
6270ecef7253STejun Heo 
6271ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6272ecef7253STejun Heo 	return 0;
6273ecef7253STejun Heo 
6274ecef7253STejun Heo  err_out:
6275ecef7253STejun Heo 	while (--i >= 0) {
6276ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6277ecef7253STejun Heo 
6278ecef7253STejun Heo 		if (ap->ops->port_stop)
6279ecef7253STejun Heo 			ap->ops->port_stop(ap);
6280ecef7253STejun Heo 	}
6281ecef7253STejun Heo 	return rc;
6282ecef7253STejun Heo }
6283ecef7253STejun Heo 
6284ecef7253STejun Heo /**
6285cca3974eSJeff Garzik  *	ata_host_init - Initialize a host struct
6286cca3974eSJeff Garzik  *	@host:	host to initialize
6287cca3974eSJeff Garzik  *	@dev:	device host is attached to
6288cca3974eSJeff Garzik  *	@flags:	host flags
6289c6fd2807SJeff Garzik  *	@ops:	port_ops
6290c6fd2807SJeff Garzik  *
6291c6fd2807SJeff Garzik  *	LOCKING:
6292c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6293c6fd2807SJeff Garzik  *
6294c6fd2807SJeff Garzik  */
6295f3187195STejun Heo /* KILLME - the only user left is ipr */
6296cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6297cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6298c6fd2807SJeff Garzik {
6299cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6300cca3974eSJeff Garzik 	host->dev = dev;
6301cca3974eSJeff Garzik 	host->flags = flags;
6302cca3974eSJeff Garzik 	host->ops = ops;
6303c6fd2807SJeff Garzik }
6304c6fd2807SJeff Garzik 
6305c6fd2807SJeff Garzik /**
6306f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6307f3187195STejun Heo  *	@host: ATA host to register
6308f3187195STejun Heo  *	@sht: template for SCSI host
6309c6fd2807SJeff Garzik  *
6310f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
6311f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
6312f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
6313f3187195STejun Heo  *	probes attached devices.
6314c6fd2807SJeff Garzik  *
6315c6fd2807SJeff Garzik  *	LOCKING:
6316f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6317c6fd2807SJeff Garzik  *
6318c6fd2807SJeff Garzik  *	RETURNS:
6319f3187195STejun Heo  *	0 on success, -errno otherwise.
6320c6fd2807SJeff Garzik  */
6321f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6322c6fd2807SJeff Garzik {
6323f3187195STejun Heo 	int i, rc;
6324c6fd2807SJeff Garzik 
6325f3187195STejun Heo 	/* host must have been started */
6326f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
6327f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
6328f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
6329f3187195STejun Heo 		WARN_ON(1);
6330f3187195STejun Heo 		return -EINVAL;
633102f076aaSAlan Cox 	}
6332f0d36efdSTejun Heo 
6333f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
6334f3187195STejun Heo 	 * determine the exact number of ports to allocate at
6335f3187195STejun Heo 	 * allocation time.
6336f3187195STejun Heo 	 */
6337f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
6338f3187195STejun Heo 		kfree(host->ports[i]);
6339f0d36efdSTejun Heo 
6340f3187195STejun Heo 	/* give ports names and add SCSI hosts */
6341f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
6342f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
6343c6fd2807SJeff Garzik 
6344f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
6345ecef7253STejun Heo 	if (rc)
6346f3187195STejun Heo 		return rc;
6347ecef7253STejun Heo 
6348fafbae87STejun Heo 	/* associate with ACPI nodes */
6349fafbae87STejun Heo 	ata_acpi_associate(host);
6350fafbae87STejun Heo 
6351f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
6352cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6353cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6354f3187195STejun Heo 		int irq_line;
6355c6fd2807SJeff Garzik 		u32 scontrol;
6356f3187195STejun Heo 		unsigned long xfer_mask;
6357f3187195STejun Heo 
6358f3187195STejun Heo 		/* set SATA cable type if still unset */
6359f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6360f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
6361c6fd2807SJeff Garzik 
6362c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
6363c6fd2807SJeff Garzik 		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6364c6fd2807SJeff Garzik 			int spd = (scontrol >> 4) & 0xf;
6365afe3cc51STejun Heo 			if (spd)
6366c6fd2807SJeff Garzik 				ap->hw_sata_spd_limit &= (1 << spd) - 1;
6367c6fd2807SJeff Garzik 		}
6368c6fd2807SJeff Garzik 		ap->sata_spd_limit = ap->hw_sata_spd_limit;
6369c6fd2807SJeff Garzik 
6370f3187195STejun Heo 		/* report the secondary IRQ for second channel legacy */
6371f3187195STejun Heo 		irq_line = host->irq;
6372f3187195STejun Heo 		if (i == 1 && host->irq2)
6373f3187195STejun Heo 			irq_line = host->irq2;
6374f3187195STejun Heo 
6375f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6376f3187195STejun Heo 					      ap->udma_mask);
6377f3187195STejun Heo 
6378f3187195STejun Heo 		/* print per-port info to dmesg */
6379f3187195STejun Heo 		if (!ata_port_is_dummy(ap))
6380f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6381f3187195STejun Heo 					"ctl 0x%p bmdma 0x%p irq %d\n",
6382a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6383f3187195STejun Heo 					ata_mode_string(xfer_mask),
6384f3187195STejun Heo 					ap->ioaddr.cmd_addr,
6385f3187195STejun Heo 					ap->ioaddr.ctl_addr,
6386f3187195STejun Heo 					ap->ioaddr.bmdma_addr,
6387f3187195STejun Heo 					irq_line);
6388f3187195STejun Heo 		else
6389f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6390c6fd2807SJeff Garzik 	}
6391c6fd2807SJeff Garzik 
6392f3187195STejun Heo 	/* perform each probe synchronously */
6393f3187195STejun Heo 	DPRINTK("probe begin\n");
6394f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6395f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
6396f3187195STejun Heo 		int rc;
6397f3187195STejun Heo 
6398f3187195STejun Heo 		/* probe */
6399c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
6400c6fd2807SJeff Garzik 			struct ata_eh_info *ehi = &ap->eh_info;
6401c6fd2807SJeff Garzik 			unsigned long flags;
6402c6fd2807SJeff Garzik 
6403c6fd2807SJeff Garzik 			ata_port_probe(ap);
6404c6fd2807SJeff Garzik 
6405c6fd2807SJeff Garzik 			/* kick EH for boot probing */
6406c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
6407c6fd2807SJeff Garzik 
6408c6fd2807SJeff Garzik 			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6409c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
6410c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6411c6fd2807SJeff Garzik 
6412f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6413c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
6414c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
6415c6fd2807SJeff Garzik 
6416c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
6417c6fd2807SJeff Garzik 
6418c6fd2807SJeff Garzik 			/* wait for EH to finish */
6419c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6420c6fd2807SJeff Garzik 		} else {
642144877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6422c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
642344877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6424c6fd2807SJeff Garzik 
6425c6fd2807SJeff Garzik 			if (rc) {
6426c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
6427c6fd2807SJeff Garzik 				 * Current libata behavior will
6428c6fd2807SJeff Garzik 				 * tear down everything when
6429c6fd2807SJeff Garzik 				 * the module is removed
6430c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
6431c6fd2807SJeff Garzik 				 */
6432c6fd2807SJeff Garzik 			}
6433c6fd2807SJeff Garzik 		}
6434c6fd2807SJeff Garzik 	}
6435c6fd2807SJeff Garzik 
6436c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
6437c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
6438cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6439cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6440c6fd2807SJeff Garzik 
6441c6fd2807SJeff Garzik 		ata_scsi_scan_host(ap);
6442c6fd2807SJeff Garzik 	}
6443c6fd2807SJeff Garzik 
6444f3187195STejun Heo 	return 0;
6445f3187195STejun Heo }
6446f3187195STejun Heo 
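/*
 * Illustrative sketch: LLDs which cannot use ata_host_activate() below
 * (for example because they need to request more than one IRQ) perform
 * the same steps by hand.  Apart from the hypothetical names, this
 * mirrors what ata_host_activate() does.
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = devm_request_irq(host->dev, irq, foo_interrupt, IRQF_SHARED,
 *			      DRV_NAME, host);
 *	if (rc)
 *		return rc;
 *	return ata_host_register(host, &foo_sht);
 */
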
6447f3187195STejun Heo /**
6448f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
6449f5cda257STejun Heo  *	@host: target ATA host
6450f5cda257STejun Heo  *	@irq: IRQ to request
6451f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
6452f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
6453f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
6454f5cda257STejun Heo  *
6455f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
6456f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
6457f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
6458f5cda257STejun Heo  *	arguments and performs the three steps in one go.
6459f5cda257STejun Heo  *
6460f5cda257STejun Heo  *	LOCKING:
6461f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6462f5cda257STejun Heo  *
6463f5cda257STejun Heo  *	RETURNS:
6464f5cda257STejun Heo  *	0 on success, -errno otherwise.
6465f5cda257STejun Heo  */
6466f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
6467f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
6468f5cda257STejun Heo 		      struct scsi_host_template *sht)
6469f5cda257STejun Heo {
6470f5cda257STejun Heo 	int rc;
6471f5cda257STejun Heo 
6472f5cda257STejun Heo 	rc = ata_host_start(host);
6473f5cda257STejun Heo 	if (rc)
6474f5cda257STejun Heo 		return rc;
6475f5cda257STejun Heo 
6476f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6477f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
6478f5cda257STejun Heo 	if (rc)
6479f5cda257STejun Heo 		return rc;
6480f5cda257STejun Heo 
64814031826bSTejun Heo 	/* Used to print device info at probe */
64824031826bSTejun Heo 	host->irq = irq;
64834031826bSTejun Heo 
6484f5cda257STejun Heo 	rc = ata_host_register(host, sht);
6485f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
6486f5cda257STejun Heo 	if (rc)
6487f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
6488f5cda257STejun Heo 
6489f5cda257STejun Heo 	return rc;
6490f5cda257STejun Heo }
6491f5cda257STejun Heo 
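/*
 * Illustrative sketch of a complete PCI probe built on the helpers
 * above.  Driver names are hypothetical, and a real LLD would also map
 * its BARs and fill in each port's ioaddr before activating.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		return ata_host_activate(host, pdev->irq, ata_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */
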
6492f5cda257STejun Heo /**
6493c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
6494c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
6495c6fd2807SJeff Garzik  *
6496c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
6497c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
6498c6fd2807SJeff Garzik  *	be quiescent on return from this function.
6499c6fd2807SJeff Garzik  *
6500c6fd2807SJeff Garzik  *	LOCKING:
6501c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6502c6fd2807SJeff Garzik  */
6503c6fd2807SJeff Garzik void ata_port_detach(struct ata_port *ap)
6504c6fd2807SJeff Garzik {
6505c6fd2807SJeff Garzik 	unsigned long flags;
6506c6fd2807SJeff Garzik 	int i;
6507c6fd2807SJeff Garzik 
6508c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
6509c6fd2807SJeff Garzik 		goto skip_eh;
6510c6fd2807SJeff Garzik 
6511c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
6512c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6513c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
6514c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6515c6fd2807SJeff Garzik 
6516c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6517c6fd2807SJeff Garzik 
6518c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
6519c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
6520c6fd2807SJeff Garzik 	 */
6521c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6522c6fd2807SJeff Garzik 
6523c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
6524c6fd2807SJeff Garzik 		ata_dev_disable(&ap->device[i]);
6525c6fd2807SJeff Garzik 
6526c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6527c6fd2807SJeff Garzik 
6528c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6529c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
6530c6fd2807SJeff Garzik 	 * target.
6531c6fd2807SJeff Garzik 	 */
6532c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6533c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
6534c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6535c6fd2807SJeff Garzik 
6536c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
653745a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
6538c6fd2807SJeff Garzik 
6539c6fd2807SJeff Garzik  skip_eh:
6540c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
6541cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
6542c6fd2807SJeff Garzik }
6543c6fd2807SJeff Garzik 
6544c6fd2807SJeff Garzik /**
65450529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
65460529c159STejun Heo  *	@host: Host to detach
65470529c159STejun Heo  *
65480529c159STejun Heo  *	Detach all ports of @host.
65490529c159STejun Heo  *
65500529c159STejun Heo  *	LOCKING:
65510529c159STejun Heo  *	Kernel thread context (may sleep).
65520529c159STejun Heo  */
65530529c159STejun Heo void ata_host_detach(struct ata_host *host)
65540529c159STejun Heo {
65550529c159STejun Heo 	int i;
65560529c159STejun Heo 
65570529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
65580529c159STejun Heo 		ata_port_detach(host->ports[i]);
65590529c159STejun Heo }
65600529c159STejun Heo 
6561c6fd2807SJeff Garzik /**
6562c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
6563c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
6564c6fd2807SJeff Garzik  *
6565c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
6566c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6567c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
6568c6fd2807SJeff Garzik  *	relative to cmd_addr.
6569c6fd2807SJeff Garzik  *
6570c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6571c6fd2807SJeff Garzik  */
6572c6fd2807SJeff Garzik 
6573c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
6574c6fd2807SJeff Garzik {
6575c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6576c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6577c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6578c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6579c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6580c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6581c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6582c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6583c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6584c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6585c6fd2807SJeff Garzik }
6586c6fd2807SJeff Garzik 
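/*
 * Illustrative sketch: an LLD fills in cmd_addr (and usually ctl_addr
 * and bmdma_addr) itself and lets ata_std_ports() derive the remaining
 * taskfile register addresses.  The base and offset below are
 * hypothetical.
 *
 *	void __iomem *base;		(mapped via pcim_iomap() or similar)
 *
 *	ap->ioaddr.cmd_addr = base;
 *	ap->ioaddr.ctl_addr = base + 0x10;
 *	ata_std_ports(&ap->ioaddr);
 */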
6587c6fd2807SJeff Garzik 
6588c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6589c6fd2807SJeff Garzik 
6590c6fd2807SJeff Garzik /**
6591c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
6592c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
6593c6fd2807SJeff Garzik  *
6594b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
6595b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
6596b878ca5dSTejun Heo  *	release is handled via devres.
6597c6fd2807SJeff Garzik  *
6598c6fd2807SJeff Garzik  *	LOCKING:
6599c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
6600c6fd2807SJeff Garzik  */
6601c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
6602c6fd2807SJeff Garzik {
6603c6fd2807SJeff Garzik 	struct device *dev = pci_dev_to_dev(pdev);
6604cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
6605c6fd2807SJeff Garzik 
6606f0d36efdSTejun Heo 	ata_host_detach(host);
6607c6fd2807SJeff Garzik }
6608c6fd2807SJeff Garzik 
6609c6fd2807SJeff Garzik /* move to PCI subsystem */
6610c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6611c6fd2807SJeff Garzik {
6612c6fd2807SJeff Garzik 	unsigned long tmp = 0;
6613c6fd2807SJeff Garzik 
6614c6fd2807SJeff Garzik 	switch (bits->width) {
6615c6fd2807SJeff Garzik 	case 1: {
6616c6fd2807SJeff Garzik 		u8 tmp8 = 0;
6617c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6618c6fd2807SJeff Garzik 		tmp = tmp8;
6619c6fd2807SJeff Garzik 		break;
6620c6fd2807SJeff Garzik 	}
6621c6fd2807SJeff Garzik 	case 2: {
6622c6fd2807SJeff Garzik 		u16 tmp16 = 0;
6623c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
6624c6fd2807SJeff Garzik 		tmp = tmp16;
6625c6fd2807SJeff Garzik 		break;
6626c6fd2807SJeff Garzik 	}
6627c6fd2807SJeff Garzik 	case 4: {
6628c6fd2807SJeff Garzik 		u32 tmp32 = 0;
6629c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6630c6fd2807SJeff Garzik 		tmp = tmp32;
6631c6fd2807SJeff Garzik 		break;
6632c6fd2807SJeff Garzik 	}
6633c6fd2807SJeff Garzik 
6634c6fd2807SJeff Garzik 	default:
6635c6fd2807SJeff Garzik 		return -EINVAL;
6636c6fd2807SJeff Garzik 	}
6637c6fd2807SJeff Garzik 
6638c6fd2807SJeff Garzik 	tmp &= bits->mask;
6639c6fd2807SJeff Garzik 
6640c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
6641c6fd2807SJeff Garzik }
6642c6fd2807SJeff Garzik 
66436ffa01d8STejun Heo #ifdef CONFIG_PM
6644c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6645c6fd2807SJeff Garzik {
6646c6fd2807SJeff Garzik 	pci_save_state(pdev);
6647c6fd2807SJeff Garzik 	pci_disable_device(pdev);
66484c90d971STejun Heo 
66494c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
6650c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
6651c6fd2807SJeff Garzik }
6652c6fd2807SJeff Garzik 
6653553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
6654c6fd2807SJeff Garzik {
6655553c4aa6STejun Heo 	int rc;
6656553c4aa6STejun Heo 
6657c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
6658c6fd2807SJeff Garzik 	pci_restore_state(pdev);
6659553c4aa6STejun Heo 
6660f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
6661553c4aa6STejun Heo 	if (rc) {
6662553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
6663553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
6664553c4aa6STejun Heo 		return rc;
6665553c4aa6STejun Heo 	}
6666553c4aa6STejun Heo 
6667c6fd2807SJeff Garzik 	pci_set_master(pdev);
6668553c4aa6STejun Heo 	return 0;
6669c6fd2807SJeff Garzik }
6670c6fd2807SJeff Garzik 
6671c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6672c6fd2807SJeff Garzik {
6673cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6674c6fd2807SJeff Garzik 	int rc = 0;
6675c6fd2807SJeff Garzik 
6676cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
6677c6fd2807SJeff Garzik 	if (rc)
6678c6fd2807SJeff Garzik 		return rc;
6679c6fd2807SJeff Garzik 
6680c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
6681c6fd2807SJeff Garzik 
6682c6fd2807SJeff Garzik 	return 0;
6683c6fd2807SJeff Garzik }
6684c6fd2807SJeff Garzik 
6685c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
6686c6fd2807SJeff Garzik {
6687cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6688553c4aa6STejun Heo 	int rc;
6689c6fd2807SJeff Garzik 
6690553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
6691553c4aa6STejun Heo 	if (rc == 0)
6692cca3974eSJeff Garzik 		ata_host_resume(host);
6693553c4aa6STejun Heo 	return rc;
6694c6fd2807SJeff Garzik }
66956ffa01d8STejun Heo #endif /* CONFIG_PM */
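
/*
 * Illustrative sketch: the PCI helpers above slot straight into an
 * LLD's struct pci_driver (hypothetical driver names):
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */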
66966ffa01d8STejun Heo 
6697c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6698c6fd2807SJeff Garzik 
6699c6fd2807SJeff Garzik 
6700c6fd2807SJeff Garzik static int __init ata_init(void)
6701c6fd2807SJeff Garzik {
6702c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
6703c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
6704c6fd2807SJeff Garzik 	if (!ata_wq)
6705c6fd2807SJeff Garzik 		return -ENOMEM;
6706c6fd2807SJeff Garzik 
6707c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6708c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
6709c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
6710c6fd2807SJeff Garzik 		return -ENOMEM;
6711c6fd2807SJeff Garzik 	}
6712c6fd2807SJeff Garzik 
6713c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6714c6fd2807SJeff Garzik 	return 0;
6715c6fd2807SJeff Garzik }
6716c6fd2807SJeff Garzik 
6717c6fd2807SJeff Garzik static void __exit ata_exit(void)
6718c6fd2807SJeff Garzik {
6719c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
6720c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
6721c6fd2807SJeff Garzik }
6722c6fd2807SJeff Garzik 
6723a4625085SBrian King subsys_initcall(ata_init);
6724c6fd2807SJeff Garzik module_exit(ata_exit);
6725c6fd2807SJeff Garzik 
6726c6fd2807SJeff Garzik static unsigned long ratelimit_time;
6727c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
6728c6fd2807SJeff Garzik 
6729c6fd2807SJeff Garzik int ata_ratelimit(void)
6730c6fd2807SJeff Garzik {
6731c6fd2807SJeff Garzik 	int rc;
6732c6fd2807SJeff Garzik 	unsigned long flags;
6733c6fd2807SJeff Garzik 
6734c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6735c6fd2807SJeff Garzik 
6736c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
6737c6fd2807SJeff Garzik 		rc = 1;
6738c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
6739c6fd2807SJeff Garzik 	} else
6740c6fd2807SJeff Garzik 		rc = 0;
6741c6fd2807SJeff Garzik 
6742c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6743c6fd2807SJeff Garzik 
6744c6fd2807SJeff Garzik 	return rc;
6745c6fd2807SJeff Garzik }
6746c6fd2807SJeff Garzik 
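/*
 * Illustrative sketch: callers use ata_ratelimit() to gate noisy
 * interrupt-time messages (hypothetical condition and message):
 *
 *	if (unlikely(!qc) && ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
 */
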
6747c6fd2807SJeff Garzik /**
6748c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
6749c6fd2807SJeff Garzik  *	@reg: IO-mapped register
6750c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
6751c6fd2807SJeff Garzik  *	@val: Wait condition
6752c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
6753c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
6754c6fd2807SJeff Garzik  *
6755c6fd2807SJeff Garzik  *	Waiting for some bits of register to change is a common
6756c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
6757c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
6758c6fd2807SJeff Garzik  *
6759c6fd2807SJeff Garzik  *	(*@reg & mask) != val
6760c6fd2807SJeff Garzik  *
6761c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
6762c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
6763c6fd2807SJeff Garzik  *
6764c6fd2807SJeff Garzik  *	LOCKING:
6765c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
6766c6fd2807SJeff Garzik  *
6767c6fd2807SJeff Garzik  *	RETURNS:
6768c6fd2807SJeff Garzik  *	The final register value.
6769c6fd2807SJeff Garzik  */
6770c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6771c6fd2807SJeff Garzik 		      unsigned long interval_msec,
6772c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
6773c6fd2807SJeff Garzik {
6774c6fd2807SJeff Garzik 	unsigned long timeout;
6775c6fd2807SJeff Garzik 	u32 tmp;
6776c6fd2807SJeff Garzik 
6777c6fd2807SJeff Garzik 	tmp = ioread32(reg);
6778c6fd2807SJeff Garzik 
6779c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
6780c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
6781c6fd2807SJeff Garzik 	 * eat away the timeout.
6782c6fd2807SJeff Garzik 	 */
6783c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
6784c6fd2807SJeff Garzik 
6785c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6786c6fd2807SJeff Garzik 		msleep(interval_msec);
6787c6fd2807SJeff Garzik 		tmp = ioread32(reg);
6788c6fd2807SJeff Garzik 	}
6789c6fd2807SJeff Garzik 
6790c6fd2807SJeff Garzik 	return tmp;
6791c6fd2807SJeff Garzik }
6792c6fd2807SJeff Garzik 
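/*
 * Illustrative sketch: waiting for a busy bit to clear in a 32-bit
 * MMIO status register.  The register offset and timings below are
 * hypothetical; ATA_BUSY is the real busy bit.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + FOO_STATUS, ATA_BUSY, ATA_BUSY,
 *				   10, 1000);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;		(timed out, bit never cleared)
 */
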
6793c6fd2807SJeff Garzik /*
6794c6fd2807SJeff Garzik  * Dummy port_ops
6795c6fd2807SJeff Garzik  */
6796c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
6797c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
6798c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6799c6fd2807SJeff Garzik 
6800c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
6801c6fd2807SJeff Garzik {
6802c6fd2807SJeff Garzik 	return ATA_DRDY;
6803c6fd2807SJeff Garzik }
6804c6fd2807SJeff Garzik 
6805c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6806c6fd2807SJeff Garzik {
6807c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
6808c6fd2807SJeff Garzik }
6809c6fd2807SJeff Garzik 
6810c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
6811c6fd2807SJeff Garzik 	.port_disable		= ata_port_disable,
6812c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
6813c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
6814c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
6815c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
6816c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
6817c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
6818c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
6819c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
6820c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
6821c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
6822c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
6823c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
6824c6fd2807SJeff Garzik };
6825c6fd2807SJeff Garzik 
682621b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
682721b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
682821b0ad4fSTejun Heo };
682921b0ad4fSTejun Heo 
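/*
 * Illustrative sketch: a controller with an unusable second channel
 * can point that slot of its port_info array at ata_dummy_port_info so
 * the port is registered as a dummy (the first entry and surrounding
 * probe code are hypothetical):
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */
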
6830c6fd2807SJeff Garzik /*
6831c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
6832c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
6833c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
6834c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
6835c6fd2807SJeff Garzik  */
6836c6fd2807SJeff Garzik 
6837c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6838c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6839c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6840c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
684121b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6842c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
6843c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
6844cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
6845f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
6846f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6847ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
6848f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
6849f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
68500529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
6851c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
6852c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
6853c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
6854c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
6855c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6856c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6857c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
6858c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
6859c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6860c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
686143727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
6862c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6863c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6864c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
6865c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
6866c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
6867c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
6868d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
6869c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
687004351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
68710d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
68720d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6873c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
6874d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
6875c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6876c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6877c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
6878c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6879c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
6880c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6881c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6882c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6883c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6884c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6885c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6886c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
688710305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
6888c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
6889c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_debounce);
6890c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_resume);
6891c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
6892c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
6893c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
6894c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
6895c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
6896b6103f6dSTejun Heo EXPORT_SYMBOL_GPL(sata_port_hardreset);
6897c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
6898c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
6899c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
6900c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
6901c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
6902c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
6903c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
6904c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
6905d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
6906c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
6907c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6908c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6909c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6910c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6911c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6912c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
6913c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
6914c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
6915c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
6916c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6917c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_online);
6918c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_offline);
69196ffa01d8STejun Heo #ifdef CONFIG_PM
6920cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
6921cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
69226ffa01d8STejun Heo #endif /* CONFIG_PM */
6923c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
6924c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
692510305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6926c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6927c6fd2807SJeff Garzik 
6928c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6929c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
6930c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
6931c6fd2807SJeff Garzik 
6932c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6933c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
6934d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
69351626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
6936d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
6937c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
6938c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
69396ffa01d8STejun Heo #ifdef CONFIG_PM
6940c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6941c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6942c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6943c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
69446ffa01d8STejun Heo #endif /* CONFIG_PM */
6945c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6946c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6947c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6948c6fd2807SJeff Garzik 
6949c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
6950c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6951c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
6952c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
6953c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6954c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6955c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6956c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6957c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
695883625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
695983625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
696083625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_ack);
696183625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6962a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6963be0d18dfSAlan Cox 
6964be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
6965be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
6966be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
6967be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
6968