xref: /openbmc/linux/drivers/ata/libata-core.c (revision afe3cc51)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36c6fd2807SJeff Garzik #include <linux/module.h>
37c6fd2807SJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <linux/init.h>
39c6fd2807SJeff Garzik #include <linux/list.h>
40c6fd2807SJeff Garzik #include <linux/mm.h>
41c6fd2807SJeff Garzik #include <linux/highmem.h>
42c6fd2807SJeff Garzik #include <linux/spinlock.h>
43c6fd2807SJeff Garzik #include <linux/blkdev.h>
44c6fd2807SJeff Garzik #include <linux/delay.h>
45c6fd2807SJeff Garzik #include <linux/timer.h>
46c6fd2807SJeff Garzik #include <linux/interrupt.h>
47c6fd2807SJeff Garzik #include <linux/completion.h>
48c6fd2807SJeff Garzik #include <linux/suspend.h>
49c6fd2807SJeff Garzik #include <linux/workqueue.h>
50c6fd2807SJeff Garzik #include <linux/jiffies.h>
51c6fd2807SJeff Garzik #include <linux/scatterlist.h>
52c6fd2807SJeff Garzik #include <scsi/scsi.h>
53c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
54c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
55c6fd2807SJeff Garzik #include <linux/libata.h>
56c6fd2807SJeff Garzik #include <asm/io.h>
57c6fd2807SJeff Garzik #include <asm/semaphore.h>
58c6fd2807SJeff Garzik #include <asm/byteorder.h>
59c6fd2807SJeff Garzik 
60c6fd2807SJeff Garzik #include "libata.h"
61c6fd2807SJeff Garzik 
628bc3fc47SJeff Garzik #define DRV_VERSION	"2.21"	/* must be exactly four chars */
63fda0efc5SJeff Garzik 
64fda0efc5SJeff Garzik 
65c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
67c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
68c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
69c6fd2807SJeff Garzik 
70c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
71c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
72c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
74c6fd2807SJeff Garzik 
75f3187195STejun Heo unsigned int ata_print_id = 1;
76c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
77c6fd2807SJeff Garzik 
78c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
79c6fd2807SJeff Garzik 
80c6fd2807SJeff Garzik int atapi_enabled = 1;
81c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
82c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83c6fd2807SJeff Garzik 
84c6fd2807SJeff Garzik int atapi_dmadir = 0;
85c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
86c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87c6fd2807SJeff Garzik 
88c6fd2807SJeff Garzik int libata_fua = 0;
89c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
90c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91c6fd2807SJeff Garzik 
921e999736SAlan Cox static int ata_ignore_hpa = 0;
931e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
941e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
951e999736SAlan Cox 
96c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
97c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
98c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
99c6fd2807SJeff Garzik 
100d7d0dad6SJeff Garzik int libata_noacpi = 1;
101d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
10211ef697bSKristen Carlson Accardi MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
10311ef697bSKristen Carlson Accardi 
104c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
105c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
106c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
107c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
108c6fd2807SJeff Garzik 
109c6fd2807SJeff Garzik 
110c6fd2807SJeff Garzik /**
111c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
112c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
113c6fd2807SJeff Garzik  *	@fis: Buffer into which data will be output
114c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
115c6fd2807SJeff Garzik  *
116c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
117c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
118c6fd2807SJeff Garzik  *
119c6fd2807SJeff Garzik  *	LOCKING:
120c6fd2807SJeff Garzik  *	Inherited from caller.
121c6fd2807SJeff Garzik  */
122c6fd2807SJeff Garzik 
123c6fd2807SJeff Garzik void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
124c6fd2807SJeff Garzik {
125c6fd2807SJeff Garzik 	fis[0] = 0x27;	/* Register - Host to Device FIS */
126c6fd2807SJeff Garzik 	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
127c6fd2807SJeff Garzik 					    bit 7 indicates Command FIS */
128c6fd2807SJeff Garzik 	fis[2] = tf->command;
129c6fd2807SJeff Garzik 	fis[3] = tf->feature;
130c6fd2807SJeff Garzik 
131c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
132c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
133c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
134c6fd2807SJeff Garzik 	fis[7] = tf->device;
135c6fd2807SJeff Garzik 
136c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
137c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
138c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
139c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
140c6fd2807SJeff Garzik 
141c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
142c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
143c6fd2807SJeff Garzik 	fis[14] = 0;
144c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
145c6fd2807SJeff Garzik 
146c6fd2807SJeff Garzik 	fis[16] = 0;
147c6fd2807SJeff Garzik 	fis[17] = 0;
148c6fd2807SJeff Garzik 	fis[18] = 0;
149c6fd2807SJeff Garzik 	fis[19] = 0;
150c6fd2807SJeff Garzik }
151c6fd2807SJeff Garzik 
152c6fd2807SJeff Garzik /**
153c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
154c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
155c6fd2807SJeff Garzik  *	@tf: Taskfile to output
156c6fd2807SJeff Garzik  *
157c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
158c6fd2807SJeff Garzik  *
159c6fd2807SJeff Garzik  *	LOCKING:
160c6fd2807SJeff Garzik  *	Inherited from caller.
161c6fd2807SJeff Garzik  */
162c6fd2807SJeff Garzik 
163c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
164c6fd2807SJeff Garzik {
165c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
166c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
167c6fd2807SJeff Garzik 
168c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
169c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
170c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
171c6fd2807SJeff Garzik 	tf->device	= fis[7];
172c6fd2807SJeff Garzik 
173c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
174c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
175c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
176c6fd2807SJeff Garzik 
177c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
178c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
179c6fd2807SJeff Garzik }
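/*
 * Illustrative sketch only: a SATA LLD typically pairs the two
 * converters above, using ata_tf_to_fis() to build the 20-byte
 * Register - Host to Device FIS when issuing a command and
 * ata_tf_from_fis() to decode the Register - Device to Host FIS
 * returned on completion.  The helper name is hypothetical.
 */
static inline void ata_fis_usage_sketch(struct ata_queued_cmd *qc,
					const u8 *d2h_fis)
{
	u8 h2d_fis[20];

	/* command FIS addressed to port multiplier port 0 */
	ata_tf_to_fis(&qc->tf, h2d_fis, 0);

	/* ... the controller transmits h2d_fis and receives d2h_fis ... */

	/* recover status, error and the LBA/count registers */
	ata_tf_from_fis(d2h_fis, &qc->result_tf);
}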
180c6fd2807SJeff Garzik 
181c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
182c6fd2807SJeff Garzik 	/* pio multi */
183c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
184c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
185c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
186c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
187c6fd2807SJeff Garzik 	0,
188c6fd2807SJeff Garzik 	0,
189c6fd2807SJeff Garzik 	0,
190c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
191c6fd2807SJeff Garzik 	/* pio */
192c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
193c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
194c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
195c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
196c6fd2807SJeff Garzik 	0,
197c6fd2807SJeff Garzik 	0,
198c6fd2807SJeff Garzik 	0,
199c6fd2807SJeff Garzik 	0,
200c6fd2807SJeff Garzik 	/* dma */
201c6fd2807SJeff Garzik 	ATA_CMD_READ,
202c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
203c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
204c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
205c6fd2807SJeff Garzik 	0,
206c6fd2807SJeff Garzik 	0,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
209c6fd2807SJeff Garzik };
210c6fd2807SJeff Garzik 
211c6fd2807SJeff Garzik /**
212c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
213bd056d7eSTejun Heo  *	@tf: command to examine and configure
214bd056d7eSTejun Heo  *	@dev: device tf belongs to
215c6fd2807SJeff Garzik  *
216c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
217c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
218c6fd2807SJeff Garzik  *
219c6fd2807SJeff Garzik  *	LOCKING:
220c6fd2807SJeff Garzik  *	caller.
221c6fd2807SJeff Garzik  */
222bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
223c6fd2807SJeff Garzik {
224c6fd2807SJeff Garzik 	u8 cmd;
225c6fd2807SJeff Garzik 
226c6fd2807SJeff Garzik 	int index, fua, lba48, write;
227c6fd2807SJeff Garzik 
228c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
229c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
230c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
231c6fd2807SJeff Garzik 
232c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
233c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
234c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
235bd056d7eSTejun Heo 	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
236c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
237c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
238c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
239c6fd2807SJeff Garzik 	} else {
240c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
241c6fd2807SJeff Garzik 		index = 16;
242c6fd2807SJeff Garzik 	}
243c6fd2807SJeff Garzik 
244c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
245c6fd2807SJeff Garzik 	if (cmd) {
246c6fd2807SJeff Garzik 		tf->command = cmd;
247c6fd2807SJeff Garzik 		return 0;
248c6fd2807SJeff Garzik 	}
249c6fd2807SJeff Garzik 	return -1;
250c6fd2807SJeff Garzik }
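/*
 * Worked example of the lookup above (values illustrative): a FUA
 * write to an LBA48-capable device on a DMA-capable port gives
 * fua = 4, lba48 = 2, write = 1 and index = 16, so
 * ata_rw_cmds[16 + 4 + 2 + 1] selects ATA_CMD_WRITE_FUA_EXT.  The
 * zero entries mark combinations with no ATA opcode (e.g. there is
 * no 28-bit FUA command), which is why ata_rwcmd_protocol() then
 * returns -1.
 */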
251c6fd2807SJeff Garzik 
252c6fd2807SJeff Garzik /**
25335b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
25435b649feSTejun Heo  *	@tf: ATA taskfile of interest
25535b649feSTejun Heo  *	@dev: ATA device @tf belongs to
25635b649feSTejun Heo  *
25735b649feSTejun Heo  *	LOCKING:
25835b649feSTejun Heo  *	None.
25935b649feSTejun Heo  *
26035b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
26135b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
26235b649feSTejun Heo  *	flags select the address format to use.
26335b649feSTejun Heo  *
26435b649feSTejun Heo  *	RETURNS:
26535b649feSTejun Heo  *	Block address read from @tf.
26635b649feSTejun Heo  */
26735b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
26835b649feSTejun Heo {
26935b649feSTejun Heo 	u64 block = 0;
27035b649feSTejun Heo 
27135b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
27235b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
27335b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
27435b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
27535b649feSTejun Heo 			block |= tf->hob_lbal << 24;
27635b649feSTejun Heo 		} else
27735b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
27835b649feSTejun Heo 
27935b649feSTejun Heo 		block |= tf->lbah << 16;
28035b649feSTejun Heo 		block |= tf->lbam << 8;
28135b649feSTejun Heo 		block |= tf->lbal;
28235b649feSTejun Heo 	} else {
28335b649feSTejun Heo 		u32 cyl, head, sect;
28435b649feSTejun Heo 
28535b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
28635b649feSTejun Heo 		head = tf->device & 0xf;
28735b649feSTejun Heo 		sect = tf->lbal;
28835b649feSTejun Heo 
28935b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
29035b649feSTejun Heo 	}
29135b649feSTejun Heo 
29235b649feSTejun Heo 	return block;
29335b649feSTejun Heo }
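/*
 * For example, with ATA_TFLAG_LBA | ATA_TFLAG_LBA48 set, taskfile
 * bytes hob_lbal/lbah/lbam/lbal = 0x12/0x34/0x56/0x78 (upper HOB
 * bytes zero) reassemble to block 0x12345678, the exact inverse of
 * the encoding done by ata_build_rw_tf() below.
 */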
29435b649feSTejun Heo 
29535b649feSTejun Heo /**
296bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
297bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
298bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
299bd056d7eSTejun Heo  *	@block: Block address
300bd056d7eSTejun Heo  *	@n_block: Number of blocks
301bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
302bd056d7eSTejun Heo  *	@tag: tag
303bd056d7eSTejun Heo  *
304bd056d7eSTejun Heo  *	LOCKING:
305bd056d7eSTejun Heo  *	None.
306bd056d7eSTejun Heo  *
307bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
308bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
309bd056d7eSTejun Heo  *
310bd056d7eSTejun Heo  *	RETURNS:
311bd056d7eSTejun Heo  *
312bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
313bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
314bd056d7eSTejun Heo  */
315bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
316bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
317bd056d7eSTejun Heo 		    unsigned int tag)
318bd056d7eSTejun Heo {
319bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
320bd056d7eSTejun Heo 	tf->flags |= tf_flags;
321bd056d7eSTejun Heo 
3226d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
323bd056d7eSTejun Heo 		/* yay, NCQ */
324bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
325bd056d7eSTejun Heo 			return -ERANGE;
326bd056d7eSTejun Heo 
327bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
328bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
329bd056d7eSTejun Heo 
330bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
331bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
332bd056d7eSTejun Heo 		else
333bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
334bd056d7eSTejun Heo 
335bd056d7eSTejun Heo 		tf->nsect = tag << 3;
336bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
337bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
338bd056d7eSTejun Heo 
339bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
340bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
341bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
342bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
343bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
344bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
345bd056d7eSTejun Heo 
346bd056d7eSTejun Heo 		tf->device = 1 << 6;
347bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
348bd056d7eSTejun Heo 			tf->device |= 1 << 7;
349bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
350bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
351bd056d7eSTejun Heo 
352bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
353bd056d7eSTejun Heo 			/* use LBA28 */
354bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
355bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
356bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
357bd056d7eSTejun Heo 				return -ERANGE;
358bd056d7eSTejun Heo 
359bd056d7eSTejun Heo 			/* use LBA48 */
360bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
361bd056d7eSTejun Heo 
362bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
363bd056d7eSTejun Heo 
364bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
365bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
366bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
367bd056d7eSTejun Heo 		} else
368bd056d7eSTejun Heo 			/* request too large even for LBA48 */
369bd056d7eSTejun Heo 			return -ERANGE;
370bd056d7eSTejun Heo 
371bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
372bd056d7eSTejun Heo 			return -EINVAL;
373bd056d7eSTejun Heo 
374bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
375bd056d7eSTejun Heo 
376bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
377bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
378bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
379bd056d7eSTejun Heo 
380bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
381bd056d7eSTejun Heo 	} else {
382bd056d7eSTejun Heo 		/* CHS */
383bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
384bd056d7eSTejun Heo 
385bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
386bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
387bd056d7eSTejun Heo 			return -ERANGE;
388bd056d7eSTejun Heo 
389bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
390bd056d7eSTejun Heo 			return -EINVAL;
391bd056d7eSTejun Heo 
392bd056d7eSTejun Heo 		/* Convert LBA to CHS */
393bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
394bd056d7eSTejun Heo 		cyl   = track / dev->heads;
395bd056d7eSTejun Heo 		head  = track % dev->heads;
396bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
397bd056d7eSTejun Heo 
398bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
399bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
400bd056d7eSTejun Heo 
401bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
402bd056d7eSTejun Heo 		   Cylinder: 0-65535
403bd056d7eSTejun Heo 		   Head: 0-15
404bd056d7eSTejun Heo 		   Sector: 1-255 */
405bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
406bd056d7eSTejun Heo 			return -ERANGE;
407bd056d7eSTejun Heo 
408bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
409bd056d7eSTejun Heo 		tf->lbal = sect;
410bd056d7eSTejun Heo 		tf->lbam = cyl;
411bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
412bd056d7eSTejun Heo 		tf->device |= head;
413bd056d7eSTejun Heo 	}
414bd056d7eSTejun Heo 
415bd056d7eSTejun Heo 	return 0;
416bd056d7eSTejun Heo }
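/*
 * Worked example of the NCQ branch above (values illustrative): a
 * 1024-sector FPDMA write to LBA 0x12345678 with queue tag 5 is
 * encoded as
 *
 *	tf->command     = ATA_CMD_FPDMA_WRITE
 *	tf->nsect       = 5 << 3	(tag lives in bits 7:3)
 *	tf->hob_feature = 0x04, tf->feature = 0x00	(count = 1024)
 *	tf->hob_lbal    = 0x12, tf->lbah = 0x34
 *	tf->lbam        = 0x56, tf->lbal = 0x78
 *
 * i.e. the FPDMA commands carry the sector count in the FEATURE pair
 * and the queue tag in SECTOR COUNT, while the LBA28/LBA48 branches
 * use nsect/hob_nsect for the count instead.
 */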
417bd056d7eSTejun Heo 
418bd056d7eSTejun Heo /**
419c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
420c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
421c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
422c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
423c6fd2807SJeff Garzik  *
424c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
425c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
426c6fd2807SJeff Garzik  *
427c6fd2807SJeff Garzik  *	LOCKING:
428c6fd2807SJeff Garzik  *	None.
429c6fd2807SJeff Garzik  *
430c6fd2807SJeff Garzik  *	RETURNS:
431c6fd2807SJeff Garzik  *	Packed xfer_mask.
432c6fd2807SJeff Garzik  */
433c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
434c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
435c6fd2807SJeff Garzik 				      unsigned int udma_mask)
436c6fd2807SJeff Garzik {
437c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
438c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
439c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
440c6fd2807SJeff Garzik }
441c6fd2807SJeff Garzik 
442c6fd2807SJeff Garzik /**
443c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
444c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
445c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
446c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
447c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
448c6fd2807SJeff Garzik  *
449c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
450c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
451c6fd2807SJeff Garzik  */
452c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
453c6fd2807SJeff Garzik 				unsigned int *pio_mask,
454c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
455c6fd2807SJeff Garzik 				unsigned int *udma_mask)
456c6fd2807SJeff Garzik {
457c6fd2807SJeff Garzik 	if (pio_mask)
458c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
459c6fd2807SJeff Garzik 	if (mwdma_mask)
460c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
461c6fd2807SJeff Garzik 	if (udma_mask)
462c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
463c6fd2807SJeff Garzik }
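/*
 * Minimal usage sketch (illustrative only, hypothetical helper name):
 * the two helpers above are exact inverses, e.g. for a device
 * supporting PIO0-4, MWDMA0-2 and UDMA0-5.
 */
static inline void ata_xfermask_pack_sketch(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);

	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}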
464c6fd2807SJeff Garzik 
465c6fd2807SJeff Garzik static const struct ata_xfer_ent {
466c6fd2807SJeff Garzik 	int shift, bits;
467c6fd2807SJeff Garzik 	u8 base;
468c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
469c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
470c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
471c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
472c6fd2807SJeff Garzik 	{ -1, },
473c6fd2807SJeff Garzik };
474c6fd2807SJeff Garzik 
475c6fd2807SJeff Garzik /**
476c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
477c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
478c6fd2807SJeff Garzik  *
479c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
480c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
481c6fd2807SJeff Garzik  *
482c6fd2807SJeff Garzik  *	LOCKING:
483c6fd2807SJeff Garzik  *	None.
484c6fd2807SJeff Garzik  *
485c6fd2807SJeff Garzik  *	RETURNS:
486c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
487c6fd2807SJeff Garzik  */
488c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
489c6fd2807SJeff Garzik {
490c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
491c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
492c6fd2807SJeff Garzik 
493c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
494c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
495c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
496c6fd2807SJeff Garzik 	return 0;
497c6fd2807SJeff Garzik }
498c6fd2807SJeff Garzik 
499c6fd2807SJeff Garzik /**
500c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
501c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
502c6fd2807SJeff Garzik  *
503c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
504c6fd2807SJeff Garzik  *
505c6fd2807SJeff Garzik  *	LOCKING:
506c6fd2807SJeff Garzik  *	None.
507c6fd2807SJeff Garzik  *
508c6fd2807SJeff Garzik  *	RETURNS:
509c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
510c6fd2807SJeff Garzik  */
511c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
512c6fd2807SJeff Garzik {
513c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
514c6fd2807SJeff Garzik 
515c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
517c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
518c6fd2807SJeff Garzik 	return 0;
519c6fd2807SJeff Garzik }
520c6fd2807SJeff Garzik 
521c6fd2807SJeff Garzik /**
522c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
523c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
524c6fd2807SJeff Garzik  *
525c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
526c6fd2807SJeff Garzik  *
527c6fd2807SJeff Garzik  *	LOCKING:
528c6fd2807SJeff Garzik  *	None.
529c6fd2807SJeff Garzik  *
530c6fd2807SJeff Garzik  *	RETURNS:
531c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
532c6fd2807SJeff Garzik  */
533c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
534c6fd2807SJeff Garzik {
535c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
536c6fd2807SJeff Garzik 
537c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
539c6fd2807SJeff Garzik 			return ent->shift;
540c6fd2807SJeff Garzik 	return -1;
541c6fd2807SJeff Garzik }
542c6fd2807SJeff Garzik 
543c6fd2807SJeff Garzik /**
544c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
545c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
546c6fd2807SJeff Garzik  *
547c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
548c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
549c6fd2807SJeff Garzik  *
550c6fd2807SJeff Garzik  *	LOCKING:
551c6fd2807SJeff Garzik  *	None.
552c6fd2807SJeff Garzik  *
553c6fd2807SJeff Garzik  *	RETURNS:
554c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
555c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
556c6fd2807SJeff Garzik  */
557c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
558c6fd2807SJeff Garzik {
559c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
560c6fd2807SJeff Garzik 		"PIO0",
561c6fd2807SJeff Garzik 		"PIO1",
562c6fd2807SJeff Garzik 		"PIO2",
563c6fd2807SJeff Garzik 		"PIO3",
564c6fd2807SJeff Garzik 		"PIO4",
565b352e57dSAlan Cox 		"PIO5",
566b352e57dSAlan Cox 		"PIO6",
567c6fd2807SJeff Garzik 		"MWDMA0",
568c6fd2807SJeff Garzik 		"MWDMA1",
569c6fd2807SJeff Garzik 		"MWDMA2",
570b352e57dSAlan Cox 		"MWDMA3",
571b352e57dSAlan Cox 		"MWDMA4",
572c6fd2807SJeff Garzik 		"UDMA/16",
573c6fd2807SJeff Garzik 		"UDMA/25",
574c6fd2807SJeff Garzik 		"UDMA/33",
575c6fd2807SJeff Garzik 		"UDMA/44",
576c6fd2807SJeff Garzik 		"UDMA/66",
577c6fd2807SJeff Garzik 		"UDMA/100",
578c6fd2807SJeff Garzik 		"UDMA/133",
579c6fd2807SJeff Garzik 		"UDMA7",
580c6fd2807SJeff Garzik 	};
581c6fd2807SJeff Garzik 	int highbit;
582c6fd2807SJeff Garzik 
583c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
584c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
585c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
586c6fd2807SJeff Garzik 	return "<n/a>";
587c6fd2807SJeff Garzik }
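/*
 * Minimal lookup sketch (illustrative only, hypothetical helper name),
 * assuming a mask covering UDMA0-5: only the highest set bit matters,
 * so the mode resolves to XFER_UDMA_5 and the printable name to
 * "UDMA/100".
 */
static inline const char *ata_xfer_lookup_sketch(void)
{
	unsigned int mask = ata_pack_xfermask(0, 0, 0x3f);	/* UDMA0-5 */
	u8 mode = ata_xfer_mask2mode(mask);		/* XFER_UDMA_5 */
	int shift = ata_xfer_mode2shift(mode);		/* ATA_SHIFT_UDMA */

	return shift >= 0 ? ata_mode_string(mask) : "<n/a>";
}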
588c6fd2807SJeff Garzik 
589c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
590c6fd2807SJeff Garzik {
591c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
592c6fd2807SJeff Garzik 		"1.5 Gbps",
593c6fd2807SJeff Garzik 		"3.0 Gbps",
594c6fd2807SJeff Garzik 	};
595c6fd2807SJeff Garzik 
596c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
597c6fd2807SJeff Garzik 		return "<unknown>";
598c6fd2807SJeff Garzik 	return spd_str[spd - 1];
599c6fd2807SJeff Garzik }
600c6fd2807SJeff Garzik 
601c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
602c6fd2807SJeff Garzik {
603c6fd2807SJeff Garzik 	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
604c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6054ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6064ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
607c6fd2807SJeff Garzik 		dev->class++;
608c6fd2807SJeff Garzik 	}
609c6fd2807SJeff Garzik }
610c6fd2807SJeff Garzik 
611c6fd2807SJeff Garzik /**
612c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
613c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
614c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
615c6fd2807SJeff Garzik  *
6160d5ff566STejun Heo  *	This technique was originally described in
6170d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
6180d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
6190d5ff566STejun Heo  *
6200d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
6210d5ff566STejun Heo  *	and if a device is present, it will respond by
6220d5ff566STejun Heo  *	correctly storing and echoing back the
6230d5ff566STejun Heo  *	ATA shadow register contents.
624c6fd2807SJeff Garzik  *
625c6fd2807SJeff Garzik  *	LOCKING:
626c6fd2807SJeff Garzik  *	caller.
627c6fd2807SJeff Garzik  */
628c6fd2807SJeff Garzik 
6290d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
630c6fd2807SJeff Garzik {
6310d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
6320d5ff566STejun Heo 	u8 nsect, lbal;
6330d5ff566STejun Heo 
6340d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
6350d5ff566STejun Heo 
6360d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6370d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6380d5ff566STejun Heo 
6390d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
6400d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
6410d5ff566STejun Heo 
6420d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6430d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6440d5ff566STejun Heo 
6450d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
6460d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
6470d5ff566STejun Heo 
6480d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
6490d5ff566STejun Heo 		return 1;	/* we found a device */
6500d5ff566STejun Heo 
6510d5ff566STejun Heo 	return 0;		/* nothing found */
652c6fd2807SJeff Garzik }
653c6fd2807SJeff Garzik 
654c6fd2807SJeff Garzik /**
655c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
656c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
657c6fd2807SJeff Garzik  *
658c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
659c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
660c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
661c6fd2807SJeff Garzik  *
662c6fd2807SJeff Garzik  *	LOCKING:
663c6fd2807SJeff Garzik  *	None.
664c6fd2807SJeff Garzik  *
665c6fd2807SJeff Garzik  *	RETURNS:
666c6fd2807SJeff Garzik  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
667c6fd2807SJeff Garzik  *	in the event of failure.
668c6fd2807SJeff Garzik  */
669c6fd2807SJeff Garzik 
670c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
671c6fd2807SJeff Garzik {
672c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
673c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
674c6fd2807SJeff Garzik 	 * so we only check those; it's sufficient for uniqueness.
675c6fd2807SJeff Garzik 	 */
676c6fd2807SJeff Garzik 
677c6fd2807SJeff Garzik 	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
678c6fd2807SJeff Garzik 	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
679c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
680c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
681c6fd2807SJeff Garzik 	}
682c6fd2807SJeff Garzik 
683c6fd2807SJeff Garzik 	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
684c6fd2807SJeff Garzik 	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
685c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
686c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
687c6fd2807SJeff Garzik 	}
688c6fd2807SJeff Garzik 
689c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
690c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
691c6fd2807SJeff Garzik }
692c6fd2807SJeff Garzik 
693c6fd2807SJeff Garzik /**
694c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
695c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
696c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
697c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
698c6fd2807SJeff Garzik  *
699c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
700c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
701c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
702c6fd2807SJeff Garzik  *	and diagnostics.
703c6fd2807SJeff Garzik  *
704c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
705c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
706c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
707c6fd2807SJeff Garzik  *
708c6fd2807SJeff Garzik  *	LOCKING:
709c6fd2807SJeff Garzik  *	caller.
710c6fd2807SJeff Garzik  *
711c6fd2807SJeff Garzik  *	RETURNS:
712c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
713c6fd2807SJeff Garzik  */
714c6fd2807SJeff Garzik 
715a619f981SAkira Iguchi unsigned int
716c6fd2807SJeff Garzik ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
717c6fd2807SJeff Garzik {
718c6fd2807SJeff Garzik 	struct ata_taskfile tf;
719c6fd2807SJeff Garzik 	unsigned int class;
720c6fd2807SJeff Garzik 	u8 err;
721c6fd2807SJeff Garzik 
722c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
723c6fd2807SJeff Garzik 
724c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
725c6fd2807SJeff Garzik 
726c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
727c6fd2807SJeff Garzik 	err = tf.feature;
728c6fd2807SJeff Garzik 	if (r_err)
729c6fd2807SJeff Garzik 		*r_err = err;
730c6fd2807SJeff Garzik 
73193590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
73293590859SAlan Cox 	if (err == 0 && device == 0)
73393590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
73493590859SAlan Cox 		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
73593590859SAlan Cox 	else if (err == 1)
736c6fd2807SJeff Garzik 		/* do nothing */ ;
737c6fd2807SJeff Garzik 	else if ((device == 0) && (err == 0x81))
738c6fd2807SJeff Garzik 		/* do nothing */ ;
739c6fd2807SJeff Garzik 	else
740c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
741c6fd2807SJeff Garzik 
742c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
743c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
744c6fd2807SJeff Garzik 
745c6fd2807SJeff Garzik 	if (class == ATA_DEV_UNKNOWN)
746c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
747c6fd2807SJeff Garzik 	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
748c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
749c6fd2807SJeff Garzik 	return class;
750c6fd2807SJeff Garzik }
751c6fd2807SJeff Garzik 
752c6fd2807SJeff Garzik /**
753c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
754c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
755c6fd2807SJeff Garzik  *	@s: string into which data is output
756c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
757c6fd2807SJeff Garzik  *	@len: length of string to return. must be an even number.
758c6fd2807SJeff Garzik  *
759c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
760c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
761c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
762c6fd2807SJeff Garzik  *
763c6fd2807SJeff Garzik  *	LOCKING:
764c6fd2807SJeff Garzik  *	caller.
765c6fd2807SJeff Garzik  */
766c6fd2807SJeff Garzik 
767c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
768c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
769c6fd2807SJeff Garzik {
770c6fd2807SJeff Garzik 	unsigned int c;
771c6fd2807SJeff Garzik 
772c6fd2807SJeff Garzik 	while (len > 0) {
773c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
774c6fd2807SJeff Garzik 		*s = c;
775c6fd2807SJeff Garzik 		s++;
776c6fd2807SJeff Garzik 
777c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
778c6fd2807SJeff Garzik 		*s = c;
779c6fd2807SJeff Garzik 		s++;
780c6fd2807SJeff Garzik 
781c6fd2807SJeff Garzik 		ofs++;
782c6fd2807SJeff Garzik 		len -= 2;
783c6fd2807SJeff Garzik 	}
784c6fd2807SJeff Garzik }
785c6fd2807SJeff Garzik 
786c6fd2807SJeff Garzik /**
787c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
788c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
789c6fd2807SJeff Garzik  *	@s: string into which data is output
790c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
791c6fd2807SJeff Garzik  *	@len: length of string to return. must be an odd number.
792c6fd2807SJeff Garzik  *
793c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
794c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
795c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
796c6fd2807SJeff Garzik  *
797c6fd2807SJeff Garzik  *	LOCKING:
798c6fd2807SJeff Garzik  *	caller.
799c6fd2807SJeff Garzik  */
800c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
801c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
802c6fd2807SJeff Garzik {
803c6fd2807SJeff Garzik 	unsigned char *p;
804c6fd2807SJeff Garzik 
805c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
806c6fd2807SJeff Garzik 
807c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
808c6fd2807SJeff Garzik 
809c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
810c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
811c6fd2807SJeff Garzik 		p--;
812c6fd2807SJeff Garzik 	*p = '\0';
813c6fd2807SJeff Garzik }
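/*
 * Usage sketch (illustrative, hypothetical helper name): pulling the
 * model number out of raw IDENTIFY data with the helper above.  The
 * model field starts at word 27 and is 40 bytes long, so the buffer
 * is 41 bytes and @len is the required odd value.
 */
static inline void ata_id_model_sketch(const u16 *id)
{
	unsigned char model[40 + 1];

	ata_id_c_string(id, model, 27, sizeof(model));
	/* model is now NUL terminated with trailing spaces trimmed */
}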
814c6fd2807SJeff Garzik 
8151e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
8161e999736SAlan Cox {
8171e999736SAlan Cox 	u64 sectors = 0;
8181e999736SAlan Cox 
8191e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
8201e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
8211e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
8221e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8231e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8241e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8251e999736SAlan Cox 
8261e999736SAlan Cox 	return ++sectors;
8271e999736SAlan Cox }
8281e999736SAlan Cox 
8291e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
8301e999736SAlan Cox {
8311e999736SAlan Cox 	u64 sectors = 0;
8321e999736SAlan Cox 
8331e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
8341e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8351e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8361e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8371e999736SAlan Cox 
8381e999736SAlan Cox 	return ++sectors;
8391e999736SAlan Cox }
8401e999736SAlan Cox 
8411e999736SAlan Cox /**
8421e999736SAlan Cox  *	ata_read_native_max_address_ext	-	LBA48 native max query
8431e999736SAlan Cox  *	@dev: Device to query
8441e999736SAlan Cox  *
8451e999736SAlan Cox  *	Perform an LBA48 size query upon the device in question. Return the
8461e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
8471e999736SAlan Cox  */
8481e999736SAlan Cox 
8491e999736SAlan Cox static u64 ata_read_native_max_address_ext(struct ata_device *dev)
8501e999736SAlan Cox {
8511e999736SAlan Cox 	unsigned int err;
8521e999736SAlan Cox 	struct ata_taskfile tf;
8531e999736SAlan Cox 
8541e999736SAlan Cox 	ata_tf_init(dev, &tf);
8551e999736SAlan Cox 
8561e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
8571e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
8581e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8591e999736SAlan Cox 	tf.device |= 0x40;
8601e999736SAlan Cox 
8611e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8621e999736SAlan Cox 	if (err)
8631e999736SAlan Cox 		return 0;
8641e999736SAlan Cox 
8651e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
8661e999736SAlan Cox }
8671e999736SAlan Cox 
8681e999736SAlan Cox /**
8691e999736SAlan Cox  *	ata_read_native_max_address	-	LBA28 native max query
8701e999736SAlan Cox  *	@dev: Device to query
8711e999736SAlan Cox  *
8721e999736SAlan Cox  *	Perform an LBA28 size query upon the device in question. Return the
8731e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
8741e999736SAlan Cox  */
8751e999736SAlan Cox 
8761e999736SAlan Cox static u64 ata_read_native_max_address(struct ata_device *dev)
8771e999736SAlan Cox {
8781e999736SAlan Cox 	unsigned int err;
8791e999736SAlan Cox 	struct ata_taskfile tf;
8801e999736SAlan Cox 
8811e999736SAlan Cox 	ata_tf_init(dev, &tf);
8821e999736SAlan Cox 
8831e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX;
8841e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
8851e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8861e999736SAlan Cox 	tf.device |= 0x40;
8871e999736SAlan Cox 
8881e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8891e999736SAlan Cox 	if (err)
8901e999736SAlan Cox 		return 0;
8911e999736SAlan Cox 
8921e999736SAlan Cox 	return ata_tf_to_lba(&tf);
8931e999736SAlan Cox }
8941e999736SAlan Cox 
8951e999736SAlan Cox /**
8961e999736SAlan Cox  *	ata_set_native_max_address_ext	-	LBA48 native max set
8971e999736SAlan Cox  *	@dev: Device to query
8986b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
8991e999736SAlan Cox  *
9001e999736SAlan Cox  *	Perform an LBA48 size set max upon the device in question. Return the
9011e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
9021e999736SAlan Cox  */
9031e999736SAlan Cox 
9041e999736SAlan Cox static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
9051e999736SAlan Cox {
9061e999736SAlan Cox 	unsigned int err;
9071e999736SAlan Cox 	struct ata_taskfile tf;
9081e999736SAlan Cox 
9091e999736SAlan Cox 	new_sectors--;
9101e999736SAlan Cox 
9111e999736SAlan Cox 	ata_tf_init(dev, &tf);
9121e999736SAlan Cox 
9131e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX_EXT;
9141e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
9151e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9161e999736SAlan Cox 	tf.device |= 0x40;
9171e999736SAlan Cox 
9181e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9191e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9201e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9211e999736SAlan Cox 
9221e999736SAlan Cox 	tf.hob_lbal = (new_sectors >> 24) & 0xff;
9231e999736SAlan Cox 	tf.hob_lbam = (new_sectors >> 32) & 0xff;
9241e999736SAlan Cox 	tf.hob_lbah = (new_sectors >> 40) & 0xff;
9251e999736SAlan Cox 
9261e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9271e999736SAlan Cox 	if (err)
9281e999736SAlan Cox 		return 0;
9291e999736SAlan Cox 
9301e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
9311e999736SAlan Cox }
9321e999736SAlan Cox 
9331e999736SAlan Cox /**
9341e999736SAlan Cox  *	ata_set_native_max_address	-	LBA28 native max set
9351e999736SAlan Cox  *	@dev: Device to query
9366b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
9371e999736SAlan Cox  *
9381e999736SAlan Cox  *	Perform an LBA28 size set max upon the device in question. Return the
9391e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
9401e999736SAlan Cox  */
9411e999736SAlan Cox 
9421e999736SAlan Cox static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
9431e999736SAlan Cox {
9441e999736SAlan Cox 	unsigned int err;
9451e999736SAlan Cox 	struct ata_taskfile tf;
9461e999736SAlan Cox 
9471e999736SAlan Cox 	new_sectors--;
9481e999736SAlan Cox 
9491e999736SAlan Cox 	ata_tf_init(dev, &tf);
9501e999736SAlan Cox 
9511e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX;
9521e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
9531e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9541e999736SAlan Cox 
9551e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9561e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9571e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9581e999736SAlan Cox 	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
9591e999736SAlan Cox 
9601e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9611e999736SAlan Cox 	if (err)
9621e999736SAlan Cox 		return 0;
9631e999736SAlan Cox 
9641e999736SAlan Cox 	return ata_tf_to_lba(&tf);
9651e999736SAlan Cox }
9661e999736SAlan Cox 
9671e999736SAlan Cox /**
9681e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
9691e999736SAlan Cox  *	@dev: Device to resize
9701e999736SAlan Cox  *
9711e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
9721e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
9731e999736SAlan Cox  *	it, if required, to the full size of the media.  The caller must
9741e999736SAlan Cox  *	check that the drive has the HPA feature set enabled.
9751e999736SAlan Cox 
9761e999736SAlan Cox static u64 ata_hpa_resize(struct ata_device *dev)
9771e999736SAlan Cox {
9781e999736SAlan Cox 	u64 sectors = dev->n_sectors;
9791e999736SAlan Cox 	u64 hpa_sectors;
9801e999736SAlan Cox 
9811e999736SAlan Cox 	if (ata_id_has_lba48(dev->id))
9821e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address_ext(dev);
9831e999736SAlan Cox 	else
9841e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address(dev);
9851e999736SAlan Cox 
9861e999736SAlan Cox 	/* if no hpa, both should be equal */
987bd1d5ec6SAndrew Morton 	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
988bd1d5ec6SAndrew Morton 				"hpa_sectors = %lld\n",
989bd1d5ec6SAndrew Morton 		__FUNCTION__, (long long)sectors, (long long)hpa_sectors);
9901e999736SAlan Cox 
9911e999736SAlan Cox 	if (hpa_sectors > sectors) {
9921e999736SAlan Cox 		ata_dev_printk(dev, KERN_INFO,
9931e999736SAlan Cox 			"Host Protected Area detected:\n"
9941e999736SAlan Cox 			"\tcurrent size: %lld sectors\n"
9951e999736SAlan Cox 			"\tnative size: %lld sectors\n",
996bd1d5ec6SAndrew Morton 			(long long)sectors, (long long)hpa_sectors);
9971e999736SAlan Cox 
9981e999736SAlan Cox 		if (ata_ignore_hpa) {
9991e999736SAlan Cox 			if (ata_id_has_lba48(dev->id))
10001e999736SAlan Cox 				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
10011e999736SAlan Cox 			else
1002bd1d5ec6SAndrew Morton 				hpa_sectors = ata_set_native_max_address(dev,
1003bd1d5ec6SAndrew Morton 								hpa_sectors);
10041e999736SAlan Cox 
10051e999736SAlan Cox 			if (hpa_sectors) {
1006bd1d5ec6SAndrew Morton 				ata_dev_printk(dev, KERN_INFO, "native size "
1007bd1d5ec6SAndrew Morton 					"increased to %lld sectors\n",
1008bd1d5ec6SAndrew Morton 					(long long)hpa_sectors);
10091e999736SAlan Cox 				return hpa_sectors;
10101e999736SAlan Cox 			}
10111e999736SAlan Cox 		}
10121e999736SAlan Cox 	}
10131e999736SAlan Cox 	return sectors;
10141e999736SAlan Cox }
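/*
 * Worked example of the logic above (numbers purely illustrative): a
 * disk clipped by the BIOS reports n_sectors = 156301488 while READ
 * NATIVE MAX (EXT) returns 312581808.  With ignore_hpa=1 the SET MAX
 * call unlocks the full capacity and 312581808 is returned; with
 * ignore_hpa=0 only the informational message is printed and the
 * BIOS-limited 156301488 is kept.
 */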
10151e999736SAlan Cox 
1016c6fd2807SJeff Garzik static u64 ata_id_n_sectors(const u16 *id)
1017c6fd2807SJeff Garzik {
1018c6fd2807SJeff Garzik 	if (ata_id_has_lba(id)) {
1019c6fd2807SJeff Garzik 		if (ata_id_has_lba48(id))
1020c6fd2807SJeff Garzik 			return ata_id_u64(id, 100);
1021c6fd2807SJeff Garzik 		else
1022c6fd2807SJeff Garzik 			return ata_id_u32(id, 60);
1023c6fd2807SJeff Garzik 	} else {
1024c6fd2807SJeff Garzik 		if (ata_id_current_chs_valid(id))
1025c6fd2807SJeff Garzik 			return ata_id_u32(id, 57);
1026c6fd2807SJeff Garzik 		else
1027c6fd2807SJeff Garzik 			return id[1] * id[3] * id[6];
1028c6fd2807SJeff Garzik 	}
1029c6fd2807SJeff Garzik }
1030c6fd2807SJeff Garzik 
1031c6fd2807SJeff Garzik /**
103210305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
103310305f0fSAlan  *	@dev: device to identify
1034cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
103510305f0fSAlan  *
103610305f0fSAlan  *	Set up the timing values for the device based upon the identify
103710305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
103810305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
103910305f0fSAlan  *	mode correctly when possible.
104010305f0fSAlan  *
104110305f0fSAlan  *	In addition we emit messages formatted like those of the default
104210305f0fSAlan  *	ata_dev_set_mode handler, in order to keep the presentation
104310305f0fSAlan  *	consistent.
104410305f0fSAlan  */
104510305f0fSAlan 
104610305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
104710305f0fSAlan {
104810305f0fSAlan 	unsigned int mask;
104910305f0fSAlan 	u8 mode;
105010305f0fSAlan 
105110305f0fSAlan 	/* Pack the DMA modes */
105210305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
105310305f0fSAlan 	if (dev->id[53] & 0x04)
105410305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
105510305f0fSAlan 
105610305f0fSAlan 	/* Select the mode in use */
105710305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
105810305f0fSAlan 
105910305f0fSAlan 	if (mode != 0) {
106010305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
106110305f0fSAlan 		       ata_mode_string(mask));
106210305f0fSAlan 	} else {
106310305f0fSAlan 		/* SWDMA perhaps ? */
106410305f0fSAlan 		mode = unknown;
106510305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
106610305f0fSAlan 	}
106710305f0fSAlan 
106810305f0fSAlan 	/* Configure the device reporting */
106910305f0fSAlan 	dev->xfer_mode = mode;
107010305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
107110305f0fSAlan }
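/*
 * Usage sketch (illustrative): a driver that leaves the controller
 * timings exactly as programmed by the firmware could call
 *
 *	ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
 *
 * so that MWDMA0 is assumed and reported whenever the IDENTIFY words
 * do not reveal which DMA mode the firmware actually selected.
 */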
107210305f0fSAlan 
107310305f0fSAlan /**
1074c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1075c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1076c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1077c6fd2807SJeff Garzik  *
1078c6fd2807SJeff Garzik  *	This function intentionally performs no operation.
1079c6fd2807SJeff Garzik  *
1080c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1081c6fd2807SJeff Garzik  *
1082c6fd2807SJeff Garzik  *	LOCKING:
1083c6fd2807SJeff Garzik  *	caller.
1084c6fd2807SJeff Garzik  */
1085c6fd2807SJeff Garzik void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1086c6fd2807SJeff Garzik {
1087c6fd2807SJeff Garzik }
1088c6fd2807SJeff Garzik 
1089c6fd2807SJeff Garzik 
1090c6fd2807SJeff Garzik /**
1091c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1092c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1093c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1094c6fd2807SJeff Garzik  *
1095c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1096c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1097c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1098c6fd2807SJeff Garzik  *
1099c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1100c6fd2807SJeff Garzik  *
1101c6fd2807SJeff Garzik  *	LOCKING:
1102c6fd2807SJeff Garzik  *	caller.
1103c6fd2807SJeff Garzik  */
1104c6fd2807SJeff Garzik 
1105c6fd2807SJeff Garzik void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1106c6fd2807SJeff Garzik {
1107c6fd2807SJeff Garzik 	u8 tmp;
1108c6fd2807SJeff Garzik 
1109c6fd2807SJeff Garzik 	if (device == 0)
1110c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1111c6fd2807SJeff Garzik 	else
1112c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1113c6fd2807SJeff Garzik 
11140d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1115c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1116c6fd2807SJeff Garzik }
1117c6fd2807SJeff Garzik 
1118c6fd2807SJeff Garzik /**
1119c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1120c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1121c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1122c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1123c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1124c6fd2807SJeff Garzik  *
1125c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1126c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1127c6fd2807SJeff Garzik  *	ATA channel.
1128c6fd2807SJeff Garzik  *
1129c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1130c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1131c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1132c6fd2807SJeff Garzik  *
1133c6fd2807SJeff Garzik  *	LOCKING:
1134c6fd2807SJeff Garzik  *	caller.
1135c6fd2807SJeff Garzik  */
1136c6fd2807SJeff Garzik 
1137c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1138c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1139c6fd2807SJeff Garzik {
1140c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
114144877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
114244877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1143c6fd2807SJeff Garzik 
1144c6fd2807SJeff Garzik 	if (wait)
1145c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1146c6fd2807SJeff Garzik 
1147c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1148c6fd2807SJeff Garzik 
1149c6fd2807SJeff Garzik 	if (wait) {
1150c6fd2807SJeff Garzik 		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1151c6fd2807SJeff Garzik 			msleep(150);
1152c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1153c6fd2807SJeff Garzik 	}
1154c6fd2807SJeff Garzik }
1155c6fd2807SJeff Garzik 
1156c6fd2807SJeff Garzik /**
1157c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1158c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1159c6fd2807SJeff Garzik  *
1160c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1161c6fd2807SJeff Garzik  *	page.
1162c6fd2807SJeff Garzik  *
1163c6fd2807SJeff Garzik  *	LOCKING:
1164c6fd2807SJeff Garzik  *	caller.
1165c6fd2807SJeff Garzik  */
1166c6fd2807SJeff Garzik 
1167c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1168c6fd2807SJeff Garzik {
1169c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1170c6fd2807SJeff Garzik 		"53==0x%04x  "
1171c6fd2807SJeff Garzik 		"63==0x%04x  "
1172c6fd2807SJeff Garzik 		"64==0x%04x  "
1173c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1174c6fd2807SJeff Garzik 		id[49],
1175c6fd2807SJeff Garzik 		id[53],
1176c6fd2807SJeff Garzik 		id[63],
1177c6fd2807SJeff Garzik 		id[64],
1178c6fd2807SJeff Garzik 		id[75]);
1179c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1180c6fd2807SJeff Garzik 		"81==0x%04x  "
1181c6fd2807SJeff Garzik 		"82==0x%04x  "
1182c6fd2807SJeff Garzik 		"83==0x%04x  "
1183c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1184c6fd2807SJeff Garzik 		id[80],
1185c6fd2807SJeff Garzik 		id[81],
1186c6fd2807SJeff Garzik 		id[82],
1187c6fd2807SJeff Garzik 		id[83],
1188c6fd2807SJeff Garzik 		id[84]);
1189c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1190c6fd2807SJeff Garzik 		"93==0x%04x\n",
1191c6fd2807SJeff Garzik 		id[88],
1192c6fd2807SJeff Garzik 		id[93]);
1193c6fd2807SJeff Garzik }
1194c6fd2807SJeff Garzik 
1195c6fd2807SJeff Garzik /**
1196c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1197c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1198c6fd2807SJeff Garzik  *
1199c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1200c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1201c6fd2807SJeff Garzik  *
1202c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1203c6fd2807SJeff Garzik  *
1204c6fd2807SJeff Garzik  *	LOCKING:
1205c6fd2807SJeff Garzik  *	None.
1206c6fd2807SJeff Garzik  *
1207c6fd2807SJeff Garzik  *	RETURNS:
1208c6fd2807SJeff Garzik  *	Computed xfermask
1209c6fd2807SJeff Garzik  */
1210c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1211c6fd2807SJeff Garzik {
1212c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1213c6fd2807SJeff Garzik 
1214c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1215c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1216c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1217c6fd2807SJeff Garzik 		pio_mask <<= 3;
1218c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1219c6fd2807SJeff Garzik 	} else {
1220c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1221c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1222c6fd2807SJeff Garzik 		 * a mask.
1223c6fd2807SJeff Garzik 		 */
12247a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
122546767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
122746767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
122746767aebSAlan Cox 		else
122846767aebSAlan Cox 			pio_mask = 1;
1229c6fd2807SJeff Garzik 
1230c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1231c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1232c6fd2807SJeff Garzik 		 * process. However it's the speeds, not the modes, that
1233c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1234c6fd2807SJeff Garzik 		 * will get this right anyway
1235c6fd2807SJeff Garzik 		 */
1236c6fd2807SJeff Garzik 	}
1237c6fd2807SJeff Garzik 
1238c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1239c6fd2807SJeff Garzik 
1240b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1241b352e57dSAlan Cox 		/*
1242b352e57dSAlan Cox 		 *	Process compact flash extended modes
1243b352e57dSAlan Cox 		 */
1244b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1245b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1246b352e57dSAlan Cox 
1247b352e57dSAlan Cox 		if (pio)
1248b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1249b352e57dSAlan Cox 		if (pio > 1)
1250b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1251b352e57dSAlan Cox 		if (dma)
1252b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1253b352e57dSAlan Cox 		if (dma > 1)
1254b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1255b352e57dSAlan Cox 	}
1256b352e57dSAlan Cox 
1257c6fd2807SJeff Garzik 	udma_mask = 0;
1258c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1259c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1260c6fd2807SJeff Garzik 
1261c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1262c6fd2807SJeff Garzik }
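
/*
 * Illustrative sketch (not part of libata-core.c): log the transfer modes
 * advertised by a freshly read IDENTIFY page.  ata_mode_string() is used
 * the same way by ata_dev_configure() below.  example_log_xfermask() is
 * made up.
 */
static void example_log_xfermask(struct ata_device *dev)
{
	unsigned int xfer_mask = ata_id_xfermask(dev->id);

	ata_dev_printk(dev, KERN_INFO, "max transfer mode %s\n",
		       ata_mode_string(xfer_mask));
}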
1263c6fd2807SJeff Garzik 
1264c6fd2807SJeff Garzik /**
1265c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1266c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1267c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
126865f27f38SDavid Howells  *	@data: data for @fn to use
1269c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1270c6fd2807SJeff Garzik  *
1271c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1272c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1273c6fd2807SJeff Garzik  *	user's (low level driver's) responsibility to make sure that only
1274c6fd2807SJeff Garzik  *	one task is active at any given time.
1275c6fd2807SJeff Garzik  *
1276c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1277c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1278c6fd2807SJeff Garzik  *	synchronization.
1279c6fd2807SJeff Garzik  *
1280c6fd2807SJeff Garzik  *	LOCKING:
1281c6fd2807SJeff Garzik  *	Inherited from caller.
1282c6fd2807SJeff Garzik  */
128365f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1284c6fd2807SJeff Garzik 			 unsigned long delay)
1285c6fd2807SJeff Garzik {
1286c6fd2807SJeff Garzik 	int rc;
1287c6fd2807SJeff Garzik 
1288c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
1289c6fd2807SJeff Garzik 		return;
1290c6fd2807SJeff Garzik 
129165f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
129265f27f38SDavid Howells 	ap->port_task_data = data;
1293c6fd2807SJeff Garzik 
1294c6fd2807SJeff Garzik 	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
1295c6fd2807SJeff Garzik 
1296c6fd2807SJeff Garzik 	/* rc == 0 means that another user is using port task */
1297c6fd2807SJeff Garzik 	WARN_ON(rc == 0);
1298c6fd2807SJeff Garzik }
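
/*
 * Illustrative sketch (not part of libata-core.c): a polling work function
 * re-arming itself through ata_port_queue_task().  The function receives
 * the work_struct and recovers its port and data much as libata's own PIO
 * polling task does.  example_poll_task() is made up.
 */
static void example_poll_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	void *data = ap->port_task_data;

	/* ... poll the device using @data, then re-queue in 100 ms ... */
	ata_port_queue_task(ap, example_poll_task, data,
			    msecs_to_jiffies(100));
}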
1299c6fd2807SJeff Garzik 
1300c6fd2807SJeff Garzik /**
1301c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1302c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1303c6fd2807SJeff Garzik  *
1304c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1305c6fd2807SJeff Garzik  *	be running or scheduled.
1306c6fd2807SJeff Garzik  *
1307c6fd2807SJeff Garzik  *	LOCKING:
1308c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1309c6fd2807SJeff Garzik  */
1310c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1311c6fd2807SJeff Garzik {
1312c6fd2807SJeff Garzik 	unsigned long flags;
1313c6fd2807SJeff Garzik 
1314c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1315c6fd2807SJeff Garzik 
1316c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1317c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
1318c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1319c6fd2807SJeff Garzik 
1320c6fd2807SJeff Garzik 	DPRINTK("flush #1\n");
132128e53bddSOleg Nesterov 	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
1322c6fd2807SJeff Garzik 
1323c6fd2807SJeff Garzik 	/*
1324c6fd2807SJeff Garzik 	 * At this point, if a task is running, it's guaranteed to see
1325c6fd2807SJeff Garzik 	 * the FLUSH flag; thus, it will never queue pio tasks again.
1326c6fd2807SJeff Garzik 	 * Cancel and flush.
1327c6fd2807SJeff Garzik 	 */
1328c6fd2807SJeff Garzik 	if (!cancel_delayed_work(&ap->port_task)) {
1329c6fd2807SJeff Garzik 		if (ata_msg_ctl(ap))
1330c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1331c6fd2807SJeff Garzik 					__FUNCTION__);
133228e53bddSOleg Nesterov 		cancel_work_sync(&ap->port_task.work);
1333c6fd2807SJeff Garzik 	}
1334c6fd2807SJeff Garzik 
1335c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1336c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
1337c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1338c6fd2807SJeff Garzik 
1339c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1340c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1341c6fd2807SJeff Garzik }
1342c6fd2807SJeff Garzik 
13437102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1344c6fd2807SJeff Garzik {
1345c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1346c6fd2807SJeff Garzik 
1347c6fd2807SJeff Garzik 	complete(waiting);
1348c6fd2807SJeff Garzik }
1349c6fd2807SJeff Garzik 
1350c6fd2807SJeff Garzik /**
13512432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1352c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1353c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1354c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1355c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
13562432697bSTejun Heo  *	@sg: sg list for the data buffer of the command
13572432697bSTejun Heo  *	@n_elem: Number of sg entries
1358c6fd2807SJeff Garzik  *
1359c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1360c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1361c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1362c6fd2807SJeff Garzik  *	is taken after a command times out.  It's caller's duty to
1363c6fd2807SJeff Garzik  *	clean up after timeout.
1364c6fd2807SJeff Garzik  *
1365c6fd2807SJeff Garzik  *	LOCKING:
1366c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1367c6fd2807SJeff Garzik  *
1368c6fd2807SJeff Garzik  *	RETURNS:
1369c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1370c6fd2807SJeff Garzik  */
13712432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1372c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
13732432697bSTejun Heo 			      int dma_dir, struct scatterlist *sg,
13742432697bSTejun Heo 			      unsigned int n_elem)
1375c6fd2807SJeff Garzik {
1376c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1377c6fd2807SJeff Garzik 	u8 command = tf->command;
1378c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1379c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1380c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1381c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1382c6fd2807SJeff Garzik 	unsigned long flags;
1383c6fd2807SJeff Garzik 	unsigned int err_mask;
1384c6fd2807SJeff Garzik 	int rc;
1385c6fd2807SJeff Garzik 
1386c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1387c6fd2807SJeff Garzik 
1388c6fd2807SJeff Garzik 	/* no internal command while frozen */
1389c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1390c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1391c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1392c6fd2807SJeff Garzik 	}
1393c6fd2807SJeff Garzik 
1394c6fd2807SJeff Garzik 	/* initialize internal qc */
1395c6fd2807SJeff Garzik 
1396c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1397c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1398c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1399c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1400c6fd2807SJeff Garzik 	 */
1401c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1402c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1403c6fd2807SJeff Garzik 	else
1404c6fd2807SJeff Garzik 		tag = 0;
1405c6fd2807SJeff Garzik 
1406c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1407c6fd2807SJeff Garzik 		BUG();
1408c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1409c6fd2807SJeff Garzik 
1410c6fd2807SJeff Garzik 	qc->tag = tag;
1411c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1412c6fd2807SJeff Garzik 	qc->ap = ap;
1413c6fd2807SJeff Garzik 	qc->dev = dev;
1414c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1415c6fd2807SJeff Garzik 
1416c6fd2807SJeff Garzik 	preempted_tag = ap->active_tag;
1417c6fd2807SJeff Garzik 	preempted_sactive = ap->sactive;
1418c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1419c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
1420c6fd2807SJeff Garzik 	ap->sactive = 0;
1421c6fd2807SJeff Garzik 	ap->qc_active = 0;
1422c6fd2807SJeff Garzik 
1423c6fd2807SJeff Garzik 	/* prepare & issue qc */
1424c6fd2807SJeff Garzik 	qc->tf = *tf;
1425c6fd2807SJeff Garzik 	if (cdb)
1426c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1427c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1428c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1429c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
14302432697bSTejun Heo 		unsigned int i, buflen = 0;
14312432697bSTejun Heo 
14322432697bSTejun Heo 		for (i = 0; i < n_elem; i++)
14332432697bSTejun Heo 			buflen += sg[i].length;
14342432697bSTejun Heo 
14352432697bSTejun Heo 		ata_sg_init(qc, sg, n_elem);
143649c80429SBrian King 		qc->nbytes = buflen;
1437c6fd2807SJeff Garzik 	}
1438c6fd2807SJeff Garzik 
1439c6fd2807SJeff Garzik 	qc->private_data = &wait;
1440c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1441c6fd2807SJeff Garzik 
1442c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1443c6fd2807SJeff Garzik 
1444c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1445c6fd2807SJeff Garzik 
1446c6fd2807SJeff Garzik 	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1447c6fd2807SJeff Garzik 
1448c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1449c6fd2807SJeff Garzik 
1450c6fd2807SJeff Garzik 	if (!rc) {
1451c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1452c6fd2807SJeff Garzik 
1453c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1454c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1455c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1456c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1457c6fd2807SJeff Garzik 		 */
1458c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1459c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1460c6fd2807SJeff Garzik 
1461c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1462c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1463c6fd2807SJeff Garzik 			else
1464c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1465c6fd2807SJeff Garzik 
1466c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1467c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1468c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1469c6fd2807SJeff Garzik 		}
1470c6fd2807SJeff Garzik 
1471c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1472c6fd2807SJeff Garzik 	}
1473c6fd2807SJeff Garzik 
1474c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1475c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1476c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1477c6fd2807SJeff Garzik 
1478a51d644aSTejun Heo 	/* perform minimal error analysis */
1479a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1480a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1481a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1482a51d644aSTejun Heo 
1483a51d644aSTejun Heo 		if (!qc->err_mask)
1484c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1485a51d644aSTejun Heo 
1486a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1487a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1488c6fd2807SJeff Garzik 	}
1489c6fd2807SJeff Garzik 
1490c6fd2807SJeff Garzik 	/* finish up */
1491c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1492c6fd2807SJeff Garzik 
1493c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1494c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1495c6fd2807SJeff Garzik 
1496c6fd2807SJeff Garzik 	ata_qc_free(qc);
1497c6fd2807SJeff Garzik 	ap->active_tag = preempted_tag;
1498c6fd2807SJeff Garzik 	ap->sactive = preempted_sactive;
1499c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1500c6fd2807SJeff Garzik 
1501c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1502c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1503c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1504c6fd2807SJeff Garzik 	 * port.
1505c6fd2807SJeff Garzik 	 *
1506c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1507c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1508c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1509c6fd2807SJeff Garzik 	 *
1510c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1511c6fd2807SJeff Garzik 	 */
1512c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1513c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1514c6fd2807SJeff Garzik 		ata_port_probe(ap);
1515c6fd2807SJeff Garzik 	}
1516c6fd2807SJeff Garzik 
1517c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1518c6fd2807SJeff Garzik 
1519c6fd2807SJeff Garzik 	return err_mask;
1520c6fd2807SJeff Garzik }
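
/*
 * Illustrative sketch (not part of libata-core.c): issue IDENTIFY DEVICE
 * through the sg-based entry point with a one-entry scatterlist.  Most
 * callers use the ata_exec_internal() wrapper below, which builds the same
 * single-element list from a flat buffer.  example_identify_sg() is made up.
 */
static unsigned int example_identify_sg(struct ata_device *dev, u16 *id)
{
	struct ata_taskfile tf;
	struct scatterlist sg;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	sg_init_one(&sg, id, ATA_ID_WORDS * sizeof(u16));
	return ata_exec_internal_sg(dev, &tf, NULL, DMA_FROM_DEVICE, &sg, 1);
}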
1521c6fd2807SJeff Garzik 
1522c6fd2807SJeff Garzik /**
152333480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
15242432697bSTejun Heo  *	@dev: Device to which the command is sent
15252432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
15262432697bSTejun Heo  *	@cdb: CDB for packet command
15272432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
15282432697bSTejun Heo  *	@buf: Data buffer of the command
15292432697bSTejun Heo  *	@buflen: Length of data buffer
15302432697bSTejun Heo  *
15312432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
15322432697bSTejun Heo  *	buffer instead of sg list.
15332432697bSTejun Heo  *
15342432697bSTejun Heo  *	LOCKING:
15352432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
15362432697bSTejun Heo  *
15372432697bSTejun Heo  *	RETURNS:
15382432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
15392432697bSTejun Heo  */
15402432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
15412432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
15422432697bSTejun Heo 			   int dma_dir, void *buf, unsigned int buflen)
15432432697bSTejun Heo {
154433480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
154533480a0eSTejun Heo 	unsigned int n_elem = 0;
15462432697bSTejun Heo 
154733480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
154833480a0eSTejun Heo 		WARN_ON(!buf);
15492432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
155033480a0eSTejun Heo 		psg = &sg;
155133480a0eSTejun Heo 		n_elem++;
155233480a0eSTejun Heo 	}
15532432697bSTejun Heo 
155433480a0eSTejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
15552432697bSTejun Heo }
15562432697bSTejun Heo 
15572432697bSTejun Heo /**
1558c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1559c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1560c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1561c6fd2807SJeff Garzik  *
1562c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1563c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1564c6fd2807SJeff Garzik  *
1565c6fd2807SJeff Garzik  *	LOCKING:
1566c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1567c6fd2807SJeff Garzik  *
1568c6fd2807SJeff Garzik  *	RETURNS:
1569c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1570c6fd2807SJeff Garzik  */
1571c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1572c6fd2807SJeff Garzik {
1573c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1574c6fd2807SJeff Garzik 
1575c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1576c6fd2807SJeff Garzik 
1577c6fd2807SJeff Garzik 	tf.command = cmd;
1578c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1579c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1580c6fd2807SJeff Garzik 
1581c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1582c6fd2807SJeff Garzik }
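
/*
 * Illustrative sketch (not part of libata-core.c): spin a disk down with
 * STANDBY IMMEDIATE.  Cache flushes (ATA_CMD_FLUSH / ATA_CMD_FLUSH_EXT)
 * go through the same helper elsewhere in libata.  example_standby() is
 * made up.
 */
static int example_standby(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);

	return err_mask ? -EIO : 0;
}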
1583c6fd2807SJeff Garzik 
1584c6fd2807SJeff Garzik /**
1585c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1586c6fd2807SJeff Garzik  *	@adev: ATA device
1587c6fd2807SJeff Garzik  *
1588c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1589c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1590c6fd2807SJeff Garzik  */
1591c6fd2807SJeff Garzik 
1592c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1593c6fd2807SJeff Garzik {
1594432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check
1595432729f0SAlan Cox 	   as the caller should know this */
1596432729f0SAlan Cox 	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
1597c6fd2807SJeff Garzik 		return 0;
1598432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1599432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1600c6fd2807SJeff Garzik 		return 1;
1601432729f0SAlan Cox 	/* We turn it on when possible */
1602432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1603432729f0SAlan Cox 		return 1;
1604432729f0SAlan Cox 	return 0;
1605432729f0SAlan Cox }
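
/*
 * Illustrative sketch (not part of libata-core.c): how a PATA controller
 * driver might fold the IORDY decision into the timing word it programs.
 * example_pio_timing() and the 0x80 enable bit are hypothetical.
 */
static u32 example_pio_timing(struct ata_device *adev, u32 timing)
{
	/* 0x80 stands in for a chip-specific IORDY enable bit */
	if (ata_pio_need_iordy(adev))
		timing |= 0x80;
	return timing;
}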
1606c6fd2807SJeff Garzik 
1607432729f0SAlan Cox /**
1608432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1609432729f0SAlan Cox  *	@adev: ATA device
1610432729f0SAlan Cox  *
1611432729f0SAlan Cox  *	Compute the mask of PIO modes that may be used without IORDY,
1612432729f0SAlan Cox  *	based on the drive's reported non-IORDY cycle time.
1613432729f0SAlan Cox  */
1614432729f0SAlan Cox 
1615432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1616432729f0SAlan Cox {
1617c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1618c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1619432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1620c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1621c6fd2807SJeff Garzik 		if (pio) {
1622c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1623c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1624432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1625432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1626c6fd2807SJeff Garzik 		}
1627c6fd2807SJeff Garzik 	}
1628432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1629c6fd2807SJeff Garzik }
1630c6fd2807SJeff Garzik 
1631c6fd2807SJeff Garzik /**
1632c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1633c6fd2807SJeff Garzik  *	@dev: target device
1634c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1635bff04647STejun Heo  *	@flags: ATA_READID_* flags
1636c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1637c6fd2807SJeff Garzik  *
1638c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1639c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1640c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1641c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1642c6fd2807SJeff Garzik  *
1643c6fd2807SJeff Garzik  *	LOCKING:
1644c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1645c6fd2807SJeff Garzik  *
1646c6fd2807SJeff Garzik  *	RETURNS:
1647c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1648c6fd2807SJeff Garzik  */
1649c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1650bff04647STejun Heo 		    unsigned int flags, u16 *id)
1651c6fd2807SJeff Garzik {
1652c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1653c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1654c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1655c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1656c6fd2807SJeff Garzik 	const char *reason;
165754936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1658c6fd2807SJeff Garzik 	int rc;
1659c6fd2807SJeff Garzik 
1660c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
166144877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1662c6fd2807SJeff Garzik 
1663c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1664c6fd2807SJeff Garzik  retry:
1665c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1666c6fd2807SJeff Garzik 
1667c6fd2807SJeff Garzik 	switch (class) {
1668c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1669c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1670c6fd2807SJeff Garzik 		break;
1671c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1672c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1673c6fd2807SJeff Garzik 		break;
1674c6fd2807SJeff Garzik 	default:
1675c6fd2807SJeff Garzik 		rc = -ENODEV;
1676c6fd2807SJeff Garzik 		reason = "unsupported class";
1677c6fd2807SJeff Garzik 		goto err_out;
1678c6fd2807SJeff Garzik 	}
1679c6fd2807SJeff Garzik 
1680c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
168181afe893STejun Heo 
168281afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
168381afe893STejun Heo 	 * sure those are properly initialized.
168481afe893STejun Heo 	 */
168581afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
168681afe893STejun Heo 
168781afe893STejun Heo 	/* Device presence detection is unreliable on some
168881afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
168981afe893STejun Heo 	 */
169081afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1691c6fd2807SJeff Garzik 
1692c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1693c6fd2807SJeff Garzik 				     id, sizeof(id[0]) * ATA_ID_WORDS);
1694c6fd2807SJeff Garzik 	if (err_mask) {
1695800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
169655a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
169744877b4eSTejun Heo 				ap->print_id, dev->devno);
169855a8e2c8STejun Heo 			return -ENOENT;
169955a8e2c8STejun Heo 		}
170055a8e2c8STejun Heo 
170154936f8bSTejun Heo 		/* Device or controller might have reported the wrong
170254936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
170354936f8bSTejun Heo 		 * the current one is aborted by the device.
170454936f8bSTejun Heo 		 */
170554936f8bSTejun Heo 		if (may_fallback &&
170654936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
170754936f8bSTejun Heo 			may_fallback = 0;
170854936f8bSTejun Heo 
170954936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
171054936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
171154936f8bSTejun Heo 			else
171254936f8bSTejun Heo 				class = ATA_DEV_ATA;
171354936f8bSTejun Heo 			goto retry;
171454936f8bSTejun Heo 		}
171554936f8bSTejun Heo 
1716c6fd2807SJeff Garzik 		rc = -EIO;
1717c6fd2807SJeff Garzik 		reason = "I/O error";
1718c6fd2807SJeff Garzik 		goto err_out;
1719c6fd2807SJeff Garzik 	}
1720c6fd2807SJeff Garzik 
172154936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
172254936f8bSTejun Heo 	 * successfully at least once.
172354936f8bSTejun Heo 	 */
172454936f8bSTejun Heo 	may_fallback = 0;
172554936f8bSTejun Heo 
1726c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1727c6fd2807SJeff Garzik 
1728c6fd2807SJeff Garzik 	/* sanity check */
1729c6fd2807SJeff Garzik 	rc = -EINVAL;
1730c6fd2807SJeff Garzik 	reason = "device reports illegal type";
17314a3381feSJeff Garzik 
17324a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
17334a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
17344a3381feSJeff Garzik 			goto err_out;
17354a3381feSJeff Garzik 	} else {
17364a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1737c6fd2807SJeff Garzik 			goto err_out;
1738c6fd2807SJeff Garzik 	}
1739c6fd2807SJeff Garzik 
1740169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1741169439c2SMark Lord 		tried_spinup = 1;
1742169439c2SMark Lord 		/*
1743169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1744169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1745169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1746169439c2SMark Lord 		 */
1747169439c2SMark Lord 		ata_tf_init(dev, &tf);
1748169439c2SMark Lord 		tf.command = ATA_CMD_SET_FEATURES;
1749169439c2SMark Lord 		tf.feature = SETFEATURES_SPINUP;
1750169439c2SMark Lord 		tf.protocol = ATA_PROT_NODATA;
1751169439c2SMark Lord 		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1752169439c2SMark Lord 		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1753169439c2SMark Lord 		if (err_mask) {
1754169439c2SMark Lord 			rc = -EIO;
1755169439c2SMark Lord 			reason = "SPINUP failed";
1756169439c2SMark Lord 			goto err_out;
1757169439c2SMark Lord 		}
1758169439c2SMark Lord 		/*
1759169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1760169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1761169439c2SMark Lord 		 */
1762169439c2SMark Lord 		if (id[2] == 0x37c8)
1763169439c2SMark Lord 			goto retry;
1764169439c2SMark Lord 	}
1765169439c2SMark Lord 
1766bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1767c6fd2807SJeff Garzik 		/*
1768c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1769c6fd2807SJeff Garzik 		 * SRST RESET
1770c6fd2807SJeff Garzik 		 * IDENTIFY
1771c6fd2807SJeff Garzik 		 * INITIALIZE DEVICE PARAMETERS
1772c6fd2807SJeff Garzik 		 * anything else..
1773c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
1774c6fd2807SJeff Garzik 		 */
1775c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1776c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1777c6fd2807SJeff Garzik 			if (err_mask) {
1778c6fd2807SJeff Garzik 				rc = -EIO;
1779c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1780c6fd2807SJeff Garzik 				goto err_out;
1781c6fd2807SJeff Garzik 			}
1782c6fd2807SJeff Garzik 
1783c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1784c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1785c6fd2807SJeff Garzik 			 */
1786bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1787c6fd2807SJeff Garzik 			goto retry;
1788c6fd2807SJeff Garzik 		}
1789c6fd2807SJeff Garzik 	}
1790c6fd2807SJeff Garzik 
1791c6fd2807SJeff Garzik 	*p_class = class;
1792c6fd2807SJeff Garzik 
1793c6fd2807SJeff Garzik 	return 0;
1794c6fd2807SJeff Garzik 
1795c6fd2807SJeff Garzik  err_out:
1796c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1797c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1798c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1799c6fd2807SJeff Garzik 	return rc;
1800c6fd2807SJeff Garzik }
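
/*
 * Illustrative sketch (not part of libata-core.c): re-read IDENTIFY into a
 * scratch buffer while revalidating a device, leaving dev->id untouched
 * until the fresh data has been checked.  ATA_READID_POSTRESET is passed
 * only when the call immediately follows a reset, as ata_bus_probe() below
 * does.  example_reread_id() is made up.
 */
static int example_reread_id(struct ata_device *dev, u16 *scratch_id)
{
	unsigned int class = dev->class;

	return ata_dev_read_id(dev, &class, 0, scratch_id);
}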
1801c6fd2807SJeff Garzik 
1802c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
1803c6fd2807SJeff Garzik {
1804c6fd2807SJeff Garzik 	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1805c6fd2807SJeff Garzik }
1806c6fd2807SJeff Garzik 
1807c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
1808c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
1809c6fd2807SJeff Garzik {
1810c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1811c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1812c6fd2807SJeff Garzik 
1813c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
1814c6fd2807SJeff Garzik 		desc[0] = '\0';
1815c6fd2807SJeff Garzik 		return;
1816c6fd2807SJeff Garzik 	}
18176919a0a6SAlan Cox 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
18186919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
18196919a0a6SAlan Cox 		return;
18206919a0a6SAlan Cox 	}
1821c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
1822cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1823c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
1824c6fd2807SJeff Garzik 	}
1825c6fd2807SJeff Garzik 
1826c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
1827c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1828c6fd2807SJeff Garzik 	else
1829c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1830c6fd2807SJeff Garzik }
1831c6fd2807SJeff Garzik 
1832c6fd2807SJeff Garzik /**
1833c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1834c6fd2807SJeff Garzik  *	@dev: Target device to configure
1835c6fd2807SJeff Garzik  *
1836c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
1837c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
1838c6fd2807SJeff Garzik  *
1839c6fd2807SJeff Garzik  *	LOCKING:
1840c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1841c6fd2807SJeff Garzik  *
1842c6fd2807SJeff Garzik  *	RETURNS:
1843c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1844c6fd2807SJeff Garzik  */
1845efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
1846c6fd2807SJeff Garzik {
1847c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1848efdaedc4STejun Heo 	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1849c6fd2807SJeff Garzik 	const u16 *id = dev->id;
1850c6fd2807SJeff Garzik 	unsigned int xfer_mask;
1851b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
18523f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
18533f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
1854c6fd2807SJeff Garzik 	int rc;
1855c6fd2807SJeff Garzik 
1856c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
185744877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
185844877b4eSTejun Heo 			       __FUNCTION__);
1859c6fd2807SJeff Garzik 		return 0;
1860c6fd2807SJeff Garzik 	}
1861c6fd2807SJeff Garzik 
1862c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
186344877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1864c6fd2807SJeff Garzik 
186508573a86SKristen Carlson Accardi 	/* set _SDD */
18663a32a8e9STejun Heo 	rc = ata_acpi_push_id(dev);
186708573a86SKristen Carlson Accardi 	if (rc) {
186808573a86SKristen Carlson Accardi 		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
186908573a86SKristen Carlson Accardi 			rc);
187008573a86SKristen Carlson Accardi 	}
187108573a86SKristen Carlson Accardi 
187208573a86SKristen Carlson Accardi 	/* retrieve and execute the ATA task file of _GTF */
187308573a86SKristen Carlson Accardi 	ata_acpi_exec_tfs(ap);
187408573a86SKristen Carlson Accardi 
1875c6fd2807SJeff Garzik 	/* print device capabilities */
1876c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1877c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
1878c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1879c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1880c6fd2807SJeff Garzik 			       __FUNCTION__,
1881c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
1882c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
1883c6fd2807SJeff Garzik 
1884c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
1885c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1886c6fd2807SJeff Garzik 	dev->max_sectors = 0;
1887c6fd2807SJeff Garzik 	dev->cdb_len = 0;
1888c6fd2807SJeff Garzik 	dev->n_sectors = 0;
1889c6fd2807SJeff Garzik 	dev->cylinders = 0;
1890c6fd2807SJeff Garzik 	dev->heads = 0;
1891c6fd2807SJeff Garzik 	dev->sectors = 0;
1892c6fd2807SJeff Garzik 
1893c6fd2807SJeff Garzik 	/*
1894c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
1895c6fd2807SJeff Garzik 	 */
1896c6fd2807SJeff Garzik 
1897c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
1898c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
1899c6fd2807SJeff Garzik 
1900c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1901c6fd2807SJeff Garzik 		ata_dump_id(id);
1902c6fd2807SJeff Garzik 
1903ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1904ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1905ef143d57SAlbert Lee 			sizeof(fwrevbuf));
1906ef143d57SAlbert Lee 
1907ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1908ef143d57SAlbert Lee 			sizeof(modelbuf));
1909ef143d57SAlbert Lee 
1910c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
1911c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
1912b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
1913b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
191444877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
191544877b4eSTejun Heo 					       "supports DRM functions and may "
191644877b4eSTejun Heo 					       "not be fully accessible.\n");
1917b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
1918b352e57dSAlan Cox 		}
1919b352e57dSAlan Cox 		else
1920b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
1921b352e57dSAlan Cox 
1922c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
1923c6fd2807SJeff Garzik 
19243f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
19253f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
19263f64f565SEric D. Mudama 
1927c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
1928c6fd2807SJeff Garzik 			const char *lba_desc;
1929c6fd2807SJeff Garzik 			char ncq_desc[20];
1930c6fd2807SJeff Garzik 
1931c6fd2807SJeff Garzik 			lba_desc = "LBA";
1932c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
1933c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
1934c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
1935c6fd2807SJeff Garzik 				lba_desc = "LBA48";
19366fc49adbSTejun Heo 
19376fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
19386fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
19396fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1940c6fd2807SJeff Garzik 			}
1941c6fd2807SJeff Garzik 
19421e999736SAlan Cox 			if (ata_id_hpa_enabled(dev->id))
19431e999736SAlan Cox 				dev->n_sectors = ata_hpa_resize(dev);
19441e999736SAlan Cox 
1945c6fd2807SJeff Garzik 			/* config NCQ */
1946c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1947c6fd2807SJeff Garzik 
1948c6fd2807SJeff Garzik 			/* print device info to dmesg */
19493f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
19503f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19513f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19523f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
19533f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19543f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19553f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
1956c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
19573f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
19583f64f565SEric D. Mudama 			}
1959c6fd2807SJeff Garzik 		} else {
1960c6fd2807SJeff Garzik 			/* CHS */
1961c6fd2807SJeff Garzik 
1962c6fd2807SJeff Garzik 			/* Default translation */
1963c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
1964c6fd2807SJeff Garzik 			dev->heads	= id[3];
1965c6fd2807SJeff Garzik 			dev->sectors	= id[6];
1966c6fd2807SJeff Garzik 
1967c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
1968c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
1969c6fd2807SJeff Garzik 				dev->cylinders = id[54];
1970c6fd2807SJeff Garzik 				dev->heads     = id[55];
1971c6fd2807SJeff Garzik 				dev->sectors   = id[56];
1972c6fd2807SJeff Garzik 			}
1973c6fd2807SJeff Garzik 
1974c6fd2807SJeff Garzik 			/* print device info to dmesg */
19753f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
1976c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
19773f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19783f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
19793f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19803f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19813f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
19823f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
19833f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
19843f64f565SEric D. Mudama 					dev->heads, dev->sectors);
19853f64f565SEric D. Mudama 			}
1986c6fd2807SJeff Garzik 		}
1987c6fd2807SJeff Garzik 
1988c6fd2807SJeff Garzik 		dev->cdb_len = 16;
1989c6fd2807SJeff Garzik 	}
1990c6fd2807SJeff Garzik 
1991c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
1992c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
1993c6fd2807SJeff Garzik 		char *cdb_intr_string = "";
1994c6fd2807SJeff Garzik 
1995c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
1996c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1997c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1998c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1999c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2000c6fd2807SJeff Garzik 			rc = -EINVAL;
2001c6fd2807SJeff Garzik 			goto err_out_nosup;
2002c6fd2807SJeff Garzik 		}
2003c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2004c6fd2807SJeff Garzik 
2005c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2006c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2007c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2008c6fd2807SJeff Garzik 		}
2009c6fd2807SJeff Garzik 
2010c6fd2807SJeff Garzik 		/* print device info to dmesg */
2011c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2012ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2013ef143d57SAlbert Lee 				       "ATAPI: %s, %s, max %s%s\n",
2014ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2015c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2016c6fd2807SJeff Garzik 				       cdb_intr_string);
2017c6fd2807SJeff Garzik 	}
2018c6fd2807SJeff Garzik 
2019914ed354STejun Heo 	/* determine max_sectors */
2020914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2021914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2022914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2023914ed354STejun Heo 
202493590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
202593590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
202693590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
202793590859SAlan Cox 		   idiot */
202893590859SAlan Cox 		if (print_info) {
202993590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
203093590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
203193590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
203293590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
203393590859SAlan Cox 		}
203493590859SAlan Cox 	}
203593590859SAlan Cox 
2036c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2037c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2038c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2039c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2040c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2041c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2042c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2043c6fd2807SJeff Garzik 	}
2044c6fd2807SJeff Garzik 
204518d6e9d5SAlbert Lee 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
204603ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
204703ec52deSTejun Heo 					 dev->max_sectors);
204818d6e9d5SAlbert Lee 
20496f23a31dSAlbert Lee 	/* limit ATAPI DMA to R/W commands only */
20506f23a31dSAlbert Lee 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
20516f23a31dSAlbert Lee 		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
20526f23a31dSAlbert Lee 
2053c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2054cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2055c6fd2807SJeff Garzik 
2056c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2057c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2058c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2059c6fd2807SJeff Garzik 	return 0;
2060c6fd2807SJeff Garzik 
2061c6fd2807SJeff Garzik err_out_nosup:
2062c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2063c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2064c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2065c6fd2807SJeff Garzik 	return rc;
2066c6fd2807SJeff Garzik }
2067c6fd2807SJeff Garzik 
2068c6fd2807SJeff Garzik /**
20692e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2070be0d18dfSAlan Cox  *	@ap: port
2071be0d18dfSAlan Cox  *
20722e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2073be0d18dfSAlan Cox  *	detection.
2074be0d18dfSAlan Cox  */
2075be0d18dfSAlan Cox 
2076be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2077be0d18dfSAlan Cox {
2078be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2079be0d18dfSAlan Cox }
2080be0d18dfSAlan Cox 
2081be0d18dfSAlan Cox /**
20822e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2083be0d18dfSAlan Cox  *	@ap: port
2084be0d18dfSAlan Cox  *
20852e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2086be0d18dfSAlan Cox  *	detection.
2087be0d18dfSAlan Cox  */
2088be0d18dfSAlan Cox 
2089be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2090be0d18dfSAlan Cox {
2091be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2092be0d18dfSAlan Cox }
2093be0d18dfSAlan Cox 
2094be0d18dfSAlan Cox /**
2095be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2096be0d18dfSAlan Cox  *	@ap: port
2097be0d18dfSAlan Cox  *
2098be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2099be0d18dfSAlan Cox  */
2100be0d18dfSAlan Cox 
2101be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2102be0d18dfSAlan Cox {
2103be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2104be0d18dfSAlan Cox }
2105be0d18dfSAlan Cox 
2106be0d18dfSAlan Cox /**
2107be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2108be0d18dfSAlan Cox  *	@ap: port
2109be0d18dfSAlan Cox  *
2110be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables.
2111be0d18dfSAlan Cox  */
2112be0d18dfSAlan Cox 
2113be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2114be0d18dfSAlan Cox {
2115be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2116be0d18dfSAlan Cox }
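
/*
 * Illustrative sketch (not part of libata-core.c): a driver with no cable
 * detection logic of its own points ->cable_detect at one of the helpers
 * above; ata_bus_probe() below calls the hook once PDIAG- has been
 * released.  example_pata_ops is made up.
 */
static struct ata_port_operations example_pata_ops = {
	.cable_detect	= ata_cable_40wire,
	/* ... remaining hooks omitted ... */
};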
2117be0d18dfSAlan Cox 
2118be0d18dfSAlan Cox /**
2119c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2120c6fd2807SJeff Garzik  *	@ap: Bus to probe
2121c6fd2807SJeff Garzik  *
2122c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2123c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2124c6fd2807SJeff Garzik  *	the bus.
2125c6fd2807SJeff Garzik  *
2126c6fd2807SJeff Garzik  *	LOCKING:
2127c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2128c6fd2807SJeff Garzik  *
2129c6fd2807SJeff Garzik  *	RETURNS:
2130c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2131c6fd2807SJeff Garzik  */
2132c6fd2807SJeff Garzik 
2133c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2134c6fd2807SJeff Garzik {
2135c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2136c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
21374ae72a1eSTejun Heo 	int i, rc;
2138c6fd2807SJeff Garzik 	struct ata_device *dev;
2139c6fd2807SJeff Garzik 
2140c6fd2807SJeff Garzik 	ata_port_probe(ap);
2141c6fd2807SJeff Garzik 
2142c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2143c6fd2807SJeff Garzik 		tries[i] = ATA_PROBE_MAX_TRIES;
2144c6fd2807SJeff Garzik 
2145c6fd2807SJeff Garzik  retry:
2146c6fd2807SJeff Garzik 	/* reset and determine device classes */
2147c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2148c6fd2807SJeff Garzik 
2149c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2150c6fd2807SJeff Garzik 		dev = &ap->device[i];
2151c6fd2807SJeff Garzik 
2152c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2153c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2154c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2155c6fd2807SJeff Garzik 		else
2156c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2157c6fd2807SJeff Garzik 
2158c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2159c6fd2807SJeff Garzik 	}
2160c6fd2807SJeff Garzik 
2161c6fd2807SJeff Garzik 	ata_port_probe(ap);
2162c6fd2807SJeff Garzik 
2163c6fd2807SJeff Garzik 	/* after the reset the device state is PIO 0 and the controller
2164c6fd2807SJeff Garzik 	   state is undefined. Record the mode */
2165c6fd2807SJeff Garzik 
2166c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2167c6fd2807SJeff Garzik 		ap->device[i].pio_mode = XFER_PIO_0;
2168c6fd2807SJeff Garzik 
2169f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2170f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2171f31f0cc2SJeff Garzik 	   the slave device */
2172f31f0cc2SJeff Garzik 
2173f31f0cc2SJeff Garzik 	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
2174c6fd2807SJeff Garzik 		dev = &ap->device[i];
2175c6fd2807SJeff Garzik 
2176c6fd2807SJeff Garzik 		if (tries[i])
2177c6fd2807SJeff Garzik 			dev->class = classes[i];
2178c6fd2807SJeff Garzik 
2179c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2180c6fd2807SJeff Garzik 			continue;
2181c6fd2807SJeff Garzik 
2182bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2183bff04647STejun Heo 				     dev->id);
2184c6fd2807SJeff Garzik 		if (rc)
2185c6fd2807SJeff Garzik 			goto fail;
2186f31f0cc2SJeff Garzik 	}
2187f31f0cc2SJeff Garzik 
2188be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2189be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2190be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2191be0d18dfSAlan Cox 
2192f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2193f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2194f31f0cc2SJeff Garzik 
2195f31f0cc2SJeff Garzik 	for(i = 0; i < ATA_MAX_DEVICES; i++) {
2196f31f0cc2SJeff Garzik 		dev = &ap->device[i];
2197f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2198f31f0cc2SJeff Garzik 			continue;
2199c6fd2807SJeff Garzik 
2200efdaedc4STejun Heo 		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2201efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
2202efdaedc4STejun Heo 		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2203c6fd2807SJeff Garzik 		if (rc)
2204c6fd2807SJeff Garzik 			goto fail;
2205c6fd2807SJeff Garzik 	}
2206c6fd2807SJeff Garzik 
2207c6fd2807SJeff Garzik 	/* configure transfer mode */
2208c6fd2807SJeff Garzik 	rc = ata_set_mode(ap, &dev);
22094ae72a1eSTejun Heo 	if (rc)
2210c6fd2807SJeff Garzik 		goto fail;
2211c6fd2807SJeff Garzik 
2212c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2213c6fd2807SJeff Garzik 		if (ata_dev_enabled(&ap->device[i]))
2214c6fd2807SJeff Garzik 			return 0;
2215c6fd2807SJeff Garzik 
2216c6fd2807SJeff Garzik 	/* no device present, disable port */
2217c6fd2807SJeff Garzik 	ata_port_disable(ap);
2218c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
2219c6fd2807SJeff Garzik 	return -ENODEV;
2220c6fd2807SJeff Garzik 
2221c6fd2807SJeff Garzik  fail:
22224ae72a1eSTejun Heo 	tries[dev->devno]--;
22234ae72a1eSTejun Heo 
2224c6fd2807SJeff Garzik 	switch (rc) {
2225c6fd2807SJeff Garzik 	case -EINVAL:
22264ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2227c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2228c6fd2807SJeff Garzik 		break;
22294ae72a1eSTejun Heo 
22304ae72a1eSTejun Heo 	case -ENODEV:
22314ae72a1eSTejun Heo 		/* give it just one more chance */
22324ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2233c6fd2807SJeff Garzik 	case -EIO:
22344ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
22354ae72a1eSTejun Heo 			/* This is the last chance, better to slow
22364ae72a1eSTejun Heo 			 * down than lose it.
22374ae72a1eSTejun Heo 			 */
2238c6fd2807SJeff Garzik 			sata_down_spd_limit(ap);
22394ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
22404ae72a1eSTejun Heo 		}
2241c6fd2807SJeff Garzik 	}
2242c6fd2807SJeff Garzik 
22434ae72a1eSTejun Heo 	if (!tries[dev->devno])
2244c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2245c6fd2807SJeff Garzik 
2246c6fd2807SJeff Garzik 	goto retry;
2247c6fd2807SJeff Garzik }
2248c6fd2807SJeff Garzik 
2249c6fd2807SJeff Garzik /**
2250c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2251c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2252c6fd2807SJeff Garzik  *
2253c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2254c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2255c6fd2807SJeff Garzik  *
2256cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2257c6fd2807SJeff Garzik  *	serialization.
2258c6fd2807SJeff Garzik  */
2259c6fd2807SJeff Garzik 
2260c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2261c6fd2807SJeff Garzik {
2262c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2263c6fd2807SJeff Garzik }
2264c6fd2807SJeff Garzik 
2265c6fd2807SJeff Garzik /**
2266c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2267c6fd2807SJeff Garzik  *	@ap: SATA port to printk link status about
2268c6fd2807SJeff Garzik  *
2269c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2270c6fd2807SJeff Garzik  *
2271c6fd2807SJeff Garzik  *	LOCKING:
2272c6fd2807SJeff Garzik  *	None.
2273c6fd2807SJeff Garzik  */
227443727fbcSJeff Garzik void sata_print_link_status(struct ata_port *ap)
2275c6fd2807SJeff Garzik {
2276c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2277c6fd2807SJeff Garzik 
2278c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
2279c6fd2807SJeff Garzik 		return;
2280c6fd2807SJeff Garzik 	sata_scr_read(ap, SCR_CONTROL, &scontrol);
2281c6fd2807SJeff Garzik 
2282c6fd2807SJeff Garzik 	if (ata_port_online(ap)) {
2283c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2284c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2285c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2286c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2287c6fd2807SJeff Garzik 	} else {
2288c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2289c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2290c6fd2807SJeff Garzik 				sstatus, scontrol);
2291c6fd2807SJeff Garzik 	}
2292c6fd2807SJeff Garzik }
2293c6fd2807SJeff Garzik 
2294c6fd2807SJeff Garzik /**
2295c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2296c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2297c6fd2807SJeff Garzik  *
2298c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2299c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2300c6fd2807SJeff Garzik  *	clear any reset condition.
2301c6fd2807SJeff Garzik  *
2302c6fd2807SJeff Garzik  *	LOCKING:
2303c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2304c6fd2807SJeff Garzik  *
2305c6fd2807SJeff Garzik  */
2306c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2307c6fd2807SJeff Garzik {
2308c6fd2807SJeff Garzik 	u32 sstatus;
2309c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2310c6fd2807SJeff Garzik 
2311c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2312c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2313c6fd2807SJeff Garzik 		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2314c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2315c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2316c6fd2807SJeff Garzik 		mdelay(1);
2317c6fd2807SJeff Garzik 	}
2318c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2319c6fd2807SJeff Garzik 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2320c6fd2807SJeff Garzik 
2321c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2322c6fd2807SJeff Garzik 	do {
2323c6fd2807SJeff Garzik 		msleep(200);
2324c6fd2807SJeff Garzik 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2325c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2326c6fd2807SJeff Garzik 			break;
2327c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2328c6fd2807SJeff Garzik 
2329c6fd2807SJeff Garzik 	/* print link status */
2330c6fd2807SJeff Garzik 	sata_print_link_status(ap);
2331c6fd2807SJeff Garzik 
2332c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2333c6fd2807SJeff Garzik 	if (!ata_port_offline(ap))
2334c6fd2807SJeff Garzik 		ata_port_probe(ap);
2335c6fd2807SJeff Garzik 	else
2336c6fd2807SJeff Garzik 		ata_port_disable(ap);
2337c6fd2807SJeff Garzik 
2338c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2339c6fd2807SJeff Garzik 		return;
2340c6fd2807SJeff Garzik 
2341c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2342c6fd2807SJeff Garzik 		ata_port_disable(ap);
2343c6fd2807SJeff Garzik 		return;
2344c6fd2807SJeff Garzik 	}
2345c6fd2807SJeff Garzik 
2346c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2347c6fd2807SJeff Garzik }
2348c6fd2807SJeff Garzik 
2349c6fd2807SJeff Garzik /**
2350c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2351c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2352c6fd2807SJeff Garzik  *
2353c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2354c6fd2807SJeff Garzik  *	the bus for devices.
2355c6fd2807SJeff Garzik  *
2356c6fd2807SJeff Garzik  *	LOCKING:
2357c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2358c6fd2807SJeff Garzik  *
2359c6fd2807SJeff Garzik  */
2360c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2361c6fd2807SJeff Garzik {
2362c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2363c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2364c6fd2807SJeff Garzik 		return;
2365c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2366c6fd2807SJeff Garzik }
2367c6fd2807SJeff Garzik 
2368c6fd2807SJeff Garzik /**
2369c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2370c6fd2807SJeff Garzik  *	@adev: device
2371c6fd2807SJeff Garzik  *
2372c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or NULL if no
2373c6fd2807SJeff Garzik  *	other device is present.
2374c6fd2807SJeff Garzik  */
2375c6fd2807SJeff Garzik 
2376c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2377c6fd2807SJeff Garzik {
2378c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
2379c6fd2807SJeff Garzik 	struct ata_device *pair = &ap->device[1 - adev->devno];
2380c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2381c6fd2807SJeff Garzik 		return NULL;
2382c6fd2807SJeff Garzik 	return pair;
2383c6fd2807SJeff Garzik }
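/*
 * Usage sketch (illustrative only, not part of the original source):
 * a PATA driver that programs shared cable timings could use
 * ata_dev_pair() to check whether the companion device on the same
 * cable is enabled.  The helper name below is hypothetical.
 */
#if 0	/* example only */
static int example_cable_shared(struct ata_device *adev)
{
	/* non-NULL only when the other device on the cable is enabled */
	return ata_dev_pair(adev) != NULL;
}
#endif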
2384c6fd2807SJeff Garzik 
2385c6fd2807SJeff Garzik /**
2386c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2387c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2388c6fd2807SJeff Garzik  *
2389c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2390c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2391c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2392c6fd2807SJeff Garzik  *	on this port.
2393c6fd2807SJeff Garzik  *
2394cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2395c6fd2807SJeff Garzik  *	serialization.
2396c6fd2807SJeff Garzik  */
2397c6fd2807SJeff Garzik 
2398c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2399c6fd2807SJeff Garzik {
2400c6fd2807SJeff Garzik 	ap->device[0].class = ATA_DEV_NONE;
2401c6fd2807SJeff Garzik 	ap->device[1].class = ATA_DEV_NONE;
2402c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2403c6fd2807SJeff Garzik }
2404c6fd2807SJeff Garzik 
2405c6fd2807SJeff Garzik /**
2406c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2407c6fd2807SJeff Garzik  *	@ap: Port to adjust SATA spd limit for
2408c6fd2807SJeff Garzik  *
2409c6fd2807SJeff Garzik  *	Adjust SATA spd limit of @ap downward.  Note that this
2410c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2411c6fd2807SJeff Garzik  *	using sata_set_spd().
2412c6fd2807SJeff Garzik  *
2413c6fd2807SJeff Garzik  *	LOCKING:
2414c6fd2807SJeff Garzik  *	Inherited from caller.
2415c6fd2807SJeff Garzik  *
2416c6fd2807SJeff Garzik  *	RETURNS:
2417c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2418c6fd2807SJeff Garzik  */
2419c6fd2807SJeff Garzik int sata_down_spd_limit(struct ata_port *ap)
2420c6fd2807SJeff Garzik {
2421c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2422c6fd2807SJeff Garzik 	int rc, highbit;
2423c6fd2807SJeff Garzik 
2424c6fd2807SJeff Garzik 	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2425c6fd2807SJeff Garzik 	if (rc)
2426c6fd2807SJeff Garzik 		return rc;
2427c6fd2807SJeff Garzik 
2428c6fd2807SJeff Garzik 	mask = ap->sata_spd_limit;
2429c6fd2807SJeff Garzik 	if (mask <= 1)
2430c6fd2807SJeff Garzik 		return -EINVAL;
2431c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2432c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2433c6fd2807SJeff Garzik 
2434c6fd2807SJeff Garzik 	spd = (sstatus >> 4) & 0xf;
2435c6fd2807SJeff Garzik 	if (spd <= 1)
2436c6fd2807SJeff Garzik 		return -EINVAL;
2437c6fd2807SJeff Garzik 	spd--;
2438c6fd2807SJeff Garzik 	mask &= (1 << spd) - 1;
2439c6fd2807SJeff Garzik 	if (!mask)
2440c6fd2807SJeff Garzik 		return -EINVAL;
2441c6fd2807SJeff Garzik 
2442c6fd2807SJeff Garzik 	ap->sata_spd_limit = mask;
2443c6fd2807SJeff Garzik 
2444c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2445c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2446c6fd2807SJeff Garzik 
2447c6fd2807SJeff Garzik 	return 0;
2448c6fd2807SJeff Garzik }
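/*
 * Worked example (values assumed for illustration): with
 * ap->sata_spd_limit == 0x3 (Gen1 and Gen2 allowed) and SStatus
 * reporting the current speed as 2 (3.0 Gbps), the highest allowed
 * bit is dropped first (mask == 0x1) and the mask is then clipped
 * below the current speed (mask &= (1 << 1) - 1, still 0x1), so the
 * new limit permits only 1.5 Gbps.  The caller must still apply it
 * with sata_set_spd().
 */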
2449c6fd2807SJeff Garzik 
2450c6fd2807SJeff Garzik static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2451c6fd2807SJeff Garzik {
2452c6fd2807SJeff Garzik 	u32 spd, limit;
2453c6fd2807SJeff Garzik 
2454c6fd2807SJeff Garzik 	if (ap->sata_spd_limit == UINT_MAX)
2455c6fd2807SJeff Garzik 		limit = 0;
2456c6fd2807SJeff Garzik 	else
2457c6fd2807SJeff Garzik 		limit = fls(ap->sata_spd_limit);
2458c6fd2807SJeff Garzik 
2459c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
2460c6fd2807SJeff Garzik 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2461c6fd2807SJeff Garzik 
2462c6fd2807SJeff Garzik 	return spd != limit;
2463c6fd2807SJeff Garzik }
2464c6fd2807SJeff Garzik 
2465c6fd2807SJeff Garzik /**
2466c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2467c6fd2807SJeff Garzik  *	@ap: Port in question
2468c6fd2807SJeff Garzik  *
2469c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2470c6fd2807SJeff Garzik  *	@ap->sata_spd_limit.  This function is used to determine
2471c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2472c6fd2807SJeff Garzik  *	configuration.
2473c6fd2807SJeff Garzik  *
2474c6fd2807SJeff Garzik  *	LOCKING:
2475c6fd2807SJeff Garzik  *	Inherited from caller.
2476c6fd2807SJeff Garzik  *
2477c6fd2807SJeff Garzik  *	RETURNS:
2478c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2479c6fd2807SJeff Garzik  */
2480c6fd2807SJeff Garzik int sata_set_spd_needed(struct ata_port *ap)
2481c6fd2807SJeff Garzik {
2482c6fd2807SJeff Garzik 	u32 scontrol;
2483c6fd2807SJeff Garzik 
2484c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2485c6fd2807SJeff Garzik 		return 0;
2486c6fd2807SJeff Garzik 
2487c6fd2807SJeff Garzik 	return __sata_set_spd_needed(ap, &scontrol);
2488c6fd2807SJeff Garzik }
2489c6fd2807SJeff Garzik 
2490c6fd2807SJeff Garzik /**
2491c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2492c6fd2807SJeff Garzik  *	@ap: Port to set SATA spd for
2493c6fd2807SJeff Garzik  *
2494c6fd2807SJeff Garzik  *	Set SATA spd of @ap according to sata_spd_limit.
2495c6fd2807SJeff Garzik  *
2496c6fd2807SJeff Garzik  *	LOCKING:
2497c6fd2807SJeff Garzik  *	Inherited from caller.
2498c6fd2807SJeff Garzik  *
2499c6fd2807SJeff Garzik  *	RETURNS:
2500c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2501c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2502c6fd2807SJeff Garzik  */
2503c6fd2807SJeff Garzik int sata_set_spd(struct ata_port *ap)
2504c6fd2807SJeff Garzik {
2505c6fd2807SJeff Garzik 	u32 scontrol;
2506c6fd2807SJeff Garzik 	int rc;
2507c6fd2807SJeff Garzik 
2508c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2509c6fd2807SJeff Garzik 		return rc;
2510c6fd2807SJeff Garzik 
2511c6fd2807SJeff Garzik 	if (!__sata_set_spd_needed(ap, &scontrol))
2512c6fd2807SJeff Garzik 		return 0;
2513c6fd2807SJeff Garzik 
2514c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2515c6fd2807SJeff Garzik 		return rc;
2516c6fd2807SJeff Garzik 
2517c6fd2807SJeff Garzik 	return 1;
2518c6fd2807SJeff Garzik }
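/*
 * Usage sketch (illustrative only, not part of the original source):
 * a typical caller applies the limit and schedules a hardreset only
 * when SControl actually changed.  The function name below is
 * hypothetical.
 */
#if 0	/* example only */
static int example_apply_spd_limit(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc = sata_set_spd(ap);

	if (rc < 0)
		return rc;	/* SCR registers inaccessible */
	if (rc == 1)
		ehc->i.action |= ATA_EH_HARDRESET;	/* renegotiate link */
	return 0;
}
#endif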
2519c6fd2807SJeff Garzik 
2520c6fd2807SJeff Garzik /*
2521c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2522c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2523c6fd2807SJeff Garzik  */
2524c6fd2807SJeff Garzik /*
2525b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2526c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2527b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2528b352e57dSAlan Cox  *
2529b352e57dSAlan Cox  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2530c6fd2807SJeff Garzik  */
2531c6fd2807SJeff Garzik 
2532c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2533c6fd2807SJeff Garzik 
2534c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2535c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2536c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2537c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2538c6fd2807SJeff Garzik 
2539b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2540b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2541c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2542c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2543c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2544c6fd2807SJeff Garzik 
2545c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2546c6fd2807SJeff Garzik 
2547c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2548c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2549c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2550c6fd2807SJeff Garzik 
2551c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2552c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2553c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2554c6fd2807SJeff Garzik 
2555b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2556b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2557c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2558c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2559c6fd2807SJeff Garzik 
2560c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2561c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2562c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2563c6fd2807SJeff Garzik 
2564c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2565c6fd2807SJeff Garzik 
2566c6fd2807SJeff Garzik 	{ 0xFF }
2567c6fd2807SJeff Garzik };
2568c6fd2807SJeff Garzik 
2569c6fd2807SJeff Garzik #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
2570c6fd2807SJeff Garzik #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
2571c6fd2807SJeff Garzik 
2572c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2573c6fd2807SJeff Garzik {
2574c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2575c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2576c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2577c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2578c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2579c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2580c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2581c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2582c6fd2807SJeff Garzik }
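/*
 * Worked example (clock value assumed for illustration): with a 30 ns
 * (33 MHz) bus clock passed as T == 30000, i.e. in the ns * 1000 units
 * that EZ()/ENOUGH() divide by, the XFER_PIO_4 entry above quantizes
 * to setup = ceil(25000/30000) = 1 clock, active = ceil(70000/30000)
 * = 3 clocks, recover = 1 clock and cycle = 4 clocks.
 */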
2583c6fd2807SJeff Garzik 
2584c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2585c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2586c6fd2807SJeff Garzik {
2587c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2588c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2589c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2590c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2591c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2592c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2593c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2594c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2595c6fd2807SJeff Garzik }
2596c6fd2807SJeff Garzik 
2597c6fd2807SJeff Garzik static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2598c6fd2807SJeff Garzik {
2599c6fd2807SJeff Garzik 	const struct ata_timing *t;
2600c6fd2807SJeff Garzik 
2601c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2602c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2603c6fd2807SJeff Garzik 			return NULL;
2604c6fd2807SJeff Garzik 	return t;
2605c6fd2807SJeff Garzik }
2606c6fd2807SJeff Garzik 
2607c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2608c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2609c6fd2807SJeff Garzik {
2610c6fd2807SJeff Garzik 	const struct ata_timing *s;
2611c6fd2807SJeff Garzik 	struct ata_timing p;
2612c6fd2807SJeff Garzik 
2613c6fd2807SJeff Garzik 	/*
2614c6fd2807SJeff Garzik 	 * Find the mode.
2615c6fd2807SJeff Garzik 	 */
2616c6fd2807SJeff Garzik 
2617c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2618c6fd2807SJeff Garzik 		return -EINVAL;
2619c6fd2807SJeff Garzik 
2620c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2621c6fd2807SJeff Garzik 
2622c6fd2807SJeff Garzik 	/*
2623c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2624c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2625c6fd2807SJeff Garzik 	 */
2626c6fd2807SJeff Garzik 
2627c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2628c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2629c6fd2807SJeff Garzik 		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2630c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2631c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2632c6fd2807SJeff Garzik 		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2633c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2634c6fd2807SJeff Garzik 		}
2635c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2636c6fd2807SJeff Garzik 	}
2637c6fd2807SJeff Garzik 
2638c6fd2807SJeff Garzik 	/*
2639c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2640c6fd2807SJeff Garzik 	 */
2641c6fd2807SJeff Garzik 
2642c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2643c6fd2807SJeff Garzik 
2644c6fd2807SJeff Garzik 	/*
2645c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2646c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands.  We have to ensure that the
2647c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2648c6fd2807SJeff Garzik 	 */
2649c6fd2807SJeff Garzik 
2650fd3367afSAlan 	if (speed > XFER_PIO_6) {
2651c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2652c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2653c6fd2807SJeff Garzik 	}
2654c6fd2807SJeff Garzik 
2655c6fd2807SJeff Garzik 	/*
2656c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2657c6fd2807SJeff Garzik 	 */
2658c6fd2807SJeff Garzik 
2659c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2660c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2661c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2662c6fd2807SJeff Garzik 	}
2663c6fd2807SJeff Garzik 
2664c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2665c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2666c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2667c6fd2807SJeff Garzik 	}
26684f701d1eSAlan Cox 
26694f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
26704f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
26714f701d1eSAlan Cox 	   if so, we must correct this. */
26724f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
26734f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2674c6fd2807SJeff Garzik 
2675c6fd2807SJeff Garzik 	return 0;
2676c6fd2807SJeff Garzik }
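/*
 * Usage sketch (illustrative only, not part of the original source):
 * a controller driver's ->set_piomode() hook might call
 * ata_timing_compute() like this.  The function name and the clock
 * values (30000 == 30 ns bus clock, 1000 for the UDMA unit) are
 * assumptions made for the example.
 */
#if 0	/* example only */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;

	if (ata_timing_compute(adev, adev->pio_mode, &t, 30000, 1000))
		return;

	/* a real driver would now program t.setup, t.active and
	 * t.recover (all in bus clocks) into its timing registers */
}
#endif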
2677c6fd2807SJeff Garzik 
2678c6fd2807SJeff Garzik /**
2679c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2680c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2681458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2682c6fd2807SJeff Garzik  *
2683c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2684c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2685c6fd2807SJeff Garzik  *	will apply the limit.
2686c6fd2807SJeff Garzik  *
2687c6fd2807SJeff Garzik  *	LOCKING:
2688c6fd2807SJeff Garzik  *	Inherited from caller.
2689c6fd2807SJeff Garzik  *
2690c6fd2807SJeff Garzik  *	RETURNS:
2691c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2692c6fd2807SJeff Garzik  */
2693458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2694c6fd2807SJeff Garzik {
2695458337dbSTejun Heo 	char buf[32];
2696458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2697458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2698458337dbSTejun Heo 	int quiet, highbit;
2699c6fd2807SJeff Garzik 
2700458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2701458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2702458337dbSTejun Heo 
2703458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2704458337dbSTejun Heo 						  dev->mwdma_mask,
2705c6fd2807SJeff Garzik 						  dev->udma_mask);
2706458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2707c6fd2807SJeff Garzik 
2708458337dbSTejun Heo 	switch (sel) {
2709458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2710458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2711458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2712458337dbSTejun Heo 		break;
2713458337dbSTejun Heo 
2714458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2715458337dbSTejun Heo 		if (udma_mask) {
2716458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2717458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2718458337dbSTejun Heo 			if (!udma_mask)
2719458337dbSTejun Heo 				return -ENOENT;
2720458337dbSTejun Heo 		} else if (mwdma_mask) {
2721458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2722458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2723458337dbSTejun Heo 			if (!mwdma_mask)
2724458337dbSTejun Heo 				return -ENOENT;
2725458337dbSTejun Heo 		}
2726458337dbSTejun Heo 		break;
2727458337dbSTejun Heo 
2728458337dbSTejun Heo 	case ATA_DNXFER_40C:
2729458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2730458337dbSTejun Heo 		break;
2731458337dbSTejun Heo 
2732458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2733458337dbSTejun Heo 		pio_mask &= 1;
2734458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2735458337dbSTejun Heo 		mwdma_mask = 0;
2736458337dbSTejun Heo 		udma_mask = 0;
2737458337dbSTejun Heo 		break;
2738458337dbSTejun Heo 
2739458337dbSTejun Heo 	default:
2740458337dbSTejun Heo 		BUG();
2741458337dbSTejun Heo 	}
2742458337dbSTejun Heo 
2743458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2744458337dbSTejun Heo 
2745458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2746458337dbSTejun Heo 		return -ENOENT;
2747458337dbSTejun Heo 
2748458337dbSTejun Heo 	if (!quiet) {
2749458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2750458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
2751458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
2752458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2753458337dbSTejun Heo 		else
2754458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
2755458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
2756458337dbSTejun Heo 
2757458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
2758458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
2759458337dbSTejun Heo 	}
2760c6fd2807SJeff Garzik 
2761c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2762c6fd2807SJeff Garzik 			    &dev->udma_mask);
2763c6fd2807SJeff Garzik 
2764c6fd2807SJeff Garzik 	return 0;
2765c6fd2807SJeff Garzik }
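/*
 * Worked example (mask value assumed for illustration): with
 * sel == ATA_DNXFER_DMA and dev->udma_mask == 0x7f (UDMA/16 through
 * UDMA/133), the highest bit is cleared leaving 0x3f, so the fastest
 * remaining mode is UDMA/100 and a "limiting speed to ..." warning
 * naming the new fastest modes is printed.
 */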
2766c6fd2807SJeff Garzik 
2767c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
2768c6fd2807SJeff Garzik {
2769baa1e78aSTejun Heo 	struct ata_eh_context *ehc = &dev->ap->eh_context;
2770c6fd2807SJeff Garzik 	unsigned int err_mask;
2771c6fd2807SJeff Garzik 	int rc;
2772c6fd2807SJeff Garzik 
2773c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
2774c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2775c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
2776c6fd2807SJeff Garzik 
2777c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
277811750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
277911750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
278011750a40SAlan 		err_mask &= ~AC_ERR_DEV;
278111750a40SAlan 
2782c6fd2807SJeff Garzik 	if (err_mask) {
2783c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2784c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
2785c6fd2807SJeff Garzik 		return -EIO;
2786c6fd2807SJeff Garzik 	}
2787c6fd2807SJeff Garzik 
2788baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2789c6fd2807SJeff Garzik 	rc = ata_dev_revalidate(dev, 0);
2790baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2791c6fd2807SJeff Garzik 	if (rc)
2792c6fd2807SJeff Garzik 		return rc;
2793c6fd2807SJeff Garzik 
2794c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2795c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
2796c6fd2807SJeff Garzik 
2797c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2798c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2799c6fd2807SJeff Garzik 	return 0;
2800c6fd2807SJeff Garzik }
2801c6fd2807SJeff Garzik 
2802c6fd2807SJeff Garzik /**
280304351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2804c6fd2807SJeff Garzik  *	@ap: port on which timings will be programmed
2805c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
2806c6fd2807SJeff Garzik  *
280704351821SAlan  *	Standard implementation of the function used to tune and set
280804351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
280904351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
2810c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
2811c6fd2807SJeff Garzik  *
2812c6fd2807SJeff Garzik  *	LOCKING:
2813c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2814c6fd2807SJeff Garzik  *
2815c6fd2807SJeff Garzik  *	RETURNS:
2816c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
2817c6fd2807SJeff Garzik  */
281804351821SAlan 
281904351821SAlan int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2820c6fd2807SJeff Garzik {
2821c6fd2807SJeff Garzik 	struct ata_device *dev;
2822c6fd2807SJeff Garzik 	int i, rc = 0, used_dma = 0, found = 0;
2823c6fd2807SJeff Garzik 
2824c6fd2807SJeff Garzik 
2825c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
2826c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2827c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
2828c6fd2807SJeff Garzik 
2829c6fd2807SJeff Garzik 		dev = &ap->device[i];
2830c6fd2807SJeff Garzik 
2831c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2832c6fd2807SJeff Garzik 			continue;
2833c6fd2807SJeff Garzik 
2834c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
2835c6fd2807SJeff Garzik 
2836c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2837c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2838c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2839c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2840c6fd2807SJeff Garzik 
2841c6fd2807SJeff Garzik 		found = 1;
2842c6fd2807SJeff Garzik 		if (dev->dma_mode)
2843c6fd2807SJeff Garzik 			used_dma = 1;
2844c6fd2807SJeff Garzik 	}
2845c6fd2807SJeff Garzik 	if (!found)
2846c6fd2807SJeff Garzik 		goto out;
2847c6fd2807SJeff Garzik 
2848c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
2849c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2850c6fd2807SJeff Garzik 		dev = &ap->device[i];
2851c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2852c6fd2807SJeff Garzik 			continue;
2853c6fd2807SJeff Garzik 
2854c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
2855c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2856c6fd2807SJeff Garzik 			rc = -EINVAL;
2857c6fd2807SJeff Garzik 			goto out;
2858c6fd2807SJeff Garzik 		}
2859c6fd2807SJeff Garzik 
2860c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
2861c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
2862c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
2863c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
2864c6fd2807SJeff Garzik 	}
2865c6fd2807SJeff Garzik 
2866c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
2867c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2868c6fd2807SJeff Garzik 		dev = &ap->device[i];
2869c6fd2807SJeff Garzik 
2870c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2871c6fd2807SJeff Garzik 			continue;
2872c6fd2807SJeff Garzik 
2873c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
2874c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2875c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
2876c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
2877c6fd2807SJeff Garzik 	}
2878c6fd2807SJeff Garzik 
2879c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
2880c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2881c6fd2807SJeff Garzik 		dev = &ap->device[i];
2882c6fd2807SJeff Garzik 
288318d90debSAlan 		/* don't update disabled devices' xfer mode */
28849666f400STejun Heo 		if (!ata_dev_enabled(dev))
2885c6fd2807SJeff Garzik 			continue;
2886c6fd2807SJeff Garzik 
2887c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
2888c6fd2807SJeff Garzik 		if (rc)
2889c6fd2807SJeff Garzik 			goto out;
2890c6fd2807SJeff Garzik 	}
2891c6fd2807SJeff Garzik 
2892c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
2893c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
2894c6fd2807SJeff Garzik 	 */
2895cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2896032af1ceSAlan 		ap->host->simplex_claimed = ap;
2897c6fd2807SJeff Garzik 
2898c6fd2807SJeff Garzik  out:
2899c6fd2807SJeff Garzik 	if (rc)
2900c6fd2807SJeff Garzik 		*r_failed_dev = dev;
2901c6fd2807SJeff Garzik 	return rc;
2902c6fd2807SJeff Garzik }
2903c6fd2807SJeff Garzik 
2904c6fd2807SJeff Garzik /**
290504351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
290604351821SAlan  *	@ap: port on which timings will be programmed
290704351821SAlan  *	@r_failed_dev: out parameter for failed device
290804351821SAlan  *
290904351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
291004351821SAlan  *	ata_set_mode() fails, pointer to the failing device is
291104351821SAlan  *	returned in @r_failed_dev.
291204351821SAlan  *
291304351821SAlan  *	LOCKING:
291404351821SAlan  *	PCI/etc. bus probe sem.
291504351821SAlan  *
291604351821SAlan  *	RETURNS:
291704351821SAlan  *	0 on success, negative errno otherwise
291804351821SAlan  */
291904351821SAlan int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
292004351821SAlan {
292104351821SAlan 	/* has private set_mode? */
292204351821SAlan 	if (ap->ops->set_mode)
292304351821SAlan 		return ap->ops->set_mode(ap, r_failed_dev);
292404351821SAlan 	return ata_do_set_mode(ap, r_failed_dev);
292504351821SAlan }
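/*
 * Usage sketch (illustrative only, not part of the original source):
 * error handling code typically retries in a loop, dropping the
 * failing device's transfer mode between attempts.  The function name
 * and the ATA_DNXFER_PIO selector are chosen only for illustration.
 */
#if 0	/* example only */
static int example_configure_xfer(struct ata_port *ap)
{
	struct ata_device *failed_dev;
	int rc;

	while ((rc = ata_set_mode(ap, &failed_dev))) {
		/* knock the failing device down a notch and retry;
		 * give up once nothing is left to drop */
		if (ata_down_xfermask_limit(failed_dev, ATA_DNXFER_PIO))
			break;
	}
	return rc;
}
#endif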
292604351821SAlan 
292704351821SAlan /**
2928c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
2929c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
2930c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
2931c6fd2807SJeff Garzik  *
2932c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
2933c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
2934c6fd2807SJeff Garzik  *	other threads.
2935c6fd2807SJeff Garzik  *
2936c6fd2807SJeff Garzik  *	LOCKING:
2937cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
2938c6fd2807SJeff Garzik  */
2939c6fd2807SJeff Garzik 
2940c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
2941c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
2942c6fd2807SJeff Garzik {
2943c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
2944c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
2945c6fd2807SJeff Garzik }
2946c6fd2807SJeff Garzik 
2947c6fd2807SJeff Garzik /**
2948c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
2949c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
2950c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
2951c6fd2807SJeff Garzik  *	@tmout: overall timeout
2952c6fd2807SJeff Garzik  *
2953c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
2954c6fd2807SJeff Garzik  *	or a timeout occurs.
2955c6fd2807SJeff Garzik  *
2956d1adc1bbSTejun Heo  *	LOCKING:
2957d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
2958d1adc1bbSTejun Heo  *
2959d1adc1bbSTejun Heo  *	RETURNS:
2960d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
2961c6fd2807SJeff Garzik  */
2962d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
2963c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
2964c6fd2807SJeff Garzik {
2965c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
2966c6fd2807SJeff Garzik 	u8 status;
2967c6fd2807SJeff Garzik 
2968c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
2969c6fd2807SJeff Garzik 	timer_start = jiffies;
2970c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
2971d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2972d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2973c6fd2807SJeff Garzik 		msleep(50);
2974c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
2975c6fd2807SJeff Garzik 	}
2976c6fd2807SJeff Garzik 
2977d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
2978c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
297935aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
298035aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
2981c6fd2807SJeff Garzik 
2982c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
2983d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2984d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2985c6fd2807SJeff Garzik 		msleep(50);
2986c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
2987c6fd2807SJeff Garzik 	}
2988c6fd2807SJeff Garzik 
2989d1adc1bbSTejun Heo 	if (status == 0xff)
2990d1adc1bbSTejun Heo 		return -ENODEV;
2991d1adc1bbSTejun Heo 
2992c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
2993c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
299435aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
299535aa7a43SJeff Garzik 				tmout / HZ, status);
2996d1adc1bbSTejun Heo 		return -EBUSY;
2997c6fd2807SJeff Garzik 	}
2998c6fd2807SJeff Garzik 
2999c6fd2807SJeff Garzik 	return 0;
3000c6fd2807SJeff Garzik }
3001c6fd2807SJeff Garzik 
3002d4b2bab4STejun Heo /**
3003d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3004d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3005d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3006d4b2bab4STejun Heo  *
3007d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3008d4b2bab4STejun Heo  *	occurs.
3009d4b2bab4STejun Heo  *
3010d4b2bab4STejun Heo  *	LOCKING:
3011d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3012d4b2bab4STejun Heo  *
3013d4b2bab4STejun Heo  *	RETURNS:
3014d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3015d4b2bab4STejun Heo  */
3016d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3017d4b2bab4STejun Heo {
3018d4b2bab4STejun Heo 	unsigned long start = jiffies;
3019d4b2bab4STejun Heo 	int warned = 0;
3020d4b2bab4STejun Heo 
3021d4b2bab4STejun Heo 	while (1) {
3022d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3023d4b2bab4STejun Heo 		unsigned long now = jiffies;
3024d4b2bab4STejun Heo 
3025d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3026d4b2bab4STejun Heo 			return 0;
3027fd7fe701STejun Heo 		if (!ata_port_online(ap) && status == 0xff)
3028d4b2bab4STejun Heo 			return -ENODEV;
3029d4b2bab4STejun Heo 		if (time_after(now, deadline))
3030d4b2bab4STejun Heo 			return -EBUSY;
3031d4b2bab4STejun Heo 
3032d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3033d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3034d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3035d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3036d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3037d4b2bab4STejun Heo 			warned = 1;
3038d4b2bab4STejun Heo 		}
3039d4b2bab4STejun Heo 
3040d4b2bab4STejun Heo 		msleep(50);
3041d4b2bab4STejun Heo 	}
3042d4b2bab4STejun Heo }
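/*
 * Usage note (illustrative, timeout value assumed): callers pass an
 * absolute deadline in jiffies, e.g.
 *
 *	rc = ata_wait_ready(ap, jiffies + 30 * HZ);
 *
 * gives the device up to roughly 30 seconds to drop BSY before
 * -EBUSY is returned.
 */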
3043d4b2bab4STejun Heo 
3044d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3045d4b2bab4STejun Heo 			      unsigned long deadline)
3046c6fd2807SJeff Garzik {
3047c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3048c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3049c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
30509b89391cSTejun Heo 	int rc, ret = 0;
3051c6fd2807SJeff Garzik 
3052c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3053c6fd2807SJeff Garzik 	 * BSY bit to clear
3054c6fd2807SJeff Garzik 	 */
3055d4b2bab4STejun Heo 	if (dev0) {
3056d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30579b89391cSTejun Heo 		if (rc) {
30589b89391cSTejun Heo 			if (rc != -ENODEV)
3059d4b2bab4STejun Heo 				return rc;
30609b89391cSTejun Heo 			ret = rc;
30619b89391cSTejun Heo 		}
3062d4b2bab4STejun Heo 	}
3063c6fd2807SJeff Garzik 
3064c6fd2807SJeff Garzik 	/* if device 1 was found in ata_devchk, wait for
3065c6fd2807SJeff Garzik 	 * register access, then wait for BSY to clear
3066c6fd2807SJeff Garzik 	 */
3067c6fd2807SJeff Garzik 	while (dev1) {
3068c6fd2807SJeff Garzik 		u8 nsect, lbal;
3069c6fd2807SJeff Garzik 
3070c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
30710d5ff566STejun Heo 		nsect = ioread8(ioaddr->nsect_addr);
30720d5ff566STejun Heo 		lbal = ioread8(ioaddr->lbal_addr);
3073c6fd2807SJeff Garzik 		if ((nsect == 1) && (lbal == 1))
3074c6fd2807SJeff Garzik 			break;
3075d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3076d4b2bab4STejun Heo 			return -EBUSY;
3077c6fd2807SJeff Garzik 		msleep(50);	/* give drive a breather */
3078c6fd2807SJeff Garzik 	}
3079d4b2bab4STejun Heo 	if (dev1) {
3080d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30819b89391cSTejun Heo 		if (rc) {
30829b89391cSTejun Heo 			if (rc != -ENODEV)
3083d4b2bab4STejun Heo 				return rc;
30849b89391cSTejun Heo 			ret = rc;
30859b89391cSTejun Heo 		}
3086d4b2bab4STejun Heo 	}
3087c6fd2807SJeff Garzik 
3088c6fd2807SJeff Garzik 	/* is all this really necessary? */
3089c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3090c6fd2807SJeff Garzik 	if (dev1)
3091c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3092c6fd2807SJeff Garzik 	if (dev0)
3093c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3094d4b2bab4STejun Heo 
30959b89391cSTejun Heo 	return ret;
3096c6fd2807SJeff Garzik }
3097c6fd2807SJeff Garzik 
3098d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3099d4b2bab4STejun Heo 			     unsigned long deadline)
3100c6fd2807SJeff Garzik {
3101c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3102c6fd2807SJeff Garzik 
310344877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3104c6fd2807SJeff Garzik 
3105c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
31060d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3107c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
31080d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3109c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
31100d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3111c6fd2807SJeff Garzik 
3112c6fd2807SJeff Garzik 	/* spec mandates ">= 2ms" before checking status.
3113c6fd2807SJeff Garzik 	 * We wait 150ms, because that was the magic delay used for
3114c6fd2807SJeff Garzik 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3115c6fd2807SJeff Garzik 	 * between when the ATA command register is written, and then
3116c6fd2807SJeff Garzik 	 * status is checked.  Because waiting for "a while" before
3117c6fd2807SJeff Garzik 	 * checking status is fine, post SRST, we perform this magic
3118c6fd2807SJeff Garzik 	 * delay here as well.
3119c6fd2807SJeff Garzik 	 *
3120c6fd2807SJeff Garzik 	 * Old drivers/ide uses the 2mS rule and then waits for ready
3121c6fd2807SJeff Garzik 	 */
3122c6fd2807SJeff Garzik 	msleep(150);
3123c6fd2807SJeff Garzik 
3124c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3125c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3126c6fd2807SJeff Garzik 	 * pulldown resistor.
3127c6fd2807SJeff Garzik 	 */
3128d1adc1bbSTejun Heo 	if (ata_check_status(ap) == 0xFF)
31299b89391cSTejun Heo 		return -ENODEV;
3130c6fd2807SJeff Garzik 
3131d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3132c6fd2807SJeff Garzik }
3133c6fd2807SJeff Garzik 
3134c6fd2807SJeff Garzik /**
3135c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3136c6fd2807SJeff Garzik  *	@ap: port to reset
3137c6fd2807SJeff Garzik  *
3138c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3139c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3140c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3141c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3142c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3143c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3144c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3145c6fd2807SJeff Garzik  *
3146c6fd2807SJeff Garzik  *	LOCKING:
3147c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3148cca3974eSJeff Garzik  *	Obtains host lock.
3149c6fd2807SJeff Garzik  *
3150c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3151c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3152c6fd2807SJeff Garzik  */
3153c6fd2807SJeff Garzik 
3154c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3155c6fd2807SJeff Garzik {
3156c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3157c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3158c6fd2807SJeff Garzik 	u8 err;
3159c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
31609b89391cSTejun Heo 	int rc;
3161c6fd2807SJeff Garzik 
316244877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3163c6fd2807SJeff Garzik 
3164c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3165c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3166c6fd2807SJeff Garzik 		dev0 = 1;
3167c6fd2807SJeff Garzik 	else {
3168c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3169c6fd2807SJeff Garzik 		if (slave_possible)
3170c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3171c6fd2807SJeff Garzik 	}
3172c6fd2807SJeff Garzik 
3173c6fd2807SJeff Garzik 	if (dev0)
3174c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3175c6fd2807SJeff Garzik 	if (dev1)
3176c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3177c6fd2807SJeff Garzik 
3178c6fd2807SJeff Garzik 	/* select device 0 again */
3179c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3180c6fd2807SJeff Garzik 
3181c6fd2807SJeff Garzik 	/* issue bus reset */
31829b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
31839b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
31849b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3185c6fd2807SJeff Garzik 			goto err_out;
31869b89391cSTejun Heo 	}
3187c6fd2807SJeff Garzik 
3188c6fd2807SJeff Garzik 	/*
3189c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3190c6fd2807SJeff Garzik 	 */
3191c6fd2807SJeff Garzik 	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
3192c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
3193c6fd2807SJeff Garzik 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
3194c6fd2807SJeff Garzik 
3195c6fd2807SJeff Garzik 	/* re-enable interrupts */
319683625006SAkira Iguchi 	ap->ops->irq_on(ap);
3197c6fd2807SJeff Garzik 
3198c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3199c6fd2807SJeff Garzik 	if (ap->device[1].class != ATA_DEV_NONE)
3200c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3201c6fd2807SJeff Garzik 	if (ap->device[0].class != ATA_DEV_NONE)
3202c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3203c6fd2807SJeff Garzik 
3204c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
3205c6fd2807SJeff Garzik 	if ((ap->device[0].class == ATA_DEV_NONE) &&
3206c6fd2807SJeff Garzik 	    (ap->device[1].class == ATA_DEV_NONE))
3207c6fd2807SJeff Garzik 		goto err_out;
3208c6fd2807SJeff Garzik 
3209c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3210c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
32110d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3212c6fd2807SJeff Garzik 	}
3213c6fd2807SJeff Garzik 
3214c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3215c6fd2807SJeff Garzik 	return;
3216c6fd2807SJeff Garzik 
3217c6fd2807SJeff Garzik err_out:
3218c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3219c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
3220c6fd2807SJeff Garzik 
3221c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3222c6fd2807SJeff Garzik }
3223c6fd2807SJeff Garzik 
3224c6fd2807SJeff Garzik /**
3225c6fd2807SJeff Garzik  *	sata_phy_debounce - debounce SATA phy status
3226c6fd2807SJeff Garzik  *	@ap: ATA port to debounce SATA phy status for
3227c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3228d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3229c6fd2807SJeff Garzik  *
3230c6fd2807SJeff Garzik  *	Make sure SStatus of @ap reaches stable state, determined by
3231c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3232c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3233d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3234d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3235c6fd2807SJeff Garzik  *	until timeout then returns 0 if DET is stable at 1.
3236c6fd2807SJeff Garzik  *
3237d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3238d4b2bab4STejun Heo  *	two is used.
3239d4b2bab4STejun Heo  *
3240c6fd2807SJeff Garzik  *	LOCKING:
3241c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3242c6fd2807SJeff Garzik  *
3243c6fd2807SJeff Garzik  *	RETURNS:
3244c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3245c6fd2807SJeff Garzik  */
3246d4b2bab4STejun Heo int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3247d4b2bab4STejun Heo 		      unsigned long deadline)
3248c6fd2807SJeff Garzik {
3249c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3250d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3251d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3252c6fd2807SJeff Garzik 	u32 last, cur;
3253c6fd2807SJeff Garzik 	int rc;
3254c6fd2807SJeff Garzik 
3255d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3256d4b2bab4STejun Heo 	if (time_before(t, deadline))
3257d4b2bab4STejun Heo 		deadline = t;
3258d4b2bab4STejun Heo 
3259c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3260c6fd2807SJeff Garzik 		return rc;
3261c6fd2807SJeff Garzik 	cur &= 0xf;
3262c6fd2807SJeff Garzik 
3263c6fd2807SJeff Garzik 	last = cur;
3264c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3265c6fd2807SJeff Garzik 
3266c6fd2807SJeff Garzik 	while (1) {
3267c6fd2807SJeff Garzik 		msleep(interval_msec);
3268c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3269c6fd2807SJeff Garzik 			return rc;
3270c6fd2807SJeff Garzik 		cur &= 0xf;
3271c6fd2807SJeff Garzik 
3272c6fd2807SJeff Garzik 		/* DET stable? */
3273c6fd2807SJeff Garzik 		if (cur == last) {
3274d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3275c6fd2807SJeff Garzik 				continue;
3276c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3277c6fd2807SJeff Garzik 				return 0;
3278c6fd2807SJeff Garzik 			continue;
3279c6fd2807SJeff Garzik 		}
3280c6fd2807SJeff Garzik 
3281c6fd2807SJeff Garzik 		/* unstable, start over */
3282c6fd2807SJeff Garzik 		last = cur;
3283c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3284c6fd2807SJeff Garzik 
3285d4b2bab4STejun Heo 		/* check deadline */
3286d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3287c6fd2807SJeff Garzik 			return -EBUSY;
3288c6fd2807SJeff Garzik 	}
3289c6fd2807SJeff Garzik }
3290c6fd2807SJeff Garzik 
3291c6fd2807SJeff Garzik /**
3292c6fd2807SJeff Garzik  *	sata_phy_resume - resume SATA phy
3293c6fd2807SJeff Garzik  *	@ap: ATA port to resume SATA phy for
3294c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3295d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3296c6fd2807SJeff Garzik  *
3297c6fd2807SJeff Garzik  *	Resume SATA phy of @ap and debounce it.
3298c6fd2807SJeff Garzik  *
3299c6fd2807SJeff Garzik  *	LOCKING:
3300c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3301c6fd2807SJeff Garzik  *
3302c6fd2807SJeff Garzik  *	RETURNS:
3303c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3304c6fd2807SJeff Garzik  */
3305d4b2bab4STejun Heo int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3306d4b2bab4STejun Heo 		    unsigned long deadline)
3307c6fd2807SJeff Garzik {
3308c6fd2807SJeff Garzik 	u32 scontrol;
3309c6fd2807SJeff Garzik 	int rc;
3310c6fd2807SJeff Garzik 
3311c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3312c6fd2807SJeff Garzik 		return rc;
3313c6fd2807SJeff Garzik 
3314c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3315c6fd2807SJeff Garzik 
3316c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3317c6fd2807SJeff Garzik 		return rc;
3318c6fd2807SJeff Garzik 
3319c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3320c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3321c6fd2807SJeff Garzik 	 */
3322c6fd2807SJeff Garzik 	msleep(200);
3323c6fd2807SJeff Garzik 
3324d4b2bab4STejun Heo 	return sata_phy_debounce(ap, params, deadline);
3325c6fd2807SJeff Garzik }
3326c6fd2807SJeff Garzik 
3327c6fd2807SJeff Garzik /**
3328c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3329c6fd2807SJeff Garzik  *	@ap: ATA port to be reset
3330d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3331c6fd2807SJeff Garzik  *
3332b8cffc6aSTejun Heo  *	@ap is about to be reset.  Initialize it.  Failure from
3333b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give up
3334b8cffc6aSTejun Heo  *	that port, so prereset should be best-effort.  It does its
3335b8cffc6aSTejun Heo  *	best to prepare for reset sequence but if things go wrong, it
3336b8cffc6aSTejun Heo  *	should just whine, not fail.
3337c6fd2807SJeff Garzik  *
3338c6fd2807SJeff Garzik  *	LOCKING:
3339c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3340c6fd2807SJeff Garzik  *
3341c6fd2807SJeff Garzik  *	RETURNS:
3342c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3343c6fd2807SJeff Garzik  */
3344d4b2bab4STejun Heo int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3345c6fd2807SJeff Garzik {
3346c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
3347c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3348c6fd2807SJeff Garzik 	int rc;
3349c6fd2807SJeff Garzik 
335031daabdaSTejun Heo 	/* handle link resume */
3351c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3352c6fd2807SJeff Garzik 	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3353c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3354c6fd2807SJeff Garzik 
3355c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3356c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3357c6fd2807SJeff Garzik 		return 0;
3358c6fd2807SJeff Garzik 
3359c6fd2807SJeff Garzik 	/* if SATA, resume phy */
3360c6fd2807SJeff Garzik 	if (ap->cbl == ATA_CBL_SATA) {
3361d4b2bab4STejun Heo 		rc = sata_phy_resume(ap, timing, deadline);
3362b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3363b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3364c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_WARNING, "failed to resume "
3365c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3366c6fd2807SJeff Garzik 	}
3367c6fd2807SJeff Garzik 
3368c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3369c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3370c6fd2807SJeff Garzik 	 */
3371b8cffc6aSTejun Heo 	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3372b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
33736dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3374b8cffc6aSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "device not ready "
3375b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3376b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3377b8cffc6aSTejun Heo 		}
3378b8cffc6aSTejun Heo 	}
3379c6fd2807SJeff Garzik 
3380c6fd2807SJeff Garzik 	return 0;
3381c6fd2807SJeff Garzik }
3382c6fd2807SJeff Garzik 
3383c6fd2807SJeff Garzik /**
3384c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3385c6fd2807SJeff Garzik  *	@ap: port to reset
3386c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3387d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3388c6fd2807SJeff Garzik  *
3389c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3390c6fd2807SJeff Garzik  *
3391c6fd2807SJeff Garzik  *	LOCKING:
3392c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3393c6fd2807SJeff Garzik  *
3394c6fd2807SJeff Garzik  *	RETURNS:
3395c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3396c6fd2807SJeff Garzik  */
3397d4b2bab4STejun Heo int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3398d4b2bab4STejun Heo 		      unsigned long deadline)
3399c6fd2807SJeff Garzik {
3400c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3401d4b2bab4STejun Heo 	unsigned int devmask = 0;
3402d4b2bab4STejun Heo 	int rc;
3403c6fd2807SJeff Garzik 	u8 err;
3404c6fd2807SJeff Garzik 
3405c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3406c6fd2807SJeff Garzik 
3407c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3408c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3409c6fd2807SJeff Garzik 		goto out;
3410c6fd2807SJeff Garzik 	}
3411c6fd2807SJeff Garzik 
3412c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3413c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3414c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3415c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3416c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3417c6fd2807SJeff Garzik 
3418c6fd2807SJeff Garzik 	/* select device 0 again */
3419c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3420c6fd2807SJeff Garzik 
3421c6fd2807SJeff Garzik 	/* issue bus reset */
3422c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3423d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
34249b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
34259b89391cSTejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
3426d4b2bab4STejun Heo 		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3427d4b2bab4STejun Heo 		return rc;
3428c6fd2807SJeff Garzik 	}
3429c6fd2807SJeff Garzik 
3430c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
3431c6fd2807SJeff Garzik 	classes[0] = ata_dev_try_classify(ap, 0, &err);
3432c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
3433c6fd2807SJeff Garzik 		classes[1] = ata_dev_try_classify(ap, 1, &err);
3434c6fd2807SJeff Garzik 
3435c6fd2807SJeff Garzik  out:
3436c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3437c6fd2807SJeff Garzik 	return 0;
3438c6fd2807SJeff Garzik }
3439c6fd2807SJeff Garzik 
3440c6fd2807SJeff Garzik /**
3441b6103f6dSTejun Heo  *	sata_port_hardreset - reset port via SATA phy reset
3442c6fd2807SJeff Garzik  *	@ap: port to reset
3443b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3444d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3445c6fd2807SJeff Garzik  *
3446c6fd2807SJeff Garzik  *	SATA phy-reset host port using DET bits of SControl register.
3447c6fd2807SJeff Garzik  *
3448c6fd2807SJeff Garzik  *	LOCKING:
3449c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3450c6fd2807SJeff Garzik  *
3451c6fd2807SJeff Garzik  *	RETURNS:
3452c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3453c6fd2807SJeff Garzik  */
3454d4b2bab4STejun Heo int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3455d4b2bab4STejun Heo 			unsigned long deadline)
3456c6fd2807SJeff Garzik {
3457c6fd2807SJeff Garzik 	u32 scontrol;
3458c6fd2807SJeff Garzik 	int rc;
3459c6fd2807SJeff Garzik 
3460c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3461c6fd2807SJeff Garzik 
3462c6fd2807SJeff Garzik 	if (sata_set_spd_needed(ap)) {
3463c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3464c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3465c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3466c6fd2807SJeff Garzik 		 * and Sil3124.
3467c6fd2807SJeff Garzik 		 */
3468c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3469b6103f6dSTejun Heo 			goto out;
3470c6fd2807SJeff Garzik 
3471cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3472c6fd2807SJeff Garzik 
3473c6fd2807SJeff Garzik 		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3474b6103f6dSTejun Heo 			goto out;
3475c6fd2807SJeff Garzik 
3476c6fd2807SJeff Garzik 		sata_set_spd(ap);
3477c6fd2807SJeff Garzik 	}
3478c6fd2807SJeff Garzik 
3479c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3480c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3481b6103f6dSTejun Heo 		goto out;
3482c6fd2807SJeff Garzik 
3483c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3484c6fd2807SJeff Garzik 
3485c6fd2807SJeff Garzik 	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3486b6103f6dSTejun Heo 		goto out;
3487c6fd2807SJeff Garzik 
3488c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3489c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3490c6fd2807SJeff Garzik 	 */
3491c6fd2807SJeff Garzik 	msleep(1);
3492c6fd2807SJeff Garzik 
3493c6fd2807SJeff Garzik 	/* bring phy back */
3494d4b2bab4STejun Heo 	rc = sata_phy_resume(ap, timing, deadline);
3495b6103f6dSTejun Heo  out:
3496b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3497b6103f6dSTejun Heo 	return rc;
3498b6103f6dSTejun Heo }
3499b6103f6dSTejun Heo 
3500b6103f6dSTejun Heo /**
3501b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3502b6103f6dSTejun Heo  *	@ap: port to reset
3503b6103f6dSTejun Heo  *	@class: resulting class of attached device
3504d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3505b6103f6dSTejun Heo  *
3506b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3507b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3508b6103f6dSTejun Heo  *
3509b6103f6dSTejun Heo  *	LOCKING:
3510b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3511b6103f6dSTejun Heo  *
3512b6103f6dSTejun Heo  *	RETURNS:
3513b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3514b6103f6dSTejun Heo  */
3515d4b2bab4STejun Heo int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3516d4b2bab4STejun Heo 		       unsigned long deadline)
3517b6103f6dSTejun Heo {
3518b6103f6dSTejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3519b6103f6dSTejun Heo 	int rc;
3520b6103f6dSTejun Heo 
3521b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3522b6103f6dSTejun Heo 
3523b6103f6dSTejun Heo 	/* do hardreset */
3524d4b2bab4STejun Heo 	rc = sata_port_hardreset(ap, timing, deadline);
3525b6103f6dSTejun Heo 	if (rc) {
3526b6103f6dSTejun Heo 		ata_port_printk(ap, KERN_ERR,
3527b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3528b6103f6dSTejun Heo 		return rc;
3529b6103f6dSTejun Heo 	}
3530c6fd2807SJeff Garzik 
3531c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3532c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3533c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3534c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3535c6fd2807SJeff Garzik 		return 0;
3536c6fd2807SJeff Garzik 	}
3537c6fd2807SJeff Garzik 
353834fee227STejun Heo 	/* wait a while before checking status, see SRST for more info */
353934fee227STejun Heo 	msleep(150);
354034fee227STejun Heo 
3541d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
35429b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
35439b89391cSTejun Heo 	if (rc) {
3544c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR,
3545d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3546d4b2bab4STejun Heo 		return rc;
3547c6fd2807SJeff Garzik 	}
3548c6fd2807SJeff Garzik 
3549c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3550c6fd2807SJeff Garzik 
3551c6fd2807SJeff Garzik 	*class = ata_dev_try_classify(ap, 0, NULL);
3552c6fd2807SJeff Garzik 
3553c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3554c6fd2807SJeff Garzik 	return 0;
3555c6fd2807SJeff Garzik }
3556c6fd2807SJeff Garzik 
3557c6fd2807SJeff Garzik /**
3558c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3559c6fd2807SJeff Garzik  *	@ap: the target ata_port
3560c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3561c6fd2807SJeff Garzik  *
3562c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3563c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3564c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3565c6fd2807SJeff Garzik  *
3566c6fd2807SJeff Garzik  *	LOCKING:
3567c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3568c6fd2807SJeff Garzik  */
3569c6fd2807SJeff Garzik void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3570c6fd2807SJeff Garzik {
3571c6fd2807SJeff Garzik 	u32 serror;
3572c6fd2807SJeff Garzik 
3573c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3574c6fd2807SJeff Garzik 
3575c6fd2807SJeff Garzik 	/* print link status */
3576c6fd2807SJeff Garzik 	sata_print_link_status(ap);
3577c6fd2807SJeff Garzik 
3578c6fd2807SJeff Garzik 	/* clear SError */
3579c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3580c6fd2807SJeff Garzik 		sata_scr_write(ap, SCR_ERROR, serror);
3581c6fd2807SJeff Garzik 
3582c6fd2807SJeff Garzik 	/* re-enable interrupts */
358383625006SAkira Iguchi 	if (!ap->ops->error_handler)
358483625006SAkira Iguchi 		ap->ops->irq_on(ap);
3585c6fd2807SJeff Garzik 
3586c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3587c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3588c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3589c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3590c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3591c6fd2807SJeff Garzik 
3592c6fd2807SJeff Garzik 	/* bail out if no device is present */
3593c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3594c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3595c6fd2807SJeff Garzik 		return;
3596c6fd2807SJeff Garzik 	}
3597c6fd2807SJeff Garzik 
3598c6fd2807SJeff Garzik 	/* set up device control */
35990d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
36000d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3601c6fd2807SJeff Garzik 
3602c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3603c6fd2807SJeff Garzik }
3604c6fd2807SJeff Garzik 
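/*
 * Editorial sketch, not part of the original source: a minimal example of
 * how the two standard helpers above are typically combined from EH
 * (kernel thread) context.  The 5 second budget and the function name are
 * assumptions made purely for illustration.
 */
static int example_hardreset_then_postreset(struct ata_port *ap)
{
	/* ata_std_postreset() inspects classes[0] and classes[1] */
	unsigned int classes[2] = { ATA_DEV_NONE, ATA_DEV_NONE };
	unsigned long deadline = jiffies + msecs_to_jiffies(5000);
	int rc;

	rc = sata_std_hardreset(ap, &classes[0], deadline);
	if (rc == 0)
		ata_std_postreset(ap, classes);
	return rc;
}
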
3605c6fd2807SJeff Garzik /**
3606c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3607c6fd2807SJeff Garzik  *	@dev: device to compare against
3608c6fd2807SJeff Garzik  *	@new_class: class of the new device
3609c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3610c6fd2807SJeff Garzik  *
3611c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3612c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3613c6fd2807SJeff Garzik  *	@new_id.
3614c6fd2807SJeff Garzik  *
3615c6fd2807SJeff Garzik  *	LOCKING:
3616c6fd2807SJeff Garzik  *	None.
3617c6fd2807SJeff Garzik  *
3618c6fd2807SJeff Garzik  *	RETURNS:
3619c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3620c6fd2807SJeff Garzik  */
3621c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3622c6fd2807SJeff Garzik 			       const u16 *new_id)
3623c6fd2807SJeff Garzik {
3624c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3625a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3626a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3627c6fd2807SJeff Garzik 
3628c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3629c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3630c6fd2807SJeff Garzik 			       dev->class, new_class);
3631c6fd2807SJeff Garzik 		return 0;
3632c6fd2807SJeff Garzik 	}
3633c6fd2807SJeff Garzik 
3634a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3635a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3636a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3637a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3638c6fd2807SJeff Garzik 
3639c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3640c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3641c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3642c6fd2807SJeff Garzik 		return 0;
3643c6fd2807SJeff Garzik 	}
3644c6fd2807SJeff Garzik 
3645c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3646c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3647c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3648c6fd2807SJeff Garzik 		return 0;
3649c6fd2807SJeff Garzik 	}
3650c6fd2807SJeff Garzik 
3651c6fd2807SJeff Garzik 	return 1;
3652c6fd2807SJeff Garzik }
3653c6fd2807SJeff Garzik 
3654c6fd2807SJeff Garzik /**
3655fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
3656fe30911bSTejun Heo  *	@dev: target ATA device
3657bff04647STejun Heo  *	@readid_flags: read ID flags
3658c6fd2807SJeff Garzik  *
3659c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
3660c6fd2807SJeff Garzik  *	the port.
3661c6fd2807SJeff Garzik  *
3662c6fd2807SJeff Garzik  *	LOCKING:
3663c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3664c6fd2807SJeff Garzik  *
3665c6fd2807SJeff Garzik  *	RETURNS:
3666c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3667c6fd2807SJeff Garzik  */
3668fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3669c6fd2807SJeff Garzik {
3670c6fd2807SJeff Garzik 	unsigned int class = dev->class;
3671c6fd2807SJeff Garzik 	u16 *id = (void *)dev->ap->sector_buf;
3672c6fd2807SJeff Garzik 	int rc;
3673c6fd2807SJeff Garzik 
3674c6fd2807SJeff Garzik 	/* read ID data */
3675bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3676c6fd2807SJeff Garzik 	if (rc)
3677fe30911bSTejun Heo 		return rc;
3678c6fd2807SJeff Garzik 
3679c6fd2807SJeff Garzik 	/* is the device still there? */
3680fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
3681fe30911bSTejun Heo 		return -ENODEV;
3682c6fd2807SJeff Garzik 
3683c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3684fe30911bSTejun Heo 	return 0;
3685fe30911bSTejun Heo }
3686fe30911bSTejun Heo 
3687fe30911bSTejun Heo /**
3688fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
3689fe30911bSTejun Heo  *	@dev: device to revalidate
3690fe30911bSTejun Heo  *	@readid_flags: read ID flags
3691fe30911bSTejun Heo  *
3692fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3693fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
3694fe30911bSTejun Heo  *
3695fe30911bSTejun Heo  *	LOCKING:
3696fe30911bSTejun Heo  *	Kernel thread context (may sleep)
3697fe30911bSTejun Heo  *
3698fe30911bSTejun Heo  *	RETURNS:
3699fe30911bSTejun Heo  *	0 on success, negative errno otherwise
3700fe30911bSTejun Heo  */
3701fe30911bSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3702fe30911bSTejun Heo {
37036ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
3704fe30911bSTejun Heo 	int rc;
3705fe30911bSTejun Heo 
3706fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
3707fe30911bSTejun Heo 		return -ENODEV;
3708fe30911bSTejun Heo 
3709fe30911bSTejun Heo 	/* re-read ID */
3710fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
3711fe30911bSTejun Heo 	if (rc)
3712fe30911bSTejun Heo 		goto fail;
3713c6fd2807SJeff Garzik 
3714c6fd2807SJeff Garzik 	/* configure device according to the new ID */
3715efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
37166ddcd3b0STejun Heo 	if (rc)
37176ddcd3b0STejun Heo 		goto fail;
37186ddcd3b0STejun Heo 
37196ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
37206ddcd3b0STejun Heo 	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
37216ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
37226ddcd3b0STejun Heo 			       "%llu != %llu\n",
37236ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
37246ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
37256ddcd3b0STejun Heo 		rc = -ENODEV;
37266ddcd3b0STejun Heo 		goto fail;
37276ddcd3b0STejun Heo 	}
37286ddcd3b0STejun Heo 
3729c6fd2807SJeff Garzik 	return 0;
3730c6fd2807SJeff Garzik 
3731c6fd2807SJeff Garzik  fail:
3732c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3733c6fd2807SJeff Garzik 	return rc;
3734c6fd2807SJeff Garzik }
3735c6fd2807SJeff Garzik 
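/*
 * Editorial sketch, not part of the original source: a typical caller of
 * ata_dev_revalidate() only needs the errno; readid_flags of 0 means no
 * special READ ID handling in this illustration.
 */
static int example_revalidate(struct ata_device *dev)
{
	int rc = ata_dev_revalidate(dev, 0);

	if (rc)
		ata_dev_printk(dev, KERN_WARNING,
			       "example: revalidation returned %d\n", rc);
	return rc;
}
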
37366919a0a6SAlan Cox struct ata_blacklist_entry {
37376919a0a6SAlan Cox 	const char *model_num;
37386919a0a6SAlan Cox 	const char *model_rev;
37396919a0a6SAlan Cox 	unsigned long horkage;
37406919a0a6SAlan Cox };
37416919a0a6SAlan Cox 
37426919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
37436919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
37446919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
37456919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
37466919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
37476919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
37486919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
37496919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
37506919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
37516919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
37526919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
37536919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
37546919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
37556919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
37566919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
37576919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
37586919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
37596919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
37606919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
37616919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
37626919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
37636919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
37646919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
37656919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
37666919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
37676919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
37686919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
37696919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
37706919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
37716919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
37726919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
377339f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
37746919a0a6SAlan Cox 
377518d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
37766f23a31dSAlbert Lee 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 |
37776f23a31dSAlbert Lee 						ATA_HORKAGE_DMA_RW_ONLY },
377818d6e9d5SAlbert Lee 
37796919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
37806919a0a6SAlan Cox 
37816919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
37826919a0a6SAlan Cox 	/* NCQ is slow */
37836919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
378409125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
378509125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
37867acfaf30SPaul Rolland 	/* NCQ is broken */
37877acfaf30SPaul Rolland 	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
3788471e44b2SJeff Garzik 	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
378996442925SJens Axboe 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
379096442925SJens Axboe 	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
379136e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
379236e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
379336e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
379436e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
379536e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3796bd9c5a39STejun Heo 	/* Drives which do spurious command completion */
3797bd9c5a39STejun Heo 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
37986919a0a6SAlan Cox 
37996919a0a6SAlan Cox 	/* Devices with NCQ limits */
38006919a0a6SAlan Cox 
38016919a0a6SAlan Cox 	/* End Marker */
38026919a0a6SAlan Cox 	{ }
3803c6fd2807SJeff Garzik };
3804c6fd2807SJeff Garzik 
38056919a0a6SAlan Cox unsigned long ata_device_blacklisted(const struct ata_device *dev)
3806c6fd2807SJeff Garzik {
38078bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
38088bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
38096919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3810c6fd2807SJeff Garzik 
38118bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
38128bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3813c6fd2807SJeff Garzik 
38146919a0a6SAlan Cox 	while (ad->model_num) {
38158bfa79fcSTejun Heo 		if (!strcmp(ad->model_num, model_num)) {
38166919a0a6SAlan Cox 			if (ad->model_rev == NULL)
38176919a0a6SAlan Cox 				return ad->horkage;
38188bfa79fcSTejun Heo 			if (!strcmp(ad->model_rev, model_rev))
38196919a0a6SAlan Cox 				return ad->horkage;
3820c6fd2807SJeff Garzik 		}
38216919a0a6SAlan Cox 		ad++;
3822c6fd2807SJeff Garzik 	}
3823c6fd2807SJeff Garzik 	return 0;
3824c6fd2807SJeff Garzik }
3825c6fd2807SJeff Garzik 
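/*
 * Editorial sketch, not part of the original source: the horkage mask
 * returned by ata_device_blacklisted() is normally tested bit by bit,
 * e.g. NCQ configuration code can skip queueing for blacklisted drives.
 */
static inline int example_ncq_is_blacklisted(const struct ata_device *dev)
{
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) != 0;
}
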
38266919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
38276919a0a6SAlan Cox {
38286919a0a6SAlan Cox 	/* We don't support polling DMA.
38296919a0a6SAlan Cox 	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO instead)
38306919a0a6SAlan Cox 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
38316919a0a6SAlan Cox 	 */
38326919a0a6SAlan Cox 	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
38336919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
38346919a0a6SAlan Cox 		return 1;
38356919a0a6SAlan Cox 	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
38366919a0a6SAlan Cox }
38376919a0a6SAlan Cox 
3838c6fd2807SJeff Garzik /**
3839c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
3840c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
3841c6fd2807SJeff Garzik  *
3842c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
3843c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
3844c6fd2807SJeff Garzik  *	known limits including host controller limits, device
3845c6fd2807SJeff Garzik  *	blacklist, etc...
3846c6fd2807SJeff Garzik  *
3847c6fd2807SJeff Garzik  *	LOCKING:
3848c6fd2807SJeff Garzik  *	None.
3849c6fd2807SJeff Garzik  */
3850c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
3851c6fd2807SJeff Garzik {
3852c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
3853cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
3854c6fd2807SJeff Garzik 	unsigned long xfer_mask;
3855c6fd2807SJeff Garzik 
3856c6fd2807SJeff Garzik 	/* controller modes available */
3857c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
3858c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
3859c6fd2807SJeff Garzik 
38608343f889SRobert Hancock 	/* drive modes available */
3861c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3862c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
3863c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
3864c6fd2807SJeff Garzik 
3865b352e57dSAlan Cox 	/*
3866b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
3867b352e57dSAlan Cox 	 *	cable
3868b352e57dSAlan Cox 	 */
3869b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
3870b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
3871b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3872b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
3873b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3874b352e57dSAlan Cox 	}
3875b352e57dSAlan Cox 
3876c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
3877c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3878c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
3879c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
3880c6fd2807SJeff Garzik 	}
3881c6fd2807SJeff Garzik 
388214d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
388314d66ab7SPetr Vandrovec             host->simplex_claimed && host->simplex_claimed != ap) {
3884c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3885c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3886c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
3887c6fd2807SJeff Garzik 	}
3888c6fd2807SJeff Garzik 
3889e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
3890e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
3891e424675fSJeff Garzik 
3892c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
3893a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3894c6fd2807SJeff Garzik 
38958343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
38968343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
38978343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
38988343f889SRobert Hancock 	 * solely limited by the cable.
38998343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
39008343f889SRobert Hancock 	 * drive side as well. Cases where we know a 40wire cable
39018343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
39028343f889SRobert Hancock 	 */
39038343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
39048343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
39058343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
39068343f889SRobert Hancock 		    (ata_drive_40wire(dev->id) &&
39078343f889SRobert Hancock 		     (ap->cbl == ATA_CBL_PATA_UNK ||
39088343f889SRobert Hancock 		      ap->cbl == ATA_CBL_PATA80))) {
39098343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
39108343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
39118343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
39128343f889SRobert Hancock 		}
39138343f889SRobert Hancock 
3914c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3915c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
3916c6fd2807SJeff Garzik }
3917c6fd2807SJeff Garzik 
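/*
 * Editorial sketch, not part of the original source: ata_pack_xfermask()
 * and ata_unpack_xfermask(), used above, merge and split the PIO, MWDMA
 * and UDMA bitmaps.  A hypothetical filter dropping every UDMA mode
 * would look like this.
 */
static void example_drop_udma(struct ata_device *dev)
{
	unsigned long xfer_mask = ata_pack_xfermask(dev->pio_mask,
						    dev->mwdma_mask,
						    dev->udma_mask);

	xfer_mask &= ~ATA_MASK_UDMA;		/* clear all UDMA bits */

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
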
3918c6fd2807SJeff Garzik /**
3919c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3920c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3921c6fd2807SJeff Garzik  *
3922c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
3923c6fd2807SJeff Garzik  *	on its port.
3924c6fd2807SJeff Garzik  *
3925c6fd2807SJeff Garzik  *	LOCKING:
3926c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3927c6fd2807SJeff Garzik  *
3928c6fd2807SJeff Garzik  *	RETURNS:
3929c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3930c6fd2807SJeff Garzik  */
3931c6fd2807SJeff Garzik 
3932c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3933c6fd2807SJeff Garzik {
3934c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3935c6fd2807SJeff Garzik 	unsigned int err_mask;
3936c6fd2807SJeff Garzik 
3937c6fd2807SJeff Garzik 	/* set up set-features taskfile */
3938c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
3939c6fd2807SJeff Garzik 
3940464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
3941464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
3942464cf177STejun Heo 	 */
3943c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3944c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
3945c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
3946464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3947c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3948c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
3949c6fd2807SJeff Garzik 
3950c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3951c6fd2807SJeff Garzik 
3952c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3953c6fd2807SJeff Garzik 	return err_mask;
3954c6fd2807SJeff Garzik }
3955c6fd2807SJeff Garzik 
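/*
 * Editorial sketch, not part of the original source: callers treat the
 * AC_ERR_* mask returned by the helper above as failure when non-zero;
 * the -EIO translation here is only illustrative.
 */
static int example_set_xfermode(struct ata_device *dev)
{
	unsigned int err_mask = ata_dev_set_xfermode(dev);

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR,
			       "failed to set xfermode (err_mask=0x%x)\n",
			       err_mask);
		return -EIO;
	}
	return 0;
}
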
3956c6fd2807SJeff Garzik /**
3957c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
3958c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3959c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
3960c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
3961c6fd2807SJeff Garzik  *
3962c6fd2807SJeff Garzik  *	LOCKING:
3963c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3964c6fd2807SJeff Garzik  *
3965c6fd2807SJeff Garzik  *	RETURNS:
3966c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3967c6fd2807SJeff Garzik  */
3968c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
3969c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
3970c6fd2807SJeff Garzik {
3971c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3972c6fd2807SJeff Garzik 	unsigned int err_mask;
3973c6fd2807SJeff Garzik 
3974c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
3975c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3976c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
3977c6fd2807SJeff Garzik 
3978c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
3979c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
3980c6fd2807SJeff Garzik 
3981c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3982c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
3983c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3984c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3985c6fd2807SJeff Garzik 	tf.nsect = sectors;
3986c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3987c6fd2807SJeff Garzik 
3988c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3989c6fd2807SJeff Garzik 
3990c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3991c6fd2807SJeff Garzik 	return err_mask;
3992c6fd2807SJeff Garzik }
3993c6fd2807SJeff Garzik 
3994c6fd2807SJeff Garzik /**
3995c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
3996c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
3997c6fd2807SJeff Garzik  *
3998c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
3999c6fd2807SJeff Garzik  *
4000c6fd2807SJeff Garzik  *	LOCKING:
4001cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4002c6fd2807SJeff Garzik  */
400370e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4004c6fd2807SJeff Garzik {
4005c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4006c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4007c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4008c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4009c6fd2807SJeff Garzik 
4010c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4011c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4012c6fd2807SJeff Garzik 
4013c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4014c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4015c6fd2807SJeff Garzik 
4016c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4017c6fd2807SJeff Garzik 
4018c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
4019c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4020c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4021c6fd2807SJeff Garzik 	 */
4022c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4023c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4024c6fd2807SJeff Garzik 
4025c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4026c6fd2807SJeff Garzik 		if (qc->n_elem)
4027c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4028c6fd2807SJeff Garzik 		/* restore last sg */
4029c6fd2807SJeff Garzik 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
4030c6fd2807SJeff Garzik 		if (pad_buf) {
4031c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
4032c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4033c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4034c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4035c6fd2807SJeff Garzik 		}
4036c6fd2807SJeff Garzik 	} else {
4037c6fd2807SJeff Garzik 		if (qc->n_elem)
4038c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4039c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4040c6fd2807SJeff Garzik 				dir);
4041c6fd2807SJeff Garzik 		/* restore sg */
4042c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4043c6fd2807SJeff Garzik 		if (pad_buf)
4044c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4045c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4046c6fd2807SJeff Garzik 	}
4047c6fd2807SJeff Garzik 
4048c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4049c6fd2807SJeff Garzik 	qc->__sg = NULL;
4050c6fd2807SJeff Garzik }
4051c6fd2807SJeff Garzik 
4052c6fd2807SJeff Garzik /**
4053c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4054c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4055c6fd2807SJeff Garzik  *
4056c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4057c6fd2807SJeff Garzik  *	associated with the current disk command.
4058c6fd2807SJeff Garzik  *
4059c6fd2807SJeff Garzik  *	LOCKING:
4060cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4061c6fd2807SJeff Garzik  *
4062c6fd2807SJeff Garzik  */
4063c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4064c6fd2807SJeff Garzik {
4065c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4066c6fd2807SJeff Garzik 	struct scatterlist *sg;
4067c6fd2807SJeff Garzik 	unsigned int idx;
4068c6fd2807SJeff Garzik 
4069c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4070c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4071c6fd2807SJeff Garzik 
4072c6fd2807SJeff Garzik 	idx = 0;
4073c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4074c6fd2807SJeff Garzik 		u32 addr, offset;
4075c6fd2807SJeff Garzik 		u32 sg_len, len;
4076c6fd2807SJeff Garzik 
4077c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4078c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4079c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4080c6fd2807SJeff Garzik 		 */
4081c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4082c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4083c6fd2807SJeff Garzik 
4084c6fd2807SJeff Garzik 		while (sg_len) {
4085c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4086c6fd2807SJeff Garzik 			len = sg_len;
4087c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4088c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4089c6fd2807SJeff Garzik 
4090c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4091c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4092c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4093c6fd2807SJeff Garzik 
4094c6fd2807SJeff Garzik 			idx++;
4095c6fd2807SJeff Garzik 			sg_len -= len;
4096c6fd2807SJeff Garzik 			addr += len;
4097c6fd2807SJeff Garzik 		}
4098c6fd2807SJeff Garzik 	}
4099c6fd2807SJeff Garzik 
4100c6fd2807SJeff Garzik 	if (idx)
4101c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4102c6fd2807SJeff Garzik }
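/*
 * Editorial worked example for the 64K boundary handling above (not part
 * of the original source): a segment with DMA address 0x1f800 and length
 * 0x1000 has offset 0xf800 within its 64K region, so 0xf800 + 0x1000
 * exceeds 0x10000 and the segment is emitted as two PRD entries:
 * (0x1f800, 0x0800) followed by (0x20000, 0x0800).
 */
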
4103c6fd2807SJeff Garzik /**
4104c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4105c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4106c6fd2807SJeff Garzik  *
4107c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4108c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4109c6fd2807SJeff Garzik  *	supplied PACKET command.
4110c6fd2807SJeff Garzik  *
4111c6fd2807SJeff Garzik  *	LOCKING:
4112cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4113c6fd2807SJeff Garzik  *
4114c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4115c6fd2807SJeff Garzik  *               nonzero otherwise
4116c6fd2807SJeff Garzik  */
4117c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4118c6fd2807SJeff Garzik {
4119c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4120c6fd2807SJeff Garzik 	int rc = 0; /* Assume ATAPI DMA is OK by default */
4121c6fd2807SJeff Garzik 
41226f23a31dSAlbert Lee 	/* some drives can only do ATAPI DMA on read/write */
41236f23a31dSAlbert Lee 	if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
41246f23a31dSAlbert Lee 		struct scsi_cmnd *cmd = qc->scsicmd;
41256f23a31dSAlbert Lee 		u8 *scsicmd = cmd->cmnd;
41266f23a31dSAlbert Lee 
41276f23a31dSAlbert Lee 		switch (scsicmd[0]) {
41286f23a31dSAlbert Lee 		case READ_10:
41296f23a31dSAlbert Lee 		case WRITE_10:
41306f23a31dSAlbert Lee 		case READ_12:
41316f23a31dSAlbert Lee 		case WRITE_12:
41326f23a31dSAlbert Lee 		case READ_6:
41336f23a31dSAlbert Lee 		case WRITE_6:
41346f23a31dSAlbert Lee 			/* ATAPI DMA may be OK for these commands */
41356f23a31dSAlbert Lee 			break;
41366f23a31dSAlbert Lee 		default:
41376f23a31dSAlbert Lee 			/* turn off atapi dma */
41386f23a31dSAlbert Lee 			return 1;
41396f23a31dSAlbert Lee 		}
41406f23a31dSAlbert Lee 	}
41416f23a31dSAlbert Lee 
4142c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4143c6fd2807SJeff Garzik 		rc = ap->ops->check_atapi_dma(qc);
4144c6fd2807SJeff Garzik 
4145c6fd2807SJeff Garzik 	return rc;
4146c6fd2807SJeff Garzik }
4147c6fd2807SJeff Garzik /**
4148c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4149c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4150c6fd2807SJeff Garzik  *
4151c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4152c6fd2807SJeff Garzik  *
4153c6fd2807SJeff Garzik  *	LOCKING:
4154cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4155c6fd2807SJeff Garzik  */
4156c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4157c6fd2807SJeff Garzik {
4158c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4159c6fd2807SJeff Garzik 		return;
4160c6fd2807SJeff Garzik 
4161c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4162c6fd2807SJeff Garzik }
4163c6fd2807SJeff Garzik 
4164c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4165c6fd2807SJeff Garzik 
4166c6fd2807SJeff Garzik /**
4167c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4168c6fd2807SJeff Garzik  *	@qc: Command to be associated
4169c6fd2807SJeff Garzik  *	@buf: Memory buffer
4170c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4171c6fd2807SJeff Garzik  *
4172c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4173c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4174c6fd2807SJeff Garzik  *
4175c6fd2807SJeff Garzik  *	LOCKING:
4176cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4177c6fd2807SJeff Garzik  */
4178c6fd2807SJeff Garzik 
4179c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4180c6fd2807SJeff Garzik {
4181c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4182c6fd2807SJeff Garzik 
4183c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4184c6fd2807SJeff Garzik 	qc->n_elem = 1;
4185c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4186c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4187c6fd2807SJeff Garzik 	qc->nbytes = buflen;
4188c6fd2807SJeff Garzik 
418961c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4190c6fd2807SJeff Garzik }
4191c6fd2807SJeff Garzik 
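/*
 * Editorial sketch, not part of the original source: a single-buffer
 * command is typically associated with its data and DMA direction like
 * this before being issued; "buf" and "len" are placeholders.
 */
static void example_setup_single(struct ata_queued_cmd *qc,
				 void *buf, unsigned int len)
{
	ata_sg_init_one(qc, buf, len);
	qc->dma_dir = DMA_FROM_DEVICE;	/* data flows device -> host here */
}
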
4192c6fd2807SJeff Garzik /**
4193c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4194c6fd2807SJeff Garzik  *	@qc: Command to be associated
4195c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4196c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4197c6fd2807SJeff Garzik  *
4198c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4199c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4200c6fd2807SJeff Garzik  *	elements.
4201c6fd2807SJeff Garzik  *
4202c6fd2807SJeff Garzik  *	LOCKING:
4203cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4204c6fd2807SJeff Garzik  */
4205c6fd2807SJeff Garzik 
4206c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4207c6fd2807SJeff Garzik 		 unsigned int n_elem)
4208c6fd2807SJeff Garzik {
4209c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4210c6fd2807SJeff Garzik 	qc->__sg = sg;
4211c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4212c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
4213c6fd2807SJeff Garzik }
4214c6fd2807SJeff Garzik 
4215c6fd2807SJeff Garzik /**
4216c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4217c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4218c6fd2807SJeff Garzik  *
4219c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4220c6fd2807SJeff Garzik  *
4221c6fd2807SJeff Garzik  *	LOCKING:
4222cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4223c6fd2807SJeff Garzik  *
4224c6fd2807SJeff Garzik  *	RETURNS:
4225c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4226c6fd2807SJeff Garzik  */
4227c6fd2807SJeff Garzik 
4228c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4229c6fd2807SJeff Garzik {
4230c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4231c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4232c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4233c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4234c6fd2807SJeff Garzik 	int trim_sg = 0;
4235c6fd2807SJeff Garzik 
4236c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4237c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4238c6fd2807SJeff Garzik 	if (qc->pad_len) {
4239c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4240c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4241c6fd2807SJeff Garzik 
4242c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4243c6fd2807SJeff Garzik 
4244c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4245c6fd2807SJeff Garzik 
4246c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4247c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4248c6fd2807SJeff Garzik 			       qc->pad_len);
4249c6fd2807SJeff Garzik 
4250c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4251c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4252c6fd2807SJeff Garzik 		/* trim sg */
4253c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4254c6fd2807SJeff Garzik 		if (sg->length == 0)
4255c6fd2807SJeff Garzik 			trim_sg = 1;
4256c6fd2807SJeff Garzik 
4257c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4258c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4259c6fd2807SJeff Garzik 	}
4260c6fd2807SJeff Garzik 
4261c6fd2807SJeff Garzik 	if (trim_sg) {
4262c6fd2807SJeff Garzik 		qc->n_elem--;
4263c6fd2807SJeff Garzik 		goto skip_map;
4264c6fd2807SJeff Garzik 	}
4265c6fd2807SJeff Garzik 
4266c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4267c6fd2807SJeff Garzik 				     sg->length, dir);
4268c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4269c6fd2807SJeff Garzik 		/* restore sg */
4270c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4271c6fd2807SJeff Garzik 		return -1;
4272c6fd2807SJeff Garzik 	}
4273c6fd2807SJeff Garzik 
4274c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4275c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4276c6fd2807SJeff Garzik 
4277c6fd2807SJeff Garzik skip_map:
4278c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4279c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4280c6fd2807SJeff Garzik 
4281c6fd2807SJeff Garzik 	return 0;
4282c6fd2807SJeff Garzik }
4283c6fd2807SJeff Garzik 
4284c6fd2807SJeff Garzik /**
4285c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4286c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4287c6fd2807SJeff Garzik  *
4288c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4289c6fd2807SJeff Garzik  *
4290c6fd2807SJeff Garzik  *	LOCKING:
4291cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4292c6fd2807SJeff Garzik  *
4293c6fd2807SJeff Garzik  *	RETURNS:
4294c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4295c6fd2807SJeff Garzik  *
4296c6fd2807SJeff Garzik  */
4297c6fd2807SJeff Garzik 
4298c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4299c6fd2807SJeff Garzik {
4300c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4301c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4302c6fd2807SJeff Garzik 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
4303c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4304c6fd2807SJeff Garzik 
430544877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4306c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4307c6fd2807SJeff Garzik 
4308c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4309c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4310c6fd2807SJeff Garzik 	if (qc->pad_len) {
4311c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4312c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4313c6fd2807SJeff Garzik 		unsigned int offset;
4314c6fd2807SJeff Garzik 
4315c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4316c6fd2807SJeff Garzik 
4317c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4318c6fd2807SJeff Garzik 
4319c6fd2807SJeff Garzik 		/*
4320c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4321c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4322c6fd2807SJeff Garzik 		 */
4323c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4324c6fd2807SJeff Garzik 		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4325c6fd2807SJeff Garzik 		psg->offset = offset_in_page(offset);
4326c6fd2807SJeff Garzik 
4327c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4328c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4329c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4330c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4331c6fd2807SJeff Garzik 		}
4332c6fd2807SJeff Garzik 
4333c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4334c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4335c6fd2807SJeff Garzik 		/* trim last sg */
4336c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4337c6fd2807SJeff Garzik 		if (lsg->length == 0)
4338c6fd2807SJeff Garzik 			trim_sg = 1;
4339c6fd2807SJeff Garzik 
4340c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4341c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4342c6fd2807SJeff Garzik 	}
4343c6fd2807SJeff Garzik 
4344c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4345c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4346c6fd2807SJeff Garzik 		pre_n_elem--;
4347c6fd2807SJeff Garzik 
4348c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4349c6fd2807SJeff Garzik 		n_elem = 0;
4350c6fd2807SJeff Garzik 		goto skip_map;
4351c6fd2807SJeff Garzik 	}
4352c6fd2807SJeff Garzik 
4353c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4354c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4355c6fd2807SJeff Garzik 	if (n_elem < 1) {
4356c6fd2807SJeff Garzik 		/* restore last sg */
4357c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4358c6fd2807SJeff Garzik 		return -1;
4359c6fd2807SJeff Garzik 	}
4360c6fd2807SJeff Garzik 
4361c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4362c6fd2807SJeff Garzik 
4363c6fd2807SJeff Garzik skip_map:
4364c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4365c6fd2807SJeff Garzik 
4366c6fd2807SJeff Garzik 	return 0;
4367c6fd2807SJeff Garzik }
4368c6fd2807SJeff Garzik 
4369c6fd2807SJeff Garzik /**
4370c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4371c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4372c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4373c6fd2807SJeff Garzik  *
4374c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4375c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4376c6fd2807SJeff Garzik  *	vice-versa.
4377c6fd2807SJeff Garzik  *
4378c6fd2807SJeff Garzik  *	LOCKING:
4379c6fd2807SJeff Garzik  *	Inherited from caller.
4380c6fd2807SJeff Garzik  */
4381c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4382c6fd2807SJeff Garzik {
4383c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4384c6fd2807SJeff Garzik 	unsigned int i;
4385c6fd2807SJeff Garzik 
4386c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4387c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4388c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4389c6fd2807SJeff Garzik }
4390c6fd2807SJeff Garzik 
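/*
 * Editorial sketch, not part of the original source: raw IDENTIFY data is
 * handled as little-endian 16-bit words, so PIO read paths byte-swap the
 * whole ATA_ID_WORDS-long page in place; on little-endian hosts the loop
 * above is compiled out and the call is effectively a no-op.
 */
static inline void example_fixup_id_page(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}
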
4391c6fd2807SJeff Garzik /**
43920d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4393c6fd2807SJeff Garzik  *	@adev: device to target
4394c6fd2807SJeff Garzik  *	@buf: data buffer
4395c6fd2807SJeff Garzik  *	@buflen: buffer length
4396c6fd2807SJeff Garzik  *	@write_data: read/write
4397c6fd2807SJeff Garzik  *
4398c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4399c6fd2807SJeff Garzik  *
4400c6fd2807SJeff Garzik  *	LOCKING:
4401c6fd2807SJeff Garzik  *	Inherited from caller.
4402c6fd2807SJeff Garzik  */
44030d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4404c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4405c6fd2807SJeff Garzik {
4406c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
4407c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4408c6fd2807SJeff Garzik 
4409c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4410c6fd2807SJeff Garzik 	if (write_data)
44110d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4412c6fd2807SJeff Garzik 	else
44130d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4414c6fd2807SJeff Garzik 
4415c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4416c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4417c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4418c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4419c6fd2807SJeff Garzik 
4420c6fd2807SJeff Garzik 		if (write_data) {
4421c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
44220d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4423c6fd2807SJeff Garzik 		} else {
44240d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4425c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4426c6fd2807SJeff Garzik 		}
4427c6fd2807SJeff Garzik 	}
4428c6fd2807SJeff Garzik }
4429c6fd2807SJeff Garzik 
4430c6fd2807SJeff Garzik /**
44310d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4432c6fd2807SJeff Garzik  *	@adev: device to target
4433c6fd2807SJeff Garzik  *	@buf: data buffer
4434c6fd2807SJeff Garzik  *	@buflen: buffer length
4435c6fd2807SJeff Garzik  *	@write_data: read/write
4436c6fd2807SJeff Garzik  *
4437c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4438c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4439c6fd2807SJeff Garzik  *
4440c6fd2807SJeff Garzik  *	LOCKING:
4441c6fd2807SJeff Garzik  *	Inherited from caller.
4442c6fd2807SJeff Garzik  */
44430d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4444c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
4445c6fd2807SJeff Garzik {
4446c6fd2807SJeff Garzik 	unsigned long flags;
4447c6fd2807SJeff Garzik 	local_irq_save(flags);
44480d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
4449c6fd2807SJeff Garzik 	local_irq_restore(flags);
4450c6fd2807SJeff Garzik }
4451c6fd2807SJeff Garzik 
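/*
 * Editorial sketch, not part of the original source: the two PIO helpers
 * above are reached through ap->ops->data_xfer, so a driver wires one of
 * them into its (otherwise omitted) port operations table, e.g.:
 */
static const struct ata_port_operations example_pio_ops = {
	.data_xfer	= ata_data_xfer_noirq,
	/* remaining hooks intentionally left out of this sketch */
};
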
4452c6fd2807SJeff Garzik 
4453c6fd2807SJeff Garzik /**
44545a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
4455c6fd2807SJeff Garzik  *	@qc: Command in progress
4456c6fd2807SJeff Garzik  *
44575a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4458c6fd2807SJeff Garzik  *
4459c6fd2807SJeff Garzik  *	LOCKING:
4460c6fd2807SJeff Garzik  *	Inherited from caller.
4461c6fd2807SJeff Garzik  */
4462c6fd2807SJeff Garzik 
4463c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
4464c6fd2807SJeff Garzik {
4465c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4466c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4467c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4468c6fd2807SJeff Garzik 	struct page *page;
4469c6fd2807SJeff Garzik 	unsigned int offset;
4470c6fd2807SJeff Garzik 	unsigned char *buf;
4471c6fd2807SJeff Garzik 
44725a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4473c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4474c6fd2807SJeff Garzik 
4475c6fd2807SJeff Garzik 	page = sg[qc->cursg].page;
4476726f0785STejun Heo 	offset = sg[qc->cursg].offset + qc->cursg_ofs;
4477c6fd2807SJeff Garzik 
4478c6fd2807SJeff Garzik 	/* get the current page and offset */
4479c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4480c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4481c6fd2807SJeff Garzik 
4482c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4483c6fd2807SJeff Garzik 
4484c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4485c6fd2807SJeff Garzik 		unsigned long flags;
4486c6fd2807SJeff Garzik 
4487c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
4488c6fd2807SJeff Garzik 		local_irq_save(flags);
4489c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4490c6fd2807SJeff Garzik 
4491c6fd2807SJeff Garzik 		/* do the actual data transfer */
44925a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4493c6fd2807SJeff Garzik 
4494c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4495c6fd2807SJeff Garzik 		local_irq_restore(flags);
4496c6fd2807SJeff Garzik 	} else {
4497c6fd2807SJeff Garzik 		buf = page_address(page);
44985a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4499c6fd2807SJeff Garzik 	}
4500c6fd2807SJeff Garzik 
45015a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
45025a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
4503c6fd2807SJeff Garzik 
4504726f0785STejun Heo 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4505c6fd2807SJeff Garzik 		qc->cursg++;
4506c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4507c6fd2807SJeff Garzik 	}
4508c6fd2807SJeff Garzik }
4509c6fd2807SJeff Garzik 
4510c6fd2807SJeff Garzik /**
45115a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
4512c6fd2807SJeff Garzik  *	@qc: Command in progress
4513c6fd2807SJeff Garzik  *
45145a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
4515c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
4516c6fd2807SJeff Garzik  *
4517c6fd2807SJeff Garzik  *	LOCKING:
4518c6fd2807SJeff Garzik  *	Inherited from caller.
4519c6fd2807SJeff Garzik  */
4520c6fd2807SJeff Garzik 
4521c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
4522c6fd2807SJeff Garzik {
4523c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
4524c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
4525c6fd2807SJeff Garzik 		unsigned int nsect;
4526c6fd2807SJeff Garzik 
4527c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
4528c6fd2807SJeff Garzik 
45295a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4530726f0785STejun Heo 			    qc->dev->multi_count);
4531c6fd2807SJeff Garzik 		while (nsect--)
4532c6fd2807SJeff Garzik 			ata_pio_sector(qc);
4533c6fd2807SJeff Garzik 	} else
4534c6fd2807SJeff Garzik 		ata_pio_sector(qc);
4535c6fd2807SJeff Garzik }
4536c6fd2807SJeff Garzik 
4537c6fd2807SJeff Garzik /**
4538c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
4539c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
4540c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
4541c6fd2807SJeff Garzik  *
4542c6fd2807SJeff Garzik  *	When the device has indicated its readiness to accept
4543c6fd2807SJeff Garzik  *	a CDB, this function is called to send the CDB.
4544c6fd2807SJeff Garzik  *
4545c6fd2807SJeff Garzik  *	LOCKING:
4546c6fd2807SJeff Garzik  *	caller.
4547c6fd2807SJeff Garzik  */
4548c6fd2807SJeff Garzik 
4549c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4550c6fd2807SJeff Garzik {
4551c6fd2807SJeff Garzik 	/* send SCSI cdb */
4552c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
4553c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
4554c6fd2807SJeff Garzik 
4555c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4556c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4557c6fd2807SJeff Garzik 
4558c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
4559c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
4560c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
4561c6fd2807SJeff Garzik 		break;
4562c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
4563c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4564c6fd2807SJeff Garzik 		break;
4565c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
4566c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4567c6fd2807SJeff Garzik 		/* initiate bmdma */
4568c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
4569c6fd2807SJeff Garzik 		break;
4570c6fd2807SJeff Garzik 	}
4571c6fd2807SJeff Garzik }
4572c6fd2807SJeff Garzik 
4573c6fd2807SJeff Garzik /**
4574c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4575c6fd2807SJeff Garzik  *	@qc: Command in progress
4576c6fd2807SJeff Garzik  *	@bytes: number of bytes
4577c6fd2807SJeff Garzik  *
4578c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4579c6fd2807SJeff Garzik  *
4580c6fd2807SJeff Garzik  *	LOCKING:
4581c6fd2807SJeff Garzik  *	Inherited from caller.
4582c6fd2807SJeff Garzik  *
4583c6fd2807SJeff Garzik  */
4584c6fd2807SJeff Garzik 
4585c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4586c6fd2807SJeff Garzik {
4587c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4588c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4589c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4590c6fd2807SJeff Garzik 	struct page *page;
4591c6fd2807SJeff Garzik 	unsigned char *buf;
4592c6fd2807SJeff Garzik 	unsigned int offset, count;
4593c6fd2807SJeff Garzik 
4594c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
4595c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4596c6fd2807SJeff Garzik 
4597c6fd2807SJeff Garzik next_sg:
4598c6fd2807SJeff Garzik 	if (unlikely(qc->cursg >= qc->n_elem)) {
4599c6fd2807SJeff Garzik 		/*
4600c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
4601c6fd2807SJeff Garzik 		 * more data to transfer.  To avoid overrunning qc->sg while
4602c6fd2807SJeff Garzik 		 * still satisfying the length in the byte count register,
4603c6fd2807SJeff Garzik 		 *    - for the read case, discard trailing data from the device
4604c6fd2807SJeff Garzik 		 *    - for the write case, pad with zero data to the device
4605c6fd2807SJeff Garzik 		 */
4606c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
4607c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
4608c6fd2807SJeff Garzik 		unsigned int i;
4609c6fd2807SJeff Garzik 
4610c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
4611c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
4612c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
4613c6fd2807SJeff Garzik 
4614c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
4615c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4616c6fd2807SJeff Garzik 
4617c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4618c6fd2807SJeff Garzik 		return;
4619c6fd2807SJeff Garzik 	}
4620c6fd2807SJeff Garzik 
4621c6fd2807SJeff Garzik 	sg = &qc->__sg[qc->cursg];
4622c6fd2807SJeff Garzik 
4623c6fd2807SJeff Garzik 	page = sg->page;
4624c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
4625c6fd2807SJeff Garzik 
4626c6fd2807SJeff Garzik 	/* get the current page and offset */
4627c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4628c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4629c6fd2807SJeff Garzik 
4630c6fd2807SJeff Garzik 	/* don't overrun current sg */
4631c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
4632c6fd2807SJeff Garzik 
4633c6fd2807SJeff Garzik 	/* don't cross page boundaries */
4634c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
4635c6fd2807SJeff Garzik 
4636c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4637c6fd2807SJeff Garzik 
4638c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4639c6fd2807SJeff Garzik 		unsigned long flags;
4640c6fd2807SJeff Garzik 
4641c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
4642c6fd2807SJeff Garzik 		local_irq_save(flags);
4643c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4644c6fd2807SJeff Garzik 
4645c6fd2807SJeff Garzik 		/* do the actual data transfer */
4646c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4647c6fd2807SJeff Garzik 
4648c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4649c6fd2807SJeff Garzik 		local_irq_restore(flags);
4650c6fd2807SJeff Garzik 	} else {
4651c6fd2807SJeff Garzik 		buf = page_address(page);
4652c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4653c6fd2807SJeff Garzik 	}
4654c6fd2807SJeff Garzik 
4655c6fd2807SJeff Garzik 	bytes -= count;
4656c6fd2807SJeff Garzik 	qc->curbytes += count;
4657c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
4658c6fd2807SJeff Garzik 
4659c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
4660c6fd2807SJeff Garzik 		qc->cursg++;
4661c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4662c6fd2807SJeff Garzik 	}
4663c6fd2807SJeff Garzik 
4664c6fd2807SJeff Garzik 	if (bytes)
4665c6fd2807SJeff Garzik 		goto next_sg;
4666c6fd2807SJeff Garzik }
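
/*
 * Worked example (editorial illustration, not part of the original
 * source): suppose the device's byte count register requests 2048
 * bytes for this DRQ but qc->__sg only maps 2040 bytes.  The first
 * 2040 bytes go through the normal sg walk above; on the next pass
 * cursg has reached n_elem, so the remaining 8 bytes are moved as
 * 4 words through pad_buf -- discarded on a read, sent as zeros on
 * a write -- and the HSM is parked in HSM_ST_LAST.
 */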
4667c6fd2807SJeff Garzik 
4668c6fd2807SJeff Garzik /**
4669c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
4670c6fd2807SJeff Garzik  *	@qc: Command in progress
4671c6fd2807SJeff Garzik  *
4672c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4673c6fd2807SJeff Garzik  *
4674c6fd2807SJeff Garzik  *	LOCKING:
4675c6fd2807SJeff Garzik  *	Inherited from caller.
4676c6fd2807SJeff Garzik  */
4677c6fd2807SJeff Garzik 
4678c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4679c6fd2807SJeff Garzik {
4680c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4681c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
4682c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
4683c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4684c6fd2807SJeff Garzik 
4685c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
4686c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
4687c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
4688c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
4689c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
4690c6fd2807SJeff Garzik 	 */
4691c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
4692c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
4693c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
4694c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
4695c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
4696c6fd2807SJeff Garzik 
4697c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
4698c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
4699c6fd2807SJeff Garzik 		goto err_out;
4700c6fd2807SJeff Garzik 
4701c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
4702c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4703c6fd2807SJeff Garzik 	if (do_write != i_write)
4704c6fd2807SJeff Garzik 		goto err_out;
4705c6fd2807SJeff Garzik 
470644877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4707c6fd2807SJeff Garzik 
4708c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
4709c6fd2807SJeff Garzik 
4710c6fd2807SJeff Garzik 	return;
4711c6fd2807SJeff Garzik 
4712c6fd2807SJeff Garzik err_out:
4713c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4714c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
4715c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
4716c6fd2807SJeff Garzik }
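
/*
 * Illustrative sketch (editorial, not part of the original source):
 * the register decoding that atapi_pio_bytes() performs inline via
 * qc->result_tf, pulled out into a hypothetical helper.  Bit 0 of the
 * interrupt reason register (the sector count register) is CoD and
 * must be 0 for a data phase; bit 1 is I/O and gives the direction;
 * the byte count for this DRQ lives in the LBA mid/high registers.
 */
static inline unsigned int example_atapi_xfer_len(const struct ata_taskfile *tf,
						  int *is_cdb, int *is_to_host)
{
	*is_cdb     = tf->nsect & (1 << 0);	/* CoD: 1 = CDB, 0 = data */
	*is_to_host = tf->nsect & (1 << 1);	/* I/O: 1 = device-to-host */
	return (tf->lbah << 8) | tf->lbam;	/* byte count register pair */
}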
4717c6fd2807SJeff Garzik 
4718c6fd2807SJeff Garzik /**
4719c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4720c6fd2807SJeff Garzik  *	@ap: the target ata_port
4721c6fd2807SJeff Garzik  *	@qc: qc in progress
4722c6fd2807SJeff Garzik  *
4723c6fd2807SJeff Garzik  *	RETURNS:
4724c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
4725c6fd2807SJeff Garzik  */
4726c6fd2807SJeff Garzik 
4727c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4728c6fd2807SJeff Garzik {
4729c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
4730c6fd2807SJeff Garzik 		return 1;
4731c6fd2807SJeff Garzik 
4732c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
4733c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
4734c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
4735c6fd2807SJeff Garzik 		    return 1;
4736c6fd2807SJeff Garzik 
4737c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
4738c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4739c6fd2807SJeff Garzik 			return 1;
4740c6fd2807SJeff Garzik 	}
4741c6fd2807SJeff Garzik 
4742c6fd2807SJeff Garzik 	return 0;
4743c6fd2807SJeff Garzik }
4744c6fd2807SJeff Garzik 
4745c6fd2807SJeff Garzik /**
4746c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
4747c6fd2807SJeff Garzik  *	@qc: Command to complete
4748c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4749c6fd2807SJeff Garzik  *
4750c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
4751c6fd2807SJeff Garzik  *
4752c6fd2807SJeff Garzik  *	LOCKING:
4753cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
4754c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
4755c6fd2807SJeff Garzik  */
4756c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4757c6fd2807SJeff Garzik {
4758c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4759c6fd2807SJeff Garzik 	unsigned long flags;
4760c6fd2807SJeff Garzik 
4761c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
4762c6fd2807SJeff Garzik 		if (in_wq) {
4763c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4764c6fd2807SJeff Garzik 
4765cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
4766cca3974eSJeff Garzik 			 * released.
4767c6fd2807SJeff Garzik 			 */
4768c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
4769c6fd2807SJeff Garzik 			if (qc) {
4770c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
477183625006SAkira Iguchi 					ap->ops->irq_on(ap);
4772c6fd2807SJeff Garzik 					ata_qc_complete(qc);
4773c6fd2807SJeff Garzik 				} else
4774c6fd2807SJeff Garzik 					ata_port_freeze(ap);
4775c6fd2807SJeff Garzik 			}
4776c6fd2807SJeff Garzik 
4777c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4778c6fd2807SJeff Garzik 		} else {
4779c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
4780c6fd2807SJeff Garzik 				ata_qc_complete(qc);
4781c6fd2807SJeff Garzik 			else
4782c6fd2807SJeff Garzik 				ata_port_freeze(ap);
4783c6fd2807SJeff Garzik 		}
4784c6fd2807SJeff Garzik 	} else {
4785c6fd2807SJeff Garzik 		if (in_wq) {
4786c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
478783625006SAkira Iguchi 			ap->ops->irq_on(ap);
4788c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4789c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4790c6fd2807SJeff Garzik 		} else
4791c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4792c6fd2807SJeff Garzik 	}
4793c6fd2807SJeff Garzik 
4794c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4795c6fd2807SJeff Garzik }
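
/*
 * Editorial illustration (not part of the original source) of the two
 * locking conventions documented above, simplified from the real
 * callers in this file:
 *
 *	interrupt path (in_wq == 0)		workqueue path (in_wq == 1)
 *	---------------------------		---------------------------
 *	spin_lock_irqsave(&host->lock, ...)	host lock not held on entry
 *	ata_hsm_move(ap, qc, status, 0)		ata_hsm_move(ap, qc, status, 1)
 *	  -> ata_hsm_qc_complete(qc, 0)		  -> ata_hsm_qc_complete(qc, 1)
 *	     (completes under caller's lock)	     (takes ap->lock itself)
 *	spin_unlock_irqrestore(...)
 */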
4796c6fd2807SJeff Garzik 
4797c6fd2807SJeff Garzik /**
4798c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
4799c6fd2807SJeff Garzik  *	@ap: the target ata_port
4800c6fd2807SJeff Garzik  *	@qc: qc in progress
4801c6fd2807SJeff Garzik  *	@status: current device status
4802c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4803c6fd2807SJeff Garzik  *
4804c6fd2807SJeff Garzik  *	RETURNS:
4805c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
4806c6fd2807SJeff Garzik  */
4807c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4808c6fd2807SJeff Garzik 		 u8 status, int in_wq)
4809c6fd2807SJeff Garzik {
4810c6fd2807SJeff Garzik 	unsigned long flags = 0;
4811c6fd2807SJeff Garzik 	int poll_next;
4812c6fd2807SJeff Garzik 
4813c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4814c6fd2807SJeff Garzik 
4815c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
4816c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
4817c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4818c6fd2807SJeff Garzik 	 */
4819c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4820c6fd2807SJeff Garzik 
4821c6fd2807SJeff Garzik fsm_start:
4822c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
482344877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4824c6fd2807SJeff Garzik 
4825c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
4826c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
4827c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
4828c6fd2807SJeff Garzik 
4829c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
4830c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
4831c6fd2807SJeff Garzik 		 * takes over after sending the data.
4832c6fd2807SJeff Garzik 		 */
4833c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4834c6fd2807SJeff Garzik 
4835c6fd2807SJeff Garzik 		/* check device status */
4836c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
4837c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
4838c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
4839c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
4840c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
4841c6fd2807SJeff Garzik 			else
4842c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
4843c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4844c6fd2807SJeff Garzik 
4845c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4846c6fd2807SJeff Garzik 			goto fsm_start;
4847c6fd2807SJeff Garzik 		}
4848c6fd2807SJeff Garzik 
4849c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
4850c6fd2807SJeff Garzik 		 * when it finds something wrong.
4851c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
4852c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
4853c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
4854c6fd2807SJeff Garzik 		 */
4855c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
485644877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
485744877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
4858c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
4859c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4860c6fd2807SJeff Garzik 			goto fsm_start;
4861c6fd2807SJeff Garzik 		}
4862c6fd2807SJeff Garzik 
4863c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
4864c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
4865c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
4866c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
4867c6fd2807SJeff Garzik 		 */
4868c6fd2807SJeff Garzik 		if (in_wq)
4869c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4870c6fd2807SJeff Garzik 
4871c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
4872c6fd2807SJeff Garzik 			/* PIO data out protocol.
4873c6fd2807SJeff Garzik 			 * send first data block.
4874c6fd2807SJeff Garzik 			 */
4875c6fd2807SJeff Garzik 
4876c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
4877c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
4878c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
4879c6fd2807SJeff Garzik 			 */
4880c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
4881c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
4882c6fd2807SJeff Garzik 			ata_altstatus(ap); /* flush */
4883c6fd2807SJeff Garzik 		} else
4884c6fd2807SJeff Garzik 			/* send CDB */
4885c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
4886c6fd2807SJeff Garzik 
4887c6fd2807SJeff Garzik 		if (in_wq)
4888c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4889c6fd2807SJeff Garzik 
4890c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
4891c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
4892c6fd2807SJeff Garzik 		 */
4893c6fd2807SJeff Garzik 		break;
4894c6fd2807SJeff Garzik 
4895c6fd2807SJeff Garzik 	case HSM_ST:
4896c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
4897c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
4898c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
4899c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
4900c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
4901c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
4902c6fd2807SJeff Garzik 				 */
4903c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
4904c6fd2807SJeff Garzik 				goto fsm_start;
4905c6fd2807SJeff Garzik 			}
4906c6fd2807SJeff Garzik 
4907c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
4908c6fd2807SJeff Garzik 			 * when it finds something wrong.
4909c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
4910c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
4911c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
4912c6fd2807SJeff Garzik 			 */
4913c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
491444877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
491544877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
491644877b4eSTejun Heo 						status);
4917c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4918c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4919c6fd2807SJeff Garzik 				goto fsm_start;
4920c6fd2807SJeff Garzik 			}
4921c6fd2807SJeff Garzik 
4922c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
4923c6fd2807SJeff Garzik 
4924c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4925c6fd2807SJeff Garzik 				/* bad ireason reported by device */
4926c6fd2807SJeff Garzik 				goto fsm_start;
4927c6fd2807SJeff Garzik 
4928c6fd2807SJeff Garzik 		} else {
4929c6fd2807SJeff Garzik 			/* ATA PIO protocol */
4930c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
4931c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
4932c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
4933c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
4934c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
4935c6fd2807SJeff Garzik 				else
493655a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
493755a8e2c8STejun Heo 					 * Phantom devices also trigger this
493855a8e2c8STejun Heo 					 * condition.  Mark hint.
493955a8e2c8STejun Heo 					 */
494055a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
494155a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
4942c6fd2807SJeff Garzik 
4943c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4944c6fd2807SJeff Garzik 				goto fsm_start;
4945c6fd2807SJeff Garzik 			}
4946c6fd2807SJeff Garzik 
4947c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
4948c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
4949c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
4950c6fd2807SJeff Garzik 			 * block of junk data before changing the
4951c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
4952c6fd2807SJeff Garzik 			 *
4953c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
4954c6fd2807SJeff Garzik 			 * sense since the data block has been
4955c6fd2807SJeff Garzik 			 * transferred to the device.
4956c6fd2807SJeff Garzik 			 */
4957c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
4958c6fd2807SJeff Garzik 				/* data might be corrupted */
4959c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
4960c6fd2807SJeff Garzik 
4961c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4962c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
4963c6fd2807SJeff Garzik 					ata_altstatus(ap);
4964c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
4965c6fd2807SJeff Garzik 				}
4966c6fd2807SJeff Garzik 
4967c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
4968c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
4969c6fd2807SJeff Garzik 
4970c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
4971c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
4972c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
4973c6fd2807SJeff Garzik 				 */
4974c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4975c6fd2807SJeff Garzik 				goto fsm_start;
4976c6fd2807SJeff Garzik 			}
4977c6fd2807SJeff Garzik 
4978c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
4979c6fd2807SJeff Garzik 
4980c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
4981c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4982c6fd2807SJeff Garzik 				/* all data read */
4983c6fd2807SJeff Garzik 				ata_altstatus(ap);
4984c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
4985c6fd2807SJeff Garzik 				goto fsm_start;
4986c6fd2807SJeff Garzik 			}
4987c6fd2807SJeff Garzik 		}
4988c6fd2807SJeff Garzik 
4989c6fd2807SJeff Garzik 		ata_altstatus(ap); /* flush */
4990c6fd2807SJeff Garzik 		poll_next = 1;
4991c6fd2807SJeff Garzik 		break;
4992c6fd2807SJeff Garzik 
4993c6fd2807SJeff Garzik 	case HSM_ST_LAST:
4994c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
4995c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
4996c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4997c6fd2807SJeff Garzik 			goto fsm_start;
4998c6fd2807SJeff Garzik 		}
4999c6fd2807SJeff Garzik 
5000c6fd2807SJeff Garzik 		/* no more data to transfer */
5001c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
500244877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5003c6fd2807SJeff Garzik 
5004c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5005c6fd2807SJeff Garzik 
5006c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5007c6fd2807SJeff Garzik 
5008c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5009c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5010c6fd2807SJeff Garzik 
5011c6fd2807SJeff Garzik 		poll_next = 0;
5012c6fd2807SJeff Garzik 		break;
5013c6fd2807SJeff Garzik 
5014c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5015c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5016c6fd2807SJeff Garzik 		 * know what's wrong and recover
5017c6fd2807SJeff Garzik 		 */
5018c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5019c6fd2807SJeff Garzik 
5020c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5021c6fd2807SJeff Garzik 
5022c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5023c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5024c6fd2807SJeff Garzik 
5025c6fd2807SJeff Garzik 		poll_next = 0;
5026c6fd2807SJeff Garzik 		break;
5027c6fd2807SJeff Garzik 	default:
5028c6fd2807SJeff Garzik 		poll_next = 0;
5029c6fd2807SJeff Garzik 		BUG();
5030c6fd2807SJeff Garzik 	}
5031c6fd2807SJeff Garzik 
5032c6fd2807SJeff Garzik 	return poll_next;
5033c6fd2807SJeff Garzik }
5034c6fd2807SJeff Garzik 
503565f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5036c6fd2807SJeff Garzik {
503765f27f38SDavid Howells 	struct ata_port *ap =
503865f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
503965f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5040c6fd2807SJeff Garzik 	u8 status;
5041c6fd2807SJeff Garzik 	int poll_next;
5042c6fd2807SJeff Garzik 
5043c6fd2807SJeff Garzik fsm_start:
5044c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5045c6fd2807SJeff Garzik 
5046c6fd2807SJeff Garzik 	/*
5047c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5048c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5049c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5050c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5051c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5052c6fd2807SJeff Garzik 	 */
5053c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5054c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5055c6fd2807SJeff Garzik 		msleep(2);
5056c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5057c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5058c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5059c6fd2807SJeff Garzik 			return;
5060c6fd2807SJeff Garzik 		}
5061c6fd2807SJeff Garzik 	}
5062c6fd2807SJeff Garzik 
5063c6fd2807SJeff Garzik 	/* move the HSM */
5064c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5065c6fd2807SJeff Garzik 
5066c6fd2807SJeff Garzik 	/* another command or interrupt handler
5067c6fd2807SJeff Garzik 	 * may be running at this point.
5068c6fd2807SJeff Garzik 	 */
5069c6fd2807SJeff Garzik 	if (poll_next)
5070c6fd2807SJeff Garzik 		goto fsm_start;
5071c6fd2807SJeff Garzik }
5072c6fd2807SJeff Garzik 
5073c6fd2807SJeff Garzik /**
5074c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5075c6fd2807SJeff Garzik  *	@ap: Port associated with device @dev
5076c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5077c6fd2807SJeff Garzik  *
5078c6fd2807SJeff Garzik  *	LOCKING:
5079c6fd2807SJeff Garzik  *	None.
5080c6fd2807SJeff Garzik  */
5081c6fd2807SJeff Garzik 
5082c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5083c6fd2807SJeff Garzik {
5084c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5085c6fd2807SJeff Garzik 	unsigned int i;
5086c6fd2807SJeff Garzik 
5087c6fd2807SJeff Garzik 	/* no command while frozen */
5088c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5089c6fd2807SJeff Garzik 		return NULL;
5090c6fd2807SJeff Garzik 
5091c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5092c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5093c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5094c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5095c6fd2807SJeff Garzik 			break;
5096c6fd2807SJeff Garzik 		}
5097c6fd2807SJeff Garzik 
5098c6fd2807SJeff Garzik 	if (qc)
5099c6fd2807SJeff Garzik 		qc->tag = i;
5100c6fd2807SJeff Garzik 
5101c6fd2807SJeff Garzik 	return qc;
5102c6fd2807SJeff Garzik }
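
/*
 * Worked example (editorial, not part of the original source): with
 * ap->qc_allocated == 0x0003, tags 0 and 1 are busy, so the
 * test_and_set_bit() scan above skips them and atomically claims
 * tag 2 without taking any lock; tag ATA_MAX_QUEUE - 1 is never
 * handed out here because it is reserved for libata-internal
 * commands.
 */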
5103c6fd2807SJeff Garzik 
5104c6fd2807SJeff Garzik /**
5105c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5106c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5107c6fd2807SJeff Garzik  *
5108c6fd2807SJeff Garzik  *	LOCKING:
5109c6fd2807SJeff Garzik  *	None.
5110c6fd2807SJeff Garzik  */
5111c6fd2807SJeff Garzik 
5112c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5113c6fd2807SJeff Garzik {
5114c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5115c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5116c6fd2807SJeff Garzik 
5117c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5118c6fd2807SJeff Garzik 	if (qc) {
5119c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5120c6fd2807SJeff Garzik 		qc->ap = ap;
5121c6fd2807SJeff Garzik 		qc->dev = dev;
5122c6fd2807SJeff Garzik 
5123c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5124c6fd2807SJeff Garzik 	}
5125c6fd2807SJeff Garzik 
5126c6fd2807SJeff Garzik 	return qc;
5127c6fd2807SJeff Garzik }
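
/*
 * Illustrative sketch (editorial, not part of the original source) of
 * the lifecycle these allocators support.  example_issue_flush() and
 * its completion hook are hypothetical; the real user of
 * ata_qc_new_init() is the SCSI translation path in libata-scsi.c,
 * which also fills in the full taskfile flags omitted here.
 */
static void example_issue_flush(struct ata_device *dev,
				void (*done_fn)(struct ata_queued_cmd *))
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);		/* claim a free tag, or NULL */
	if (!qc)
		return;				/* port frozen or queue full */

	qc->tf.command  = ATA_CMD_FLUSH;	/* caller builds the taskfile */
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->complete_fn = done_fn;		/* called by __ata_qc_complete() */

	ata_qc_issue(qc);			/* host lock must be held here */
}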
5128c6fd2807SJeff Garzik 
5129c6fd2807SJeff Garzik /**
5130c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5131c6fd2807SJeff Garzik  *	@qc: Command to complete
5132c6fd2807SJeff Garzik  *
5133c6fd2807SJeff Garzik  *	Designed to free unused ata_queued_cmd object
5134c6fd2807SJeff Garzik  *	in case something prevents using it.
5135c6fd2807SJeff Garzik  *
5136c6fd2807SJeff Garzik  *	LOCKING:
5137cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5138c6fd2807SJeff Garzik  */
5139c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5140c6fd2807SJeff Garzik {
5141c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5142c6fd2807SJeff Garzik 	unsigned int tag;
5143c6fd2807SJeff Garzik 
5144c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5145c6fd2807SJeff Garzik 
5146c6fd2807SJeff Garzik 	qc->flags = 0;
5147c6fd2807SJeff Garzik 	tag = qc->tag;
5148c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5149c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5150c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5151c6fd2807SJeff Garzik 	}
5152c6fd2807SJeff Garzik }
5153c6fd2807SJeff Garzik 
5154c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5155c6fd2807SJeff Garzik {
5156c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5157c6fd2807SJeff Garzik 
5158c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5159c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5160c6fd2807SJeff Garzik 
5161c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5162c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5163c6fd2807SJeff Garzik 
5164c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5165c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ)
5166c6fd2807SJeff Garzik 		ap->sactive &= ~(1 << qc->tag);
5167c6fd2807SJeff Garzik 	else
5168c6fd2807SJeff Garzik 		ap->active_tag = ATA_TAG_POISON;
5169c6fd2807SJeff Garzik 
5170c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5171c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5172c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5173c6fd2807SJeff Garzik 	 */
5174c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5175c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5176c6fd2807SJeff Garzik 
5177c6fd2807SJeff Garzik 	/* call completion callback */
5178c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5179c6fd2807SJeff Garzik }
5180c6fd2807SJeff Garzik 
518139599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
518239599a53STejun Heo {
518339599a53STejun Heo 	struct ata_port *ap = qc->ap;
518439599a53STejun Heo 
518539599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
51864742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
518739599a53STejun Heo }
518839599a53STejun Heo 
5189c6fd2807SJeff Garzik /**
5190c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5191c6fd2807SJeff Garzik  *	@qc: Command to complete
5193c6fd2807SJeff Garzik  *
5194c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5195c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5196c6fd2807SJeff Garzik  *
5197c6fd2807SJeff Garzik  *	LOCKING:
5198cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5199c6fd2807SJeff Garzik  */
5200c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5201c6fd2807SJeff Garzik {
5202c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5203c6fd2807SJeff Garzik 
5204c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5205c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5206c6fd2807SJeff Garzik 	 *
5207c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5208c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5209c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5210c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5211c6fd2807SJeff Garzik 	 *
5212c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5213c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5214c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5215c6fd2807SJeff Garzik 	 * taken care of.
5216c6fd2807SJeff Garzik 	 */
5217c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5218c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5219c6fd2807SJeff Garzik 
5220c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5221c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5222c6fd2807SJeff Garzik 
5223c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5224c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5225c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
522639599a53STejun Heo 				fill_result_tf(qc);
5227c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5228c6fd2807SJeff Garzik 				return;
5229c6fd2807SJeff Garzik 			}
5230c6fd2807SJeff Garzik 		}
5231c6fd2807SJeff Garzik 
5232c6fd2807SJeff Garzik 		/* read result TF if requested */
5233c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
523439599a53STejun Heo 			fill_result_tf(qc);
5235c6fd2807SJeff Garzik 
5236c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5237c6fd2807SJeff Garzik 	} else {
5238c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5239c6fd2807SJeff Garzik 			return;
5240c6fd2807SJeff Garzik 
5241c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5242c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
524339599a53STejun Heo 			fill_result_tf(qc);
5244c6fd2807SJeff Garzik 
5245c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5246c6fd2807SJeff Garzik 	}
5247c6fd2807SJeff Garzik }
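
/*
 * Illustrative sketch (editorial, not part of the original source):
 * how a low-level driver's interrupt handler typically hands a
 * finished command back through ata_qc_complete().  The helper name
 * is hypothetical and the status handling is reduced to the minimum.
 */
static void example_lld_complete(struct ata_port *ap, u8 drv_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	if (!qc)			/* qc already failed or gone */
		return;

	qc->err_mask |= ac_err_mask(drv_stat);	/* ERR/DF -> AC_ERR_DEV, etc. */
	ata_qc_complete(qc);			/* host lock held by the caller */
}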
5248c6fd2807SJeff Garzik 
5249c6fd2807SJeff Garzik /**
5250c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5251c6fd2807SJeff Garzik  *	@ap: port in question
5252c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5253c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5254c6fd2807SJeff Garzik  *
5255c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5256c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to complete
5257c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5258c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5259c6fd2807SJeff Garzik  *
5260c6fd2807SJeff Garzik  *	LOCKING:
5261cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5262c6fd2807SJeff Garzik  *
5263c6fd2807SJeff Garzik  *	RETURNS:
5264c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5265c6fd2807SJeff Garzik  */
5266c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5267c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5268c6fd2807SJeff Garzik {
5269c6fd2807SJeff Garzik 	int nr_done = 0;
5270c6fd2807SJeff Garzik 	u32 done_mask;
5271c6fd2807SJeff Garzik 	int i;
5272c6fd2807SJeff Garzik 
5273c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5274c6fd2807SJeff Garzik 
5275c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5276c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5277c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5278c6fd2807SJeff Garzik 		return -EINVAL;
5279c6fd2807SJeff Garzik 	}
5280c6fd2807SJeff Garzik 
5281c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5282c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5283c6fd2807SJeff Garzik 
5284c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5285c6fd2807SJeff Garzik 			continue;
5286c6fd2807SJeff Garzik 
5287c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5288c6fd2807SJeff Garzik 			if (finish_qc)
5289c6fd2807SJeff Garzik 				finish_qc(qc);
5290c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5291c6fd2807SJeff Garzik 			nr_done++;
5292c6fd2807SJeff Garzik 		}
5293c6fd2807SJeff Garzik 	}
5294c6fd2807SJeff Garzik 
5295c6fd2807SJeff Garzik 	return nr_done;
5296c6fd2807SJeff Garzik }
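
/*
 * Worked example (editorial, not part of the original source): if
 * ap->qc_active is 0x0007 (tags 0-2 in flight) and the controller now
 * reports qc_active == 0x0004 (only tag 2 still active), then
 * done_mask = 0x0007 ^ 0x0004 = 0x0003, tags 0 and 1 are completed and
 * the function returns 2.  A done_mask bit that is also set in the new
 * qc_active would mean a command became active behind libata's back,
 * which is rejected as an illegal transition.
 */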
5297c6fd2807SJeff Garzik 
5298c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5299c6fd2807SJeff Garzik {
5300c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5301c6fd2807SJeff Garzik 
5302c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5303c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5304c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5305c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5306c6fd2807SJeff Garzik 		return 1;
5307c6fd2807SJeff Garzik 
5308c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5309c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5310c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5311c6fd2807SJeff Garzik 			return 1;
5312c6fd2807SJeff Garzik 
5313c6fd2807SJeff Garzik 		/* fall through */
5314c6fd2807SJeff Garzik 
5315c6fd2807SJeff Garzik 	default:
5316c6fd2807SJeff Garzik 		return 0;
5317c6fd2807SJeff Garzik 	}
5318c6fd2807SJeff Garzik 
5319c6fd2807SJeff Garzik 	/* never reached */
5320c6fd2807SJeff Garzik }
5321c6fd2807SJeff Garzik 
5322c6fd2807SJeff Garzik /**
5323c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5324c6fd2807SJeff Garzik  *	@qc: command to issue to device
5325c6fd2807SJeff Garzik  *
5326c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5327c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5328c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5329c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5330c6fd2807SJeff Garzik  *
5331c6fd2807SJeff Garzik  *	LOCKING:
5332cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5333c6fd2807SJeff Garzik  */
5334c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5335c6fd2807SJeff Garzik {
5336c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5337c6fd2807SJeff Garzik 
5338c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5339c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5340c6fd2807SJeff Garzik 	 * request ATAPI sense.
5341c6fd2807SJeff Garzik 	 */
5342c6fd2807SJeff Garzik 	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5343c6fd2807SJeff Garzik 
5344c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
5345c6fd2807SJeff Garzik 		WARN_ON(ap->sactive & (1 << qc->tag));
5346c6fd2807SJeff Garzik 		ap->sactive |= 1 << qc->tag;
5347c6fd2807SJeff Garzik 	} else {
5348c6fd2807SJeff Garzik 		WARN_ON(ap->sactive);
5349c6fd2807SJeff Garzik 		ap->active_tag = qc->tag;
5350c6fd2807SJeff Garzik 	}
5351c6fd2807SJeff Garzik 
5352c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5353c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5354c6fd2807SJeff Garzik 
5355c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5356c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5357c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5358c6fd2807SJeff Garzik 				goto sg_err;
5359c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5360c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5361c6fd2807SJeff Garzik 				goto sg_err;
5362c6fd2807SJeff Garzik 		}
5363c6fd2807SJeff Garzik 	} else {
5364c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5365c6fd2807SJeff Garzik 	}
5366c6fd2807SJeff Garzik 
5367c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5368c6fd2807SJeff Garzik 
5369c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5370c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5371c6fd2807SJeff Garzik 		goto err;
5372c6fd2807SJeff Garzik 	return;
5373c6fd2807SJeff Garzik 
5374c6fd2807SJeff Garzik sg_err:
5375c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5376c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5377c6fd2807SJeff Garzik err:
5378c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5379c6fd2807SJeff Garzik }
5380c6fd2807SJeff Garzik 
5381c6fd2807SJeff Garzik /**
5382c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5383c6fd2807SJeff Garzik  *	@qc: command to issue to device
5384c6fd2807SJeff Garzik  *
5385c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5386c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5387c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5388c6fd2807SJeff Garzik  *	is slightly different.
5389c6fd2807SJeff Garzik  *
5390c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5391c6fd2807SJeff Garzik  *
5392c6fd2807SJeff Garzik  *	LOCKING:
5393cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5394c6fd2807SJeff Garzik  *
5395c6fd2807SJeff Garzik  *	RETURNS:
5396c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
5397c6fd2807SJeff Garzik  */
5398c6fd2807SJeff Garzik 
5399c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5400c6fd2807SJeff Garzik {
5401c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5402c6fd2807SJeff Garzik 
5403c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
5404c6fd2807SJeff Garzik 	 * interrupt-driven PIO and the ATAPI CDB interrupt.
5405c6fd2807SJeff Garzik 	 */
5406c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5407c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
5408c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
5409e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
5410c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
5411c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
5412c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
5413c6fd2807SJeff Garzik 			break;
5414c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
5415c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5416c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
5417c6fd2807SJeff Garzik 				BUG();
5418c6fd2807SJeff Garzik 			break;
5419c6fd2807SJeff Garzik 		default:
5420c6fd2807SJeff Garzik 			break;
5421c6fd2807SJeff Garzik 		}
5422c6fd2807SJeff Garzik 	}
5423c6fd2807SJeff Garzik 
5424c6fd2807SJeff Garzik 	/* select the device */
5425c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5426c6fd2807SJeff Garzik 
5427c6fd2807SJeff Garzik 	/* start the command */
5428c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5429c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
5430c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5431c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5432c6fd2807SJeff Garzik 
5433c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5434c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5435c6fd2807SJeff Garzik 
5436c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5437c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5438c6fd2807SJeff Garzik 
5439c6fd2807SJeff Garzik 		break;
5440c6fd2807SJeff Garzik 
5441c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5442c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5443c6fd2807SJeff Garzik 
5444c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5445c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5446c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5447c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5448c6fd2807SJeff Garzik 		break;
5449c6fd2807SJeff Garzik 
5450c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5451c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5452c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5453c6fd2807SJeff Garzik 
5454c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5455c6fd2807SJeff Garzik 
5456c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5457c6fd2807SJeff Garzik 			/* PIO data out protocol */
5458c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
5459c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5460c6fd2807SJeff Garzik 
5461c6fd2807SJeff Garzik 			/* always send first data block using
5462c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
5463c6fd2807SJeff Garzik 			 */
5464c6fd2807SJeff Garzik 		} else {
5465c6fd2807SJeff Garzik 			/* PIO data in protocol */
5466c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5467c6fd2807SJeff Garzik 
5468c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5469c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5470c6fd2807SJeff Garzik 
5471c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
5472c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
5473c6fd2807SJeff Garzik 			 */
5474c6fd2807SJeff Garzik 		}
5475c6fd2807SJeff Garzik 
5476c6fd2807SJeff Garzik 		break;
5477c6fd2807SJeff Garzik 
5478c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5479c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5480c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5481c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5482c6fd2807SJeff Garzik 
5483c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5484c6fd2807SJeff Garzik 
5485c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5486c6fd2807SJeff Garzik 
5487c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5488c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5489c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5490c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5491c6fd2807SJeff Garzik 		break;
5492c6fd2807SJeff Garzik 
5493c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5494c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5495c6fd2807SJeff Garzik 
5496c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5497c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5498c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5499c6fd2807SJeff Garzik 
5500c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5501c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5502c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5503c6fd2807SJeff Garzik 		break;
5504c6fd2807SJeff Garzik 
5505c6fd2807SJeff Garzik 	default:
5506c6fd2807SJeff Garzik 		WARN_ON(1);
5507c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
5508c6fd2807SJeff Garzik 	}
5509c6fd2807SJeff Garzik 
5510c6fd2807SJeff Garzik 	return 0;
5511c6fd2807SJeff Garzik }
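
/*
 * Illustrative fragment (editorial, not part of the original source):
 * ata_qc_issue_prot() is normally wired up as the qc_issue hook of a
 * low-level driver's ata_port_operations, alongside the taskfile and
 * BMDMA helpers it drives above.  Only a subset of the hooks a real
 * driver fills in is shown.
 */
static const struct ata_port_operations example_bmdma_ops = {
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.bmdma_setup	= ata_bmdma_setup,
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	.irq_clear	= ata_bmdma_irq_clear,
};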
5512c6fd2807SJeff Garzik 
5513c6fd2807SJeff Garzik /**
5514c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
5515c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
5516c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
5517c6fd2807SJeff Garzik  *
5518c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
5519c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
5520c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
5521c6fd2807SJeff Garzik  *
5522c6fd2807SJeff Garzik  *	LOCKING:
5523cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5524c6fd2807SJeff Garzik  *
5525c6fd2807SJeff Garzik  *	RETURNS:
5526c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
5527c6fd2807SJeff Garzik  */
5528c6fd2807SJeff Garzik 
5529c6fd2807SJeff Garzik inline unsigned int ata_host_intr (struct ata_port *ap,
5530c6fd2807SJeff Garzik 				   struct ata_queued_cmd *qc)
5531c6fd2807SJeff Garzik {
5532ea54763fSTejun Heo 	struct ata_eh_info *ehi = &ap->eh_info;
5533c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
5534c6fd2807SJeff Garzik 
5535c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
553644877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5537c6fd2807SJeff Garzik 
5538c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
5539c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5540c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5541c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
5542c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
5543c6fd2807SJeff Garzik 		 */
5544c6fd2807SJeff Garzik 
5545c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5546c6fd2807SJeff Garzik 		 * The flag is set only for ATAPI devices.
5547c6fd2807SJeff Garzik 		 * No need to check is_atapi_taskfile(&qc->tf) again.
5548c6fd2807SJeff Garzik 		 */
5549c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5550c6fd2807SJeff Garzik 			goto idle_irq;
5551c6fd2807SJeff Garzik 		break;
5552c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5553c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
5554c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5555c6fd2807SJeff Garzik 			/* check status of DMA engine */
5556c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
555744877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
555844877b4eSTejun Heo 				ap->print_id, host_stat);
5559c6fd2807SJeff Garzik 
5560c6fd2807SJeff Garzik 			/* if it's not our irq... */
5561c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
5562c6fd2807SJeff Garzik 				goto idle_irq;
5563c6fd2807SJeff Garzik 
5564c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
5565c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
5566c6fd2807SJeff Garzik 
5567c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
5568c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
5569c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
5570c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5571c6fd2807SJeff Garzik 			}
5572c6fd2807SJeff Garzik 		}
5573c6fd2807SJeff Garzik 		break;
5574c6fd2807SJeff Garzik 	case HSM_ST:
5575c6fd2807SJeff Garzik 		break;
5576c6fd2807SJeff Garzik 	default:
5577c6fd2807SJeff Garzik 		goto idle_irq;
5578c6fd2807SJeff Garzik 	}
5579c6fd2807SJeff Garzik 
5580c6fd2807SJeff Garzik 	/* check altstatus */
5581c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
5582c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
5583c6fd2807SJeff Garzik 		goto idle_irq;
5584c6fd2807SJeff Garzik 
5585c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
5586c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
5587c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
5588c6fd2807SJeff Garzik 		goto idle_irq;
5589c6fd2807SJeff Garzik 
5590c6fd2807SJeff Garzik 	/* ack bmdma irq events */
5591c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
5592c6fd2807SJeff Garzik 
5593c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
5594ea54763fSTejun Heo 
5595ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5596ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5597ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5598ea54763fSTejun Heo 
5599c6fd2807SJeff Garzik 	return 1;	/* irq handled */
5600c6fd2807SJeff Garzik 
5601c6fd2807SJeff Garzik idle_irq:
5602c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
5603c6fd2807SJeff Garzik 
5604c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
5605c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
560683625006SAkira Iguchi 		ap->ops->irq_ack(ap, 0); /* debug trap */
5607c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5608c6fd2807SJeff Garzik 		return 1;
5609c6fd2807SJeff Garzik 	}
5610c6fd2807SJeff Garzik #endif
5611c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
5612c6fd2807SJeff Garzik }
5613c6fd2807SJeff Garzik 
5614c6fd2807SJeff Garzik /**
5615c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
5616c6fd2807SJeff Garzik  *	@irq: irq line (unused)
5617cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
5618c6fd2807SJeff Garzik  *
5619c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
5620c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
5621c6fd2807SJeff Garzik  *
5622c6fd2807SJeff Garzik  *	LOCKING:
5623cca3974eSJeff Garzik  *	Obtains host lock during operation.
5624c6fd2807SJeff Garzik  *
5625c6fd2807SJeff Garzik  *	RETURNS:
5626c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
5627c6fd2807SJeff Garzik  */
5628c6fd2807SJeff Garzik 
56297d12e780SDavid Howells irqreturn_t ata_interrupt (int irq, void *dev_instance)
5630c6fd2807SJeff Garzik {
5631cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
5632c6fd2807SJeff Garzik 	unsigned int i;
5633c6fd2807SJeff Garzik 	unsigned int handled = 0;
5634c6fd2807SJeff Garzik 	unsigned long flags;
5635c6fd2807SJeff Garzik 
5636c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5637cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
5638c6fd2807SJeff Garzik 
5639cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5640c6fd2807SJeff Garzik 		struct ata_port *ap;
5641c6fd2807SJeff Garzik 
5642cca3974eSJeff Garzik 		ap = host->ports[i];
5643c6fd2807SJeff Garzik 		if (ap &&
5644c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
5645c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
5646c6fd2807SJeff Garzik 
5647c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, ap->active_tag);
5648c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5649c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
5650c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
5651c6fd2807SJeff Garzik 		}
5652c6fd2807SJeff Garzik 	}
5653c6fd2807SJeff Garzik 
5654cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
5655c6fd2807SJeff Garzik 
5656c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
5657c6fd2807SJeff Garzik }
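
/*
 * Illustrative sketch (editorial, not part of the original source): a
 * PCI low-level driver of this vintage registers ata_interrupt()
 * (directly, or via its own thin wrapper) as a shared handler for the
 * whole ata_host.  The function and device-name strings here are
 * hypothetical.
 */
static int example_request_host_irq(struct pci_dev *pdev, struct ata_host *host)
{
	/* one shared IRQ line services every port of the host */
	return request_irq(pdev->irq, ata_interrupt, IRQF_SHARED,
			   "example_ata", host);
}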
5658c6fd2807SJeff Garzik 
5659c6fd2807SJeff Garzik /**
5660c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
5661c6fd2807SJeff Garzik  *	@ap: ATA port to test SCR accessibility for
5662c6fd2807SJeff Garzik  *
5663c6fd2807SJeff Garzik  *	Test whether SCRs are accessible for @ap.
5664c6fd2807SJeff Garzik  *
5665c6fd2807SJeff Garzik  *	LOCKING:
5666c6fd2807SJeff Garzik  *	None.
5667c6fd2807SJeff Garzik  *
5668c6fd2807SJeff Garzik  *	RETURNS:
5669c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
5670c6fd2807SJeff Garzik  */
5671c6fd2807SJeff Garzik int sata_scr_valid(struct ata_port *ap)
5672c6fd2807SJeff Garzik {
5673c6fd2807SJeff Garzik 	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5674c6fd2807SJeff Garzik }
5675c6fd2807SJeff Garzik 
5676c6fd2807SJeff Garzik /**
5677c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
5678c6fd2807SJeff Garzik  *	@ap: ATA port to read SCR for
5679c6fd2807SJeff Garzik  *	@reg: SCR to read
5680c6fd2807SJeff Garzik  *	@val: Place to store read value
5681c6fd2807SJeff Garzik  *
5682c6fd2807SJeff Garzik  *	Read SCR register @reg of @ap into *@val.  This function is
5683c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5684c6fd2807SJeff Garzik  *	and the port implements ->scr_read.
5685c6fd2807SJeff Garzik  *
5686c6fd2807SJeff Garzik  *	LOCKING:
5687c6fd2807SJeff Garzik  *	None.
5688c6fd2807SJeff Garzik  *
5689c6fd2807SJeff Garzik  *	RETURNS:
5690c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5691c6fd2807SJeff Garzik  */
5692c6fd2807SJeff Garzik int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5693c6fd2807SJeff Garzik {
5694c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5695c6fd2807SJeff Garzik 		*val = ap->ops->scr_read(ap, reg);
5696c6fd2807SJeff Garzik 		return 0;
5697c6fd2807SJeff Garzik 	}
5698c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5699c6fd2807SJeff Garzik }
5700c6fd2807SJeff Garzik 
5701c6fd2807SJeff Garzik /**
5702c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
5703c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5704c6fd2807SJeff Garzik  *	@reg: SCR to write
5705c6fd2807SJeff Garzik  *	@val: value to write
5706c6fd2807SJeff Garzik  *
5707c6fd2807SJeff Garzik  *	Write @val to SCR register @reg of @ap.  This function is
5708c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5709c6fd2807SJeff Garzik  *	and the port implements ->scr_write.
5710c6fd2807SJeff Garzik  *
5711c6fd2807SJeff Garzik  *	LOCKING:
5712c6fd2807SJeff Garzik  *	None.
5713c6fd2807SJeff Garzik  *
5714c6fd2807SJeff Garzik  *	RETURNS:
5715c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5716c6fd2807SJeff Garzik  */
5717c6fd2807SJeff Garzik int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5718c6fd2807SJeff Garzik {
5719c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5720c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5721c6fd2807SJeff Garzik 		return 0;
5722c6fd2807SJeff Garzik 	}
5723c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5724c6fd2807SJeff Garzik }
5725c6fd2807SJeff Garzik 
5726c6fd2807SJeff Garzik /**
5727c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
5728c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5729c6fd2807SJeff Garzik  *	@reg: SCR to write
5730c6fd2807SJeff Garzik  *	@val: value to write
5731c6fd2807SJeff Garzik  *
5732c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
5733c6fd2807SJeff Garzik  *	function performs flush after writing to the register.
5734c6fd2807SJeff Garzik  *
5735c6fd2807SJeff Garzik  *	LOCKING:
5736c6fd2807SJeff Garzik  *	None.
5737c6fd2807SJeff Garzik  *
5738c6fd2807SJeff Garzik  *	RETURNS:
5739c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5740c6fd2807SJeff Garzik  */
5741c6fd2807SJeff Garzik int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5742c6fd2807SJeff Garzik {
5743c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5744c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5745c6fd2807SJeff Garzik 		ap->ops->scr_read(ap, reg);
5746c6fd2807SJeff Garzik 		return 0;
5747c6fd2807SJeff Garzik 	}
5748c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5749c6fd2807SJeff Garzik }
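
/*
 * Illustrative sketch (editorial, not part of the original source):
 * the flushing write above is what a COMRESET sequence relies on --
 * the DET change must reach the phy before the driver starts timing
 * its delay.  A simplified version of libata's own hardreset sequence:
 */
static int example_comreset(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x301;	/* DET = 1: issue COMRESET */
	rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	msleep(1);				/* keep DET=1 at least 1 ms */

	scontrol = (scontrol & 0x0f0) | 0x300;	/* DET = 0: release the phy */
	return sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
}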
5750c6fd2807SJeff Garzik 
5751c6fd2807SJeff Garzik /**
5752c6fd2807SJeff Garzik  *	ata_port_online - test whether the given port is online
5753c6fd2807SJeff Garzik  *	@ap: ATA port to test
5754c6fd2807SJeff Garzik  *
5755c6fd2807SJeff Garzik  *	Test whether @ap is online.  Note that this function returns 0
5756c6fd2807SJeff Garzik  *	if online status of @ap cannot be obtained, so
5757c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5758c6fd2807SJeff Garzik  *
5759c6fd2807SJeff Garzik  *	LOCKING:
5760c6fd2807SJeff Garzik  *	None.
5761c6fd2807SJeff Garzik  *
5762c6fd2807SJeff Garzik  *	RETURNS:
5763c6fd2807SJeff Garzik  *	1 if the port online status is available and online.
5764c6fd2807SJeff Garzik  */
5765c6fd2807SJeff Garzik int ata_port_online(struct ata_port *ap)
5766c6fd2807SJeff Garzik {
5767c6fd2807SJeff Garzik 	u32 sstatus;
5768c6fd2807SJeff Garzik 
5769c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5770c6fd2807SJeff Garzik 		return 1;
5771c6fd2807SJeff Garzik 	return 0;
5772c6fd2807SJeff Garzik }
5773c6fd2807SJeff Garzik 
5774c6fd2807SJeff Garzik /**
5775c6fd2807SJeff Garzik  *	ata_port_offline - test whether the given port is offline
5776c6fd2807SJeff Garzik  *	@ap: ATA port to test
5777c6fd2807SJeff Garzik  *
5778c6fd2807SJeff Garzik  *	Test whether @ap is offline.  Note that this function returns
5779c6fd2807SJeff Garzik  *	0 if offline status of @ap cannot be obtained, so
5780c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5781c6fd2807SJeff Garzik  *
5782c6fd2807SJeff Garzik  *	LOCKING:
5783c6fd2807SJeff Garzik  *	None.
5784c6fd2807SJeff Garzik  *
5785c6fd2807SJeff Garzik  *	RETURNS:
5786c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
5787c6fd2807SJeff Garzik  */
5788c6fd2807SJeff Garzik int ata_port_offline(struct ata_port *ap)
5789c6fd2807SJeff Garzik {
5790c6fd2807SJeff Garzik 	u32 sstatus;
5791c6fd2807SJeff Garzik 
5792c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5793c6fd2807SJeff Garzik 		return 1;
5794c6fd2807SJeff Garzik 	return 0;
5795c6fd2807SJeff Garzik }
5796c6fd2807SJeff Garzik 
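/*
 * Usage sketch (illustrative, not part of libata): because both helpers
 * above return 0 when SStatus cannot be read, !ata_port_online() does
 * not imply ata_port_offline().  Callers that care about the "unknown"
 * case must test both.
 */
static void example_report_link(struct ata_port *ap)
{
	if (ata_port_online(ap))
		ata_port_printk(ap, KERN_INFO, "link online\n");
	else if (ata_port_offline(ap))
		ata_port_printk(ap, KERN_INFO, "link offline\n");
	else
		ata_port_printk(ap, KERN_INFO, "link state unknown\n");
}
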
5797c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
5798c6fd2807SJeff Garzik {
5799c6fd2807SJeff Garzik 	unsigned int err_mask;
5800c6fd2807SJeff Garzik 	u8 cmd;
5801c6fd2807SJeff Garzik 
5802c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
5803c6fd2807SJeff Garzik 		return 0;
5804c6fd2807SJeff Garzik 
58056fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5806c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
5807c6fd2807SJeff Garzik 	else
5808c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
5809c6fd2807SJeff Garzik 
5810c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
5811c6fd2807SJeff Garzik 	if (err_mask) {
5812c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5813c6fd2807SJeff Garzik 		return -EIO;
5814c6fd2807SJeff Garzik 	}
5815c6fd2807SJeff Garzik 
5816c6fd2807SJeff Garzik 	return 0;
5817c6fd2807SJeff Garzik }
5818c6fd2807SJeff Garzik 
58196ffa01d8STejun Heo #ifdef CONFIG_PM
5820cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5821cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
5822cca3974eSJeff Garzik 			       int wait)
5823c6fd2807SJeff Garzik {
5824c6fd2807SJeff Garzik 	unsigned long flags;
5825c6fd2807SJeff Garzik 	int i, rc;
5826c6fd2807SJeff Garzik 
5827cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5828cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
5829c6fd2807SJeff Garzik 
5830c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
5831c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
5832c6fd2807SJeff Garzik 		 */
5833c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5834c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5835c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5836c6fd2807SJeff Garzik 		}
5837c6fd2807SJeff Garzik 
5838c6fd2807SJeff Garzik 		/* request PM ops to EH */
5839c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
5840c6fd2807SJeff Garzik 
5841c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
5842c6fd2807SJeff Garzik 		if (wait) {
5843c6fd2807SJeff Garzik 			rc = 0;
5844c6fd2807SJeff Garzik 			ap->pm_result = &rc;
5845c6fd2807SJeff Garzik 		}
5846c6fd2807SJeff Garzik 
5847c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5848c6fd2807SJeff Garzik 		ap->eh_info.action |= action;
5849c6fd2807SJeff Garzik 		ap->eh_info.flags |= ehi_flags;
5850c6fd2807SJeff Garzik 
5851c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
5852c6fd2807SJeff Garzik 
5853c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
5854c6fd2807SJeff Garzik 
5855c6fd2807SJeff Garzik 		/* wait and check result */
5856c6fd2807SJeff Garzik 		if (wait) {
5857c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5858c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5859c6fd2807SJeff Garzik 			if (rc)
5860c6fd2807SJeff Garzik 				return rc;
5861c6fd2807SJeff Garzik 		}
5862c6fd2807SJeff Garzik 	}
5863c6fd2807SJeff Garzik 
5864c6fd2807SJeff Garzik 	return 0;
5865c6fd2807SJeff Garzik }
5866c6fd2807SJeff Garzik 
5867c6fd2807SJeff Garzik /**
5868cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
5869cca3974eSJeff Garzik  *	@host: host to suspend
5870c6fd2807SJeff Garzik  *	@mesg: PM message
5871c6fd2807SJeff Garzik  *
5872cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
5873c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
5874c6fd2807SJeff Garzik  *	to finish.
5875c6fd2807SJeff Garzik  *
5876c6fd2807SJeff Garzik  *	LOCKING:
5877c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5878c6fd2807SJeff Garzik  *
5879c6fd2807SJeff Garzik  *	RETURNS:
5880c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
5881c6fd2807SJeff Garzik  */
5882cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5883c6fd2807SJeff Garzik {
58849666f400STejun Heo 	int rc;
5885c6fd2807SJeff Garzik 
5886cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
58879666f400STejun Heo 	if (rc == 0)
5888cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
5889c6fd2807SJeff Garzik 	return rc;
5890c6fd2807SJeff Garzik }
5891c6fd2807SJeff Garzik 
5892c6fd2807SJeff Garzik /**
5893cca3974eSJeff Garzik  *	ata_host_resume - resume host
5894cca3974eSJeff Garzik  *	@host: host to resume
5895c6fd2807SJeff Garzik  *
5896cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
5897c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
5898c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
5899c6fd2807SJeff Garzik  *
5900c6fd2807SJeff Garzik  *	LOCKING:
5901c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5902c6fd2807SJeff Garzik  */
5903cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
5904c6fd2807SJeff Garzik {
5905cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5906c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5907cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
5908c6fd2807SJeff Garzik }
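
/*
 * Usage sketch (illustrative, not part of libata): a driver's suspend
 * and resume methods typically just wrap the two helpers above, much as
 * the PCI variants further down in this file do.  The "example_*" names
 * are hypothetical.
 */
static int example_suspend(struct device *dev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(dev);

	/* EH quiesces all ports; returns only after EH has finished */
	return ata_host_suspend(host, mesg);
}

static int example_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	/* schedules resets on all ports and returns immediately */
	ata_host_resume(host);
	return 0;
}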
59096ffa01d8STejun Heo #endif
5910c6fd2807SJeff Garzik 
5911c6fd2807SJeff Garzik /**
5912c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
5913c6fd2807SJeff Garzik  *	@ap: Port to initialize
5914c6fd2807SJeff Garzik  *
5915c6fd2807SJeff Garzik  *	Called just after data structures for each port are
5916c6fd2807SJeff Garzik  *	initialized.  Allocates space for the PRD table.
5917c6fd2807SJeff Garzik  *
5918c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
5919c6fd2807SJeff Garzik  *
5920c6fd2807SJeff Garzik  *	LOCKING:
5921c6fd2807SJeff Garzik  *	Inherited from caller.
5922c6fd2807SJeff Garzik  */
5923c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
5924c6fd2807SJeff Garzik {
5925c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
5926c6fd2807SJeff Garzik 	int rc;
5927c6fd2807SJeff Garzik 
5928f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5929f0d36efdSTejun Heo 				      GFP_KERNEL);
5930c6fd2807SJeff Garzik 	if (!ap->prd)
5931c6fd2807SJeff Garzik 		return -ENOMEM;
5932c6fd2807SJeff Garzik 
5933c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
5934f0d36efdSTejun Heo 	if (rc)
5935c6fd2807SJeff Garzik 		return rc;
5936c6fd2807SJeff Garzik 
5937f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5938f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
5939c6fd2807SJeff Garzik 	return 0;
5940c6fd2807SJeff Garzik }
5941c6fd2807SJeff Garzik 
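/*
 * Usage sketch (illustrative, not part of libata): an LLD that needs
 * per-port private data on top of the standard PRD table usually calls
 * ata_port_start() first and then allocates its own state.  The private
 * structure below is hypothetical.
 */
struct example_port_priv {
	void __iomem *ctl_block;
};

static int example_port_start(struct ata_port *ap)
{
	struct example_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* PRD table + pad buffer */
	if (rc)
		return rc;

	pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	ap->private_data = pp;
	return 0;
}
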
5942c6fd2807SJeff Garzik /**
5943c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
5944c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
5945c6fd2807SJeff Garzik  *
5946c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
5947c6fd2807SJeff Garzik  *
5948c6fd2807SJeff Garzik  *	LOCKING:
5949c6fd2807SJeff Garzik  *	Inherited from caller.
5950c6fd2807SJeff Garzik  */
5951c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
5952c6fd2807SJeff Garzik {
5953c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5954c6fd2807SJeff Garzik 	unsigned long flags;
5955c6fd2807SJeff Garzik 
5956c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
5957c6fd2807SJeff Garzik 	ap->sata_spd_limit = ap->hw_sata_spd_limit;
5958c6fd2807SJeff Garzik 
5959c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
5960c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
5961cca3974eSJeff Garzik 	 * host lock.
5962c6fd2807SJeff Garzik 	 */
5963c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
5964c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5965c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
5966c6fd2807SJeff Garzik 
5967c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5968c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5969c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
5970c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
5971c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
5972c6fd2807SJeff Garzik }
5973c6fd2807SJeff Garzik 
5974c6fd2807SJeff Garzik /**
5975f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
5976f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
5977c6fd2807SJeff Garzik  *
5978f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
5979f3187195STejun Heo  *
5980f3187195STejun Heo  *	RETURNS:
5981f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
5982c6fd2807SJeff Garzik  *
5983c6fd2807SJeff Garzik  *	LOCKING:
5984f3187195STejun Heo  *	Inherited from calling layer (may sleep).
5985c6fd2807SJeff Garzik  */
5986f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
5987c6fd2807SJeff Garzik {
5988f3187195STejun Heo 	struct ata_port *ap;
5989c6fd2807SJeff Garzik 	unsigned int i;
5990c6fd2807SJeff Garzik 
5991f3187195STejun Heo 	DPRINTK("ENTER\n");
5992f3187195STejun Heo 
5993f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5994f3187195STejun Heo 	if (!ap)
5995f3187195STejun Heo 		return NULL;
5996f3187195STejun Heo 
5997f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5998cca3974eSJeff Garzik 	ap->lock = &host->lock;
5999c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6000f3187195STejun Heo 	ap->print_id = -1;
6001c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6002cca3974eSJeff Garzik 	ap->host = host;
6003f3187195STejun Heo 	ap->dev = host->dev;
6004f3187195STejun Heo 
6005c6fd2807SJeff Garzik 	ap->hw_sata_spd_limit = UINT_MAX;
6006c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
6007c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6008c6fd2807SJeff Garzik 
6009c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6010c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6011c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6012c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6013c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6014c6fd2807SJeff Garzik #else
6015c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6016c6fd2807SJeff Garzik #endif
6017c6fd2807SJeff Garzik 
601865f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
601965f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
602065f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6021c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6022c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
6023c6fd2807SJeff Garzik 
6024c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6025c6fd2807SJeff Garzik 
6026c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
6027c6fd2807SJeff Garzik 		struct ata_device *dev = &ap->device[i];
6028c6fd2807SJeff Garzik 		dev->ap = ap;
6029c6fd2807SJeff Garzik 		dev->devno = i;
6030c6fd2807SJeff Garzik 		ata_dev_init(dev);
6031c6fd2807SJeff Garzik 	}
6032c6fd2807SJeff Garzik 
6033c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6034c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6035c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6036c6fd2807SJeff Garzik #endif
6037c6fd2807SJeff Garzik 	return ap;
6038c6fd2807SJeff Garzik }
6039c6fd2807SJeff Garzik 
6040f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6041f0d36efdSTejun Heo {
6042f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6043f0d36efdSTejun Heo 	int i;
6044f0d36efdSTejun Heo 
6045f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6046f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6047f0d36efdSTejun Heo 
6048ecef7253STejun Heo 		if (!ap)
6049ecef7253STejun Heo 			continue;
6050ecef7253STejun Heo 
6051ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6052f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6053f0d36efdSTejun Heo 	}
6054f0d36efdSTejun Heo 
6055ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6056f0d36efdSTejun Heo 		host->ops->host_stop(host);
60571aa56ccaSTejun Heo 
60581aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
60591aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
60601aa506e4STejun Heo 
60614911487aSTejun Heo 		if (!ap)
60624911487aSTejun Heo 			continue;
60634911487aSTejun Heo 
60644911487aSTejun Heo 		if (ap->scsi_host)
60651aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
60661aa506e4STejun Heo 
60674911487aSTejun Heo 		kfree(ap);
60681aa506e4STejun Heo 		host->ports[i] = NULL;
60691aa506e4STejun Heo 	}
60701aa506e4STejun Heo 
60711aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6072f0d36efdSTejun Heo }
6073f0d36efdSTejun Heo 
6074c6fd2807SJeff Garzik /**
6075f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6076f3187195STejun Heo  *	@dev: generic device this host is associated with
6077f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6078f3187195STejun Heo  *
6079f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  LLD calls
6080f3187195STejun Heo  *	this function to allocate a host, initializes it fully and
6081f3187195STejun Heo  *	attaches it using ata_host_register().
6082f3187195STejun Heo  *
6083f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6084f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6085f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6086f3187195STejun Heo  *	ports will be automatically freed on registration.
6087f3187195STejun Heo  *
6088f3187195STejun Heo  *	RETURNS:
6089f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6090f3187195STejun Heo  *
6091f3187195STejun Heo  *	LOCKING:
6092f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6093f3187195STejun Heo  */
6094f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6095f3187195STejun Heo {
6096f3187195STejun Heo 	struct ata_host *host;
6097f3187195STejun Heo 	size_t sz;
6098f3187195STejun Heo 	int i;
6099f3187195STejun Heo 
6100f3187195STejun Heo 	DPRINTK("ENTER\n");
6101f3187195STejun Heo 
6102f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6103f3187195STejun Heo 		return NULL;
6104f3187195STejun Heo 
6105f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6106f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6108f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6109f3187195STejun Heo 	if (!host)
6110f3187195STejun Heo 		goto err_out;
6111f3187195STejun Heo 
6112f3187195STejun Heo 	devres_add(dev, host);
6113f3187195STejun Heo 	dev_set_drvdata(dev, host);
6114f3187195STejun Heo 
6115f3187195STejun Heo 	spin_lock_init(&host->lock);
6116f3187195STejun Heo 	host->dev = dev;
6117f3187195STejun Heo 	host->n_ports = max_ports;
6118f3187195STejun Heo 
6119f3187195STejun Heo 	/* allocate ports bound to this host */
6120f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6121f3187195STejun Heo 		struct ata_port *ap;
6122f3187195STejun Heo 
6123f3187195STejun Heo 		ap = ata_port_alloc(host);
6124f3187195STejun Heo 		if (!ap)
6125f3187195STejun Heo 			goto err_out;
6126f3187195STejun Heo 
6127f3187195STejun Heo 		ap->port_no = i;
6128f3187195STejun Heo 		host->ports[i] = ap;
6129f3187195STejun Heo 	}
6130f3187195STejun Heo 
6131f3187195STejun Heo 	devres_remove_group(dev, NULL);
6132f3187195STejun Heo 	return host;
6133f3187195STejun Heo 
6134f3187195STejun Heo  err_out:
6135f3187195STejun Heo 	devres_release_group(dev, NULL);
6136f3187195STejun Heo 	return NULL;
6137f3187195STejun Heo }
6138f3187195STejun Heo 
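/*
 * Usage sketch (illustrative, not part of libata): an LLD that cannot
 * tell the exact port count at allocation time allocates the maximum
 * and trims host->n_ports once it knows better; ata_host_register()
 * frees the surplus ports.  The limit of 4 is hypothetical.
 */
static struct ata_host *example_alloc(struct device *dev, int n_found)
{
	struct ata_host *host;

	host = ata_host_alloc(dev, 4);		/* worst case */
	if (!host)
		return NULL;

	if (n_found < host->n_ports)
		host->n_ports = n_found;	/* surplus freed at register */

	return host;
}
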
6139f3187195STejun Heo /**
6140f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6141f5cda257STejun Heo  *	@dev: generic device this host is associated with
6142f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6143f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6144f5cda257STejun Heo  *
6145f5cda257STejun Heo  *	Allocate an ATA host and initialize it with info from @ppi.  If
6146f5cda257STejun Heo  *	@ppi is NULL terminated, it may contain fewer entries than
6147f5cda257STejun Heo  *	@n_ports; the last entry is then used for the remaining ports.
6148f5cda257STejun Heo  *
6149f5cda257STejun Heo  *	RETURNS:
6150f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6151f5cda257STejun Heo  *
6152f5cda257STejun Heo  *	LOCKING:
6153f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6154f5cda257STejun Heo  */
6155f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6156f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6157f5cda257STejun Heo 				      int n_ports)
6158f5cda257STejun Heo {
6159f5cda257STejun Heo 	const struct ata_port_info *pi;
6160f5cda257STejun Heo 	struct ata_host *host;
6161f5cda257STejun Heo 	int i, j;
6162f5cda257STejun Heo 
6163f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6164f5cda257STejun Heo 	if (!host)
6165f5cda257STejun Heo 		return NULL;
6166f5cda257STejun Heo 
6167f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6168f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6169f5cda257STejun Heo 
6170f5cda257STejun Heo 		if (ppi[j])
6171f5cda257STejun Heo 			pi = ppi[j++];
6172f5cda257STejun Heo 
6173f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6174f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6175f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6176f5cda257STejun Heo 		ap->flags |= pi->flags;
6177f5cda257STejun Heo 		ap->ops = pi->port_ops;
6178f5cda257STejun Heo 
6179f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6180f5cda257STejun Heo 			host->ops = pi->port_ops;
6181f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6182f5cda257STejun Heo 			host->private_data = pi->private_data;
6183f5cda257STejun Heo 	}
6184f5cda257STejun Heo 
6185f5cda257STejun Heo 	return host;
6186f5cda257STejun Heo }
6187f5cda257STejun Heo 
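/*
 * Usage sketch (illustrative, not part of libata): a two-port PCI
 * controller whose channels share one port_info can pass a
 * NULL-terminated, single-entry @ppi; the entry is reused for the
 * second port.  @pi is a hypothetical, driver-provided port_info.
 */
static struct ata_host *example_alloc_pinfo(struct pci_dev *pdev,
					    const struct ata_port_info *pi)
{
	const struct ata_port_info *ppi[] = { pi, NULL };

	return ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
}
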
6188f5cda257STejun Heo /**
6189ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6190ecef7253STejun Heo  *	@host: ATA host to start ports for
6191ecef7253STejun Heo  *
6192ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6193ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6194ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6195f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6196f3187195STejun Heo  *	first non-dummy port ops.
6197ecef7253STejun Heo  *
6198ecef7253STejun Heo  *	LOCKING:
6199ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6200ecef7253STejun Heo  *
6201ecef7253STejun Heo  *	RETURNS:
6202ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6203ecef7253STejun Heo  */
6204ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6205ecef7253STejun Heo {
6206ecef7253STejun Heo 	int i, rc;
6207ecef7253STejun Heo 
6208ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6209ecef7253STejun Heo 		return 0;
6210ecef7253STejun Heo 
6211ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6212ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6213ecef7253STejun Heo 
6214f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6215f3187195STejun Heo 			host->ops = ap->ops;
6216f3187195STejun Heo 
6217ecef7253STejun Heo 		if (ap->ops->port_start) {
6218ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6219ecef7253STejun Heo 			if (rc) {
6220ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
6221ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
6222ecef7253STejun Heo 				goto err_out;
6223ecef7253STejun Heo 			}
6224ecef7253STejun Heo 		}
6225ecef7253STejun Heo 
6226ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6227ecef7253STejun Heo 	}
6228ecef7253STejun Heo 
6229ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6230ecef7253STejun Heo 	return 0;
6231ecef7253STejun Heo 
6232ecef7253STejun Heo  err_out:
6233ecef7253STejun Heo 	while (--i >= 0) {
6234ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6235ecef7253STejun Heo 
6236ecef7253STejun Heo 		if (ap->ops->port_stop)
6237ecef7253STejun Heo 			ap->ops->port_stop(ap);
6238ecef7253STejun Heo 	}
6239ecef7253STejun Heo 	return rc;
6240ecef7253STejun Heo }
6241ecef7253STejun Heo 
6242ecef7253STejun Heo /**
6243cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
6244cca3974eSJeff Garzik  *	@host:	host to initialize
6245cca3974eSJeff Garzik  *	@dev:	device host is attached to
6246cca3974eSJeff Garzik  *	@flags:	host flags
6247c6fd2807SJeff Garzik  *	@ops:	port_ops
6248c6fd2807SJeff Garzik  *
6249c6fd2807SJeff Garzik  *	LOCKING:
6250c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6251c6fd2807SJeff Garzik  *
6252c6fd2807SJeff Garzik  */
6253f3187195STejun Heo /* KILLME - the only user left is ipr */
6254cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6255cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6256c6fd2807SJeff Garzik {
6257cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6258cca3974eSJeff Garzik 	host->dev = dev;
6259cca3974eSJeff Garzik 	host->flags = flags;
6260cca3974eSJeff Garzik 	host->ops = ops;
6261c6fd2807SJeff Garzik }
6262c6fd2807SJeff Garzik 
6263c6fd2807SJeff Garzik /**
6264f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6265f3187195STejun Heo  *	@host: ATA host to register
6266f3187195STejun Heo  *	@sht: template for SCSI host
6267c6fd2807SJeff Garzik  *
6268f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
6269f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
6270f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
6271f3187195STejun Heo  *	probes registered devices.
6272c6fd2807SJeff Garzik  *
6273c6fd2807SJeff Garzik  *	LOCKING:
6274f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6275c6fd2807SJeff Garzik  *
6276c6fd2807SJeff Garzik  *	RETURNS:
6277f3187195STejun Heo  *	0 on success, -errno otherwise.
6278c6fd2807SJeff Garzik  */
6279f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6280c6fd2807SJeff Garzik {
6281f3187195STejun Heo 	int i, rc;
6282c6fd2807SJeff Garzik 
6283f3187195STejun Heo 	/* host must have been started */
6284f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
6285f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
6286f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
6287f3187195STejun Heo 		WARN_ON(1);
6288f3187195STejun Heo 		return -EINVAL;
628902f076aaSAlan Cox 	}
6290f0d36efdSTejun Heo 
6291f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
6292f3187195STejun Heo 	 * determine the exact number of ports to allocate at
6293f3187195STejun Heo 	 * allocation time.
6294f3187195STejun Heo 	 */
6295f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
6296f3187195STejun Heo 		kfree(host->ports[i]);
6297f0d36efdSTejun Heo 
6298f3187195STejun Heo 	/* give ports names and add SCSI hosts */
6299f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
6300f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
6301c6fd2807SJeff Garzik 
6302f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
6303ecef7253STejun Heo 	if (rc)
6304f3187195STejun Heo 		return rc;
6305ecef7253STejun Heo 
6306f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
6307cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6308cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6309f3187195STejun Heo 		int irq_line;
6310c6fd2807SJeff Garzik 		u32 scontrol;
6311f3187195STejun Heo 		unsigned long xfer_mask;
6312f3187195STejun Heo 
6313f3187195STejun Heo 		/* set SATA cable type if still unset */
6314f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6315f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
6316c6fd2807SJeff Garzik 
6317c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
6318c6fd2807SJeff Garzik 		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6319c6fd2807SJeff Garzik 			int spd = (scontrol >> 4) & 0xf;
6320afe3cc51STejun Heo 			if (spd)
6321c6fd2807SJeff Garzik 				ap->hw_sata_spd_limit &= (1 << spd) - 1;
6322c6fd2807SJeff Garzik 		}
6323c6fd2807SJeff Garzik 		ap->sata_spd_limit = ap->hw_sata_spd_limit;
6324c6fd2807SJeff Garzik 
6325f3187195STejun Heo 		/* report the secondary IRQ for second channel legacy */
6326f3187195STejun Heo 		irq_line = host->irq;
6327f3187195STejun Heo 		if (i == 1 && host->irq2)
6328f3187195STejun Heo 			irq_line = host->irq2;
6329f3187195STejun Heo 
6330f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6331f3187195STejun Heo 					      ap->udma_mask);
6332f3187195STejun Heo 
6333f3187195STejun Heo 		/* print per-port info to dmesg */
6334f3187195STejun Heo 		if (!ata_port_is_dummy(ap))
6335f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6336f3187195STejun Heo 					"ctl 0x%p bmdma 0x%p irq %d\n",
6337f3187195STejun Heo 					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
6338f3187195STejun Heo 					ata_mode_string(xfer_mask),
6339f3187195STejun Heo 					ap->ioaddr.cmd_addr,
6340f3187195STejun Heo 					ap->ioaddr.ctl_addr,
6341f3187195STejun Heo 					ap->ioaddr.bmdma_addr,
6342f3187195STejun Heo 					irq_line);
6343f3187195STejun Heo 		else
6344f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6345c6fd2807SJeff Garzik 	}
6346c6fd2807SJeff Garzik 
6347f3187195STejun Heo 	/* perform each probe synchronously */
6348f3187195STejun Heo 	DPRINTK("probe begin\n");
6349f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6350f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
6351f3187195STejun Heo 		int rc;
6352f3187195STejun Heo 
6353f3187195STejun Heo 		/* probe */
6354c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
6355c6fd2807SJeff Garzik 			struct ata_eh_info *ehi = &ap->eh_info;
6356c6fd2807SJeff Garzik 			unsigned long flags;
6357c6fd2807SJeff Garzik 
6358c6fd2807SJeff Garzik 			ata_port_probe(ap);
6359c6fd2807SJeff Garzik 
6360c6fd2807SJeff Garzik 			/* kick EH for boot probing */
6361c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
6362c6fd2807SJeff Garzik 
6363c6fd2807SJeff Garzik 			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6364c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
6365c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6366c6fd2807SJeff Garzik 
6367f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6368c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
6369c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
6370c6fd2807SJeff Garzik 
6371c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
6372c6fd2807SJeff Garzik 
6373c6fd2807SJeff Garzik 			/* wait for EH to finish */
6374c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6375c6fd2807SJeff Garzik 		} else {
637644877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6377c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
637844877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6379c6fd2807SJeff Garzik 
6380c6fd2807SJeff Garzik 			if (rc) {
6381c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
6382c6fd2807SJeff Garzik 				 * Current libata behavior will
6383c6fd2807SJeff Garzik 				 * tear down everything when
6384c6fd2807SJeff Garzik 				 * the module is removed
6385c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
6386c6fd2807SJeff Garzik 				 */
6387c6fd2807SJeff Garzik 			}
6388c6fd2807SJeff Garzik 		}
6389c6fd2807SJeff Garzik 	}
6390c6fd2807SJeff Garzik 
6391c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
6392c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
6393cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6394cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6395c6fd2807SJeff Garzik 
6396c6fd2807SJeff Garzik 		ata_scsi_scan_host(ap);
6397c6fd2807SJeff Garzik 	}
6398c6fd2807SJeff Garzik 
6399f3187195STejun Heo 	return 0;
6400f3187195STejun Heo }
6401f3187195STejun Heo 
6402f3187195STejun Heo /**
6403f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
6404f5cda257STejun Heo  *	@host: target ATA host
6405f5cda257STejun Heo  *	@irq: IRQ to request
6406f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
6407f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
6408f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
6409f5cda257STejun Heo  *
6410f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
6411f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
6412f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
6413f5cda257STejun Heo  *	arguments and performs the three steps in one go.
6414f5cda257STejun Heo  *
6415f5cda257STejun Heo  *	LOCKING:
6416f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6417f5cda257STejun Heo  *
6418f5cda257STejun Heo  *	RETURNS:
6419f5cda257STejun Heo  *	0 on success, -errno otherwise.
6420f5cda257STejun Heo  */
6421f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
6422f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
6423f5cda257STejun Heo 		      struct scsi_host_template *sht)
6424f5cda257STejun Heo {
6425f5cda257STejun Heo 	int rc;
6426f5cda257STejun Heo 
6427f5cda257STejun Heo 	rc = ata_host_start(host);
6428f5cda257STejun Heo 	if (rc)
6429f5cda257STejun Heo 		return rc;
6430f5cda257STejun Heo 
6431f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6432f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
6433f5cda257STejun Heo 	if (rc)
6434f5cda257STejun Heo 		return rc;
6435f5cda257STejun Heo 
6436f5cda257STejun Heo 	rc = ata_host_register(host, sht);
6437f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
6438f5cda257STejun Heo 	if (rc)
6439f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
6440f5cda257STejun Heo 
644122888423SOlof Johansson 	/* Used to print device info at probe */
644222888423SOlof Johansson 	host->irq = irq;
644322888423SOlof Johansson 
6444f5cda257STejun Heo 	return rc;
6445f5cda257STejun Heo }
6446f5cda257STejun Heo 
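/*
 * Usage sketch (illustrative, not part of libata): the tail of a
 * typical probe routine.  The call below is equivalent to open-coding
 * ata_host_start(), devm_request_irq() and ata_host_register().
 * ata_interrupt is the standard SFF handler exported by libata; @sht
 * is the driver's scsi_host_template.
 */
static int example_probe_tail(struct pci_dev *pdev, struct ata_host *host,
			      struct scsi_host_template *sht)
{
	/* ap->ioaddr of every port is assumed to be set up already */
	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, sht);
}
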
6447f5cda257STejun Heo /**
6448c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
6449c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
6450c6fd2807SJeff Garzik  *
6451c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
6452c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
6453c6fd2807SJeff Garzik  *	be quiescent on return from this function.
6454c6fd2807SJeff Garzik  *
6455c6fd2807SJeff Garzik  *	LOCKING:
6456c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6457c6fd2807SJeff Garzik  */
6458c6fd2807SJeff Garzik void ata_port_detach(struct ata_port *ap)
6459c6fd2807SJeff Garzik {
6460c6fd2807SJeff Garzik 	unsigned long flags;
6461c6fd2807SJeff Garzik 	int i;
6462c6fd2807SJeff Garzik 
6463c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
6464c6fd2807SJeff Garzik 		goto skip_eh;
6465c6fd2807SJeff Garzik 
6466c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
6467c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6468c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
6469c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6470c6fd2807SJeff Garzik 
6471c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6472c6fd2807SJeff Garzik 
6473c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
6474c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
6475c6fd2807SJeff Garzik 	 */
6476c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6477c6fd2807SJeff Garzik 
6478c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
6479c6fd2807SJeff Garzik 		ata_dev_disable(&ap->device[i]);
6480c6fd2807SJeff Garzik 
6481c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6482c6fd2807SJeff Garzik 
6483c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6484c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
6485c6fd2807SJeff Garzik 	 * target.
6486c6fd2807SJeff Garzik 	 */
6487c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6488c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
6489c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6490c6fd2807SJeff Garzik 
6491c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6492c6fd2807SJeff Garzik 
6493c6fd2807SJeff Garzik 	/* Flush hotplug task.  The sequence is similar to
6494c6fd2807SJeff Garzik 	 * ata_port_flush_task().
6495c6fd2807SJeff Garzik 	 */
649628e53bddSOleg Nesterov 	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
6497c6fd2807SJeff Garzik 	cancel_delayed_work(&ap->hotplug_task);
649828e53bddSOleg Nesterov 	cancel_work_sync(&ap->hotplug_task.work);
6499c6fd2807SJeff Garzik 
6500c6fd2807SJeff Garzik  skip_eh:
6501c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
6502cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
6503c6fd2807SJeff Garzik }
6504c6fd2807SJeff Garzik 
6505c6fd2807SJeff Garzik /**
65060529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
65070529c159STejun Heo  *	@host: Host to detach
65080529c159STejun Heo  *
65090529c159STejun Heo  *	Detach all ports of @host.
65100529c159STejun Heo  *
65110529c159STejun Heo  *	LOCKING:
65120529c159STejun Heo  *	Kernel thread context (may sleep).
65130529c159STejun Heo  */
65140529c159STejun Heo void ata_host_detach(struct ata_host *host)
65150529c159STejun Heo {
65160529c159STejun Heo 	int i;
65170529c159STejun Heo 
65180529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
65190529c159STejun Heo 		ata_port_detach(host->ports[i]);
65200529c159STejun Heo }
65210529c159STejun Heo 
6522c6fd2807SJeff Garzik /**
6523c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
6524c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
6525c6fd2807SJeff Garzik  *
6526c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
6527c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6528c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
6529c6fd2807SJeff Garzik  *	relative to cmd_addr.
6530c6fd2807SJeff Garzik  *
6531c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6532c6fd2807SJeff Garzik  */
6533c6fd2807SJeff Garzik 
6534c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
6535c6fd2807SJeff Garzik {
6536c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6537c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6538c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6539c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6540c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6541c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6542c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6543c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6544c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6545c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6546c6fd2807SJeff Garzik }
6547c6fd2807SJeff Garzik 
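/*
 * Usage sketch (illustrative, not part of libata): an LLD fills in
 * cmd_addr plus the registers ata_std_ports() leaves alone, then lets
 * the helper derive the taskfile register addresses.  The 0x10/0x20
 * offsets and "mmio" base are hypothetical.
 */
static void example_setup_ioaddr(struct ata_port *ap, void __iomem *mmio)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = mmio;
	ioaddr->ctl_addr = ioaddr->altstatus_addr = mmio + 0x10;
	ioaddr->bmdma_addr = mmio + 0x20;

	ata_std_ports(ioaddr);	/* fills data/error/.../command_addr */
}
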
6548c6fd2807SJeff Garzik 
6549c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6550c6fd2807SJeff Garzik 
6551c6fd2807SJeff Garzik /**
6552c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
6553c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
6554c6fd2807SJeff Garzik  *
6555b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
6556b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
6557b878ca5dSTejun Heo  *	release is handled via devres.
6558c6fd2807SJeff Garzik  *
6559c6fd2807SJeff Garzik  *	LOCKING:
6560c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
6561c6fd2807SJeff Garzik  */
6562c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
6563c6fd2807SJeff Garzik {
6564c6fd2807SJeff Garzik 	struct device *dev = pci_dev_to_dev(pdev);
6565cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
6566c6fd2807SJeff Garzik 
6567f0d36efdSTejun Heo 	ata_host_detach(host);
6568c6fd2807SJeff Garzik }
6569c6fd2807SJeff Garzik 
6570c6fd2807SJeff Garzik /* move to PCI subsystem */
6571c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6572c6fd2807SJeff Garzik {
6573c6fd2807SJeff Garzik 	unsigned long tmp = 0;
6574c6fd2807SJeff Garzik 
6575c6fd2807SJeff Garzik 	switch (bits->width) {
6576c6fd2807SJeff Garzik 	case 1: {
6577c6fd2807SJeff Garzik 		u8 tmp8 = 0;
6578c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6579c6fd2807SJeff Garzik 		tmp = tmp8;
6580c6fd2807SJeff Garzik 		break;
6581c6fd2807SJeff Garzik 	}
6582c6fd2807SJeff Garzik 	case 2: {
6583c6fd2807SJeff Garzik 		u16 tmp16 = 0;
6584c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
6585c6fd2807SJeff Garzik 		tmp = tmp16;
6586c6fd2807SJeff Garzik 		break;
6587c6fd2807SJeff Garzik 	}
6588c6fd2807SJeff Garzik 	case 4: {
6589c6fd2807SJeff Garzik 		u32 tmp32 = 0;
6590c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6591c6fd2807SJeff Garzik 		tmp = tmp32;
6592c6fd2807SJeff Garzik 		break;
6593c6fd2807SJeff Garzik 	}
6594c6fd2807SJeff Garzik 
6595c6fd2807SJeff Garzik 	default:
6596c6fd2807SJeff Garzik 		return -EINVAL;
6597c6fd2807SJeff Garzik 	}
6598c6fd2807SJeff Garzik 
6599c6fd2807SJeff Garzik 	tmp &= bits->mask;
6600c6fd2807SJeff Garzik 
6601c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
6602c6fd2807SJeff Garzik }
6603c6fd2807SJeff Garzik 
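/*
 * Usage sketch (illustrative, not part of libata): ata_piix-style
 * drivers describe per-channel "enable" bits in PCI config space with
 * struct pci_bits and test them before probing a port.  The offsets
 * below are hypothetical.
 */
static const struct pci_bits example_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* primary channel enable */
	{ 0x43, 1, 0x80, 0x80 },	/* secondary channel enable */
};

static int example_channel_enabled(struct pci_dev *pdev, int port_no)
{
	return pci_test_config_bits(pdev, &example_enable_bits[port_no]);
}
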
66046ffa01d8STejun Heo #ifdef CONFIG_PM
6605c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6606c6fd2807SJeff Garzik {
6607c6fd2807SJeff Garzik 	pci_save_state(pdev);
6608c6fd2807SJeff Garzik 	pci_disable_device(pdev);
66094c90d971STejun Heo 
66104c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
6611c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
6612c6fd2807SJeff Garzik }
6613c6fd2807SJeff Garzik 
6614553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
6615c6fd2807SJeff Garzik {
6616553c4aa6STejun Heo 	int rc;
6617553c4aa6STejun Heo 
6618c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
6619c6fd2807SJeff Garzik 	pci_restore_state(pdev);
6620553c4aa6STejun Heo 
6621f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
6622553c4aa6STejun Heo 	if (rc) {
6623553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
6624553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
6625553c4aa6STejun Heo 		return rc;
6626553c4aa6STejun Heo 	}
6627553c4aa6STejun Heo 
6628c6fd2807SJeff Garzik 	pci_set_master(pdev);
6629553c4aa6STejun Heo 	return 0;
6630c6fd2807SJeff Garzik }
6631c6fd2807SJeff Garzik 
6632c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6633c6fd2807SJeff Garzik {
6634cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6635c6fd2807SJeff Garzik 	int rc = 0;
6636c6fd2807SJeff Garzik 
6637cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
6638c6fd2807SJeff Garzik 	if (rc)
6639c6fd2807SJeff Garzik 		return rc;
6640c6fd2807SJeff Garzik 
6641c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
6642c6fd2807SJeff Garzik 
6643c6fd2807SJeff Garzik 	return 0;
6644c6fd2807SJeff Garzik }
6645c6fd2807SJeff Garzik 
6646c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
6647c6fd2807SJeff Garzik {
6648cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6649553c4aa6STejun Heo 	int rc;
6650c6fd2807SJeff Garzik 
6651553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
6652553c4aa6STejun Heo 	if (rc == 0)
6653cca3974eSJeff Garzik 		ata_host_resume(host);
6654553c4aa6STejun Heo 	return rc;
6655c6fd2807SJeff Garzik }
66566ffa01d8STejun Heo #endif /* CONFIG_PM */
66576ffa01d8STejun Heo 
6658c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6659c6fd2807SJeff Garzik 
6660c6fd2807SJeff Garzik 
6661c6fd2807SJeff Garzik static int __init ata_init(void)
6662c6fd2807SJeff Garzik {
6663c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
6664c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
6665c6fd2807SJeff Garzik 	if (!ata_wq)
6666c6fd2807SJeff Garzik 		return -ENOMEM;
6667c6fd2807SJeff Garzik 
6668c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6669c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
6670c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
6671c6fd2807SJeff Garzik 		return -ENOMEM;
6672c6fd2807SJeff Garzik 	}
6673c6fd2807SJeff Garzik 
6674c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6675c6fd2807SJeff Garzik 	return 0;
6676c6fd2807SJeff Garzik }
6677c6fd2807SJeff Garzik 
6678c6fd2807SJeff Garzik static void __exit ata_exit(void)
6679c6fd2807SJeff Garzik {
6680c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
6681c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
6682c6fd2807SJeff Garzik }
6683c6fd2807SJeff Garzik 
6684a4625085SBrian King subsys_initcall(ata_init);
6685c6fd2807SJeff Garzik module_exit(ata_exit);
6686c6fd2807SJeff Garzik 
6687c6fd2807SJeff Garzik static unsigned long ratelimit_time;
6688c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
6689c6fd2807SJeff Garzik 
6690c6fd2807SJeff Garzik int ata_ratelimit(void)
6691c6fd2807SJeff Garzik {
6692c6fd2807SJeff Garzik 	int rc;
6693c6fd2807SJeff Garzik 	unsigned long flags;
6694c6fd2807SJeff Garzik 
6695c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6696c6fd2807SJeff Garzik 
6697c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
6698c6fd2807SJeff Garzik 		rc = 1;
6699c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
6700c6fd2807SJeff Garzik 	} else
6701c6fd2807SJeff Garzik 		rc = 0;
6702c6fd2807SJeff Garzik 
6703c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6704c6fd2807SJeff Garzik 
6705c6fd2807SJeff Garzik 	return rc;
6706c6fd2807SJeff Garzik }
6707c6fd2807SJeff Garzik 
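/*
 * Usage sketch (illustrative, not part of libata): the window above is
 * HZ/5, so ata_ratelimit() lets roughly five messages per second
 * through.  Interrupt-time warnings are typically wrapped like this.
 */
static inline void example_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt ignored\n");
}
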
6708c6fd2807SJeff Garzik /**
6709c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
6710c6fd2807SJeff Garzik  *	@reg: IO-mapped register
6711c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
6712c6fd2807SJeff Garzik  *	@val: Wait condition
6713c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
6714c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
6715c6fd2807SJeff Garzik  *
6716c6fd2807SJeff Garzik  *	Waiting for some bits of register to change is a common
6717c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
6718c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
6719c6fd2807SJeff Garzik  *
6720c6fd2807SJeff Garzik  *	(*@reg & mask) != val
6721c6fd2807SJeff Garzik  *
6722c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
6723c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
6724c6fd2807SJeff Garzik  *
6725c6fd2807SJeff Garzik  *	LOCKING:
6726c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
6727c6fd2807SJeff Garzik  *
6728c6fd2807SJeff Garzik  *	RETURNS:
6729c6fd2807SJeff Garzik  *	The final register value.
6730c6fd2807SJeff Garzik  */
6731c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6732c6fd2807SJeff Garzik 		      unsigned long interval_msec,
6733c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
6734c6fd2807SJeff Garzik {
6735c6fd2807SJeff Garzik 	unsigned long timeout;
6736c6fd2807SJeff Garzik 	u32 tmp;
6737c6fd2807SJeff Garzik 
6738c6fd2807SJeff Garzik 	tmp = ioread32(reg);
6739c6fd2807SJeff Garzik 
6740c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
6741c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
6742c6fd2807SJeff Garzik 	 * eat away the timeout.
6743c6fd2807SJeff Garzik 	 */
6744c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
6745c6fd2807SJeff Garzik 
6746c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6747c6fd2807SJeff Garzik 		msleep(interval_msec);
6748c6fd2807SJeff Garzik 		tmp = ioread32(reg);
6749c6fd2807SJeff Garzik 	}
6750c6fd2807SJeff Garzik 
6751c6fd2807SJeff Garzik 	return tmp;
6752c6fd2807SJeff Garzik }
6753c6fd2807SJeff Garzik 
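/*
 * Usage sketch (illustrative, not part of libata): wait up to one
 * second for bit 0 of a hypothetical memory-mapped status register to
 * clear, polling every 10ms.
 */
static int example_wait_idle(void __iomem *status_reg)
{
	u32 status;

	/* loops while (ioread32(status_reg) & 0x1) == 0x1 */
	status = ata_wait_register(status_reg, 0x1, 0x1, 10, 1000);

	return (status & 0x1) ? -EBUSY : 0;
}
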
6754c6fd2807SJeff Garzik /*
6755c6fd2807SJeff Garzik  * Dummy port_ops
6756c6fd2807SJeff Garzik  */
6757c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
6758c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
6759c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6760c6fd2807SJeff Garzik 
6761c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
6762c6fd2807SJeff Garzik {
6763c6fd2807SJeff Garzik 	return ATA_DRDY;
6764c6fd2807SJeff Garzik }
6765c6fd2807SJeff Garzik 
6766c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6767c6fd2807SJeff Garzik {
6768c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
6769c6fd2807SJeff Garzik }
6770c6fd2807SJeff Garzik 
6771c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
6772c6fd2807SJeff Garzik 	.port_disable		= ata_port_disable,
6773c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
6774c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
6775c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
6776c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
6777c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
6778c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
6779c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
6780c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
6781c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
6782c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
6783c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
6784c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
6785c6fd2807SJeff Garzik };
6786c6fd2807SJeff Garzik 
678721b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
678821b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
678921b0ad4fSTejun Heo };
679021b0ad4fSTejun Heo 
6791c6fd2807SJeff Garzik /*
6792c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
6793c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
6794c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
6795c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
6796c6fd2807SJeff Garzik  */
6797c6fd2807SJeff Garzik 
6798c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6799c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6800c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6801c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
680221b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6803c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
6804c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
6805cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
6806f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
6807f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6808ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
6809f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
6810f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
68110529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
6812c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
6813c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
6814c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
6815c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
6816c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6817c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6818c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
6819c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
6820c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6821c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
682243727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
6823c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6824c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6825c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
6826c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
6827c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
6828c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
6829c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
683004351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
68310d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
68320d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6833c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
6834c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6835c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6836c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
6837c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6838c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
6839c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6840c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6841c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6842c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6843c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6844c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6845c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
684610305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
6847c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
6848c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_debounce);
6849c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_resume);
6850c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
6851c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
6852c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
6853c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
6854c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
6855b6103f6dSTejun Heo EXPORT_SYMBOL_GPL(sata_port_hardreset);
6856c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
6857c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
6858c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
6859c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
6860c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
6861c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
6862c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
6863c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
6864d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
6865c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
6866c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6867c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6868c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6869c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6870c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6871c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
6872c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
6873c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
6874c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
6875c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6876c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_online);
6877c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_offline);
68786ffa01d8STejun Heo #ifdef CONFIG_PM
6879cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
6880cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
68816ffa01d8STejun Heo #endif /* CONFIG_PM */
6882c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
6883c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
688410305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
68856919a0a6SAlan Cox EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6886c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6887c6fd2807SJeff Garzik 
6888c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6889c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
6890c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
6891c6fd2807SJeff Garzik 
6892c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6893c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
6894d491b27bSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
68951626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
689621b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
6897c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
6898c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
68996ffa01d8STejun Heo #ifdef CONFIG_PM
6900c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6901c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6902c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6903c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
69046ffa01d8STejun Heo #endif /* CONFIG_PM */
6905c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6906c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6907c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6908c6fd2807SJeff Garzik 
6909c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
6910c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6911c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
6912c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
6913c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6914c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6915c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6916c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6917c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
691883625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
691983625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
692083625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_ack);
692183625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6922a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6923be0d18dfSAlan Cox 
6924be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
6925be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
6926be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
6927be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
6928