xref: /openbmc/linux/drivers/ata/libata-core.c (revision 6746544c)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36c6fd2807SJeff Garzik #include <linux/module.h>
37c6fd2807SJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <linux/init.h>
39c6fd2807SJeff Garzik #include <linux/list.h>
40c6fd2807SJeff Garzik #include <linux/mm.h>
41c6fd2807SJeff Garzik #include <linux/highmem.h>
42c6fd2807SJeff Garzik #include <linux/spinlock.h>
43c6fd2807SJeff Garzik #include <linux/blkdev.h>
44c6fd2807SJeff Garzik #include <linux/delay.h>
45c6fd2807SJeff Garzik #include <linux/timer.h>
46c6fd2807SJeff Garzik #include <linux/interrupt.h>
47c6fd2807SJeff Garzik #include <linux/completion.h>
48c6fd2807SJeff Garzik #include <linux/suspend.h>
49c6fd2807SJeff Garzik #include <linux/workqueue.h>
50c6fd2807SJeff Garzik #include <linux/jiffies.h>
51c6fd2807SJeff Garzik #include <linux/scatterlist.h>
52c6fd2807SJeff Garzik #include <scsi/scsi.h>
53c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
54c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
55c6fd2807SJeff Garzik #include <linux/libata.h>
56c6fd2807SJeff Garzik #include <asm/io.h>
57c6fd2807SJeff Garzik #include <asm/semaphore.h>
58c6fd2807SJeff Garzik #include <asm/byteorder.h>
59c6fd2807SJeff Garzik 
60c6fd2807SJeff Garzik #include "libata.h"
61c6fd2807SJeff Garzik 
628bc3fc47SJeff Garzik #define DRV_VERSION	"2.21"	/* must be exactly four chars */
63fda0efc5SJeff Garzik 
64fda0efc5SJeff Garzik 
65c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
67c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
68c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
69c6fd2807SJeff Garzik 
70c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
71c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
72c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
74c6fd2807SJeff Garzik 
75f3187195STejun Heo unsigned int ata_print_id = 1;
76c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
77c6fd2807SJeff Garzik 
78c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
79c6fd2807SJeff Garzik 
80c6fd2807SJeff Garzik int atapi_enabled = 1;
81c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
82c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83c6fd2807SJeff Garzik 
84c6fd2807SJeff Garzik int atapi_dmadir = 0;
85c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
86c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87c6fd2807SJeff Garzik 
88c6fd2807SJeff Garzik int libata_fua = 0;
89c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
90c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91c6fd2807SJeff Garzik 
921e999736SAlan Cox static int ata_ignore_hpa = 0;
931e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
941e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
951e999736SAlan Cox 
96c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
97c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
98c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
99c6fd2807SJeff Garzik 
100d7d0dad6SJeff Garzik int libata_noacpi = 1;
101d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
10211ef697bSKristen Carlson Accardi MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
10311ef697bSKristen Carlson Accardi 
104c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
105c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
106c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
107c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
108c6fd2807SJeff Garzik 
109c6fd2807SJeff Garzik 
110c6fd2807SJeff Garzik /**
111c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
112c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
113c6fd2807SJeff Garzik  *	@fis: Buffer into which data will be output
114c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
115c6fd2807SJeff Garzik  *
116c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
117c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
118c6fd2807SJeff Garzik  *
119c6fd2807SJeff Garzik  *	LOCKING:
120c6fd2807SJeff Garzik  *	Inherited from caller.
121c6fd2807SJeff Garzik  */
122c6fd2807SJeff Garzik 
123c6fd2807SJeff Garzik void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
124c6fd2807SJeff Garzik {
125c6fd2807SJeff Garzik 	fis[0] = 0x27;	/* Register - Host to Device FIS */
126c6fd2807SJeff Garzik 	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
127c6fd2807SJeff Garzik 					    bit 7 indicates Command FIS */
128c6fd2807SJeff Garzik 	fis[2] = tf->command;
129c6fd2807SJeff Garzik 	fis[3] = tf->feature;
130c6fd2807SJeff Garzik 
131c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
132c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
133c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
134c6fd2807SJeff Garzik 	fis[7] = tf->device;
135c6fd2807SJeff Garzik 
136c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
137c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
138c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
139c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
140c6fd2807SJeff Garzik 
141c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
142c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
143c6fd2807SJeff Garzik 	fis[14] = 0;
144c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
145c6fd2807SJeff Garzik 
146c6fd2807SJeff Garzik 	fis[16] = 0;
147c6fd2807SJeff Garzik 	fis[17] = 0;
148c6fd2807SJeff Garzik 	fis[18] = 0;
149c6fd2807SJeff Garzik 	fis[19] = 0;
150c6fd2807SJeff Garzik }
151c6fd2807SJeff Garzik 
152c6fd2807SJeff Garzik /**
153c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
154c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
155c6fd2807SJeff Garzik  *	@tf: Taskfile to output
156c6fd2807SJeff Garzik  *
157c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
158c6fd2807SJeff Garzik  *
159c6fd2807SJeff Garzik  *	LOCKING:
160c6fd2807SJeff Garzik  *	Inherited from caller.
161c6fd2807SJeff Garzik  */
162c6fd2807SJeff Garzik 
163c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
164c6fd2807SJeff Garzik {
165c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
166c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
167c6fd2807SJeff Garzik 
168c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
169c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
170c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
171c6fd2807SJeff Garzik 	tf->device	= fis[7];
172c6fd2807SJeff Garzik 
173c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
174c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
175c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
176c6fd2807SJeff Garzik 
177c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
178c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
179c6fd2807SJeff Garzik }
180c6fd2807SJeff Garzik 
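/*
 * Usage sketch for the two converters above (example_fis_round_trip,
 * rx_fis and the literal values are hypothetical): build the 20-byte
 * Register - Host to Device FIS for a READ DMA EXT command, then decode
 * a received Register - Device to Host FIS back into a taskfile the way
 * a SATA low-level driver might.
 */
#if 0
static void example_fis_round_trip(struct ata_device *dev, const u8 *rx_fis)
{
	struct ata_taskfile tf;
	u8 fis[20];

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_EXT;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
	tf.nsect = 8;				/* eight sectors	  */
	tf.lbah = 0x01;				/* starting LBA 0x010000  */

	ata_tf_to_fis(&tf, fis, 0);		/* pmp 0; fis[0] == 0x27  */

	/* decode a D2H FIS received from the device on completion */
	ata_tf_from_fis(rx_fis, &tf);		/* command <- status,
						   feature <- error	  */
}
#endif
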
181c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
182c6fd2807SJeff Garzik 	/* pio multi */
183c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
184c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
185c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
186c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
187c6fd2807SJeff Garzik 	0,
188c6fd2807SJeff Garzik 	0,
189c6fd2807SJeff Garzik 	0,
190c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
191c6fd2807SJeff Garzik 	/* pio */
192c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
193c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
194c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
195c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
196c6fd2807SJeff Garzik 	0,
197c6fd2807SJeff Garzik 	0,
198c6fd2807SJeff Garzik 	0,
199c6fd2807SJeff Garzik 	0,
200c6fd2807SJeff Garzik 	/* dma */
201c6fd2807SJeff Garzik 	ATA_CMD_READ,
202c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
203c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
204c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
205c6fd2807SJeff Garzik 	0,
206c6fd2807SJeff Garzik 	0,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
209c6fd2807SJeff Garzik };
210c6fd2807SJeff Garzik 
211c6fd2807SJeff Garzik /**
212c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
213bd056d7eSTejun Heo  *	@tf: command to examine and configure
214bd056d7eSTejun Heo  *	@dev: device tf belongs to
215c6fd2807SJeff Garzik  *
216c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
217c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
218c6fd2807SJeff Garzik  *
219c6fd2807SJeff Garzik  *	LOCKING:
220c6fd2807SJeff Garzik  *	caller.
221c6fd2807SJeff Garzik  */
222bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
223c6fd2807SJeff Garzik {
224c6fd2807SJeff Garzik 	u8 cmd;
225c6fd2807SJeff Garzik 
226c6fd2807SJeff Garzik 	int index, fua, lba48, write;
227c6fd2807SJeff Garzik 
228c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
229c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
230c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
231c6fd2807SJeff Garzik 
232c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
233c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
234c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
235bd056d7eSTejun Heo 	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
236c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
237c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
238c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
239c6fd2807SJeff Garzik 	} else {
240c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
241c6fd2807SJeff Garzik 		index = 16;
242c6fd2807SJeff Garzik 	}
243c6fd2807SJeff Garzik 
244c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
245c6fd2807SJeff Garzik 	if (cmd) {
246c6fd2807SJeff Garzik 		tf->command = cmd;
247c6fd2807SJeff Garzik 		return 0;
248c6fd2807SJeff Garzik 	}
249c6fd2807SJeff Garzik 	return -1;
250c6fd2807SJeff Garzik }
251c6fd2807SJeff Garzik 
252c6fd2807SJeff Garzik /**
25335b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
25435b649feSTejun Heo  *	@tf: ATA taskfile of interest
25535b649feSTejun Heo  *	@dev: ATA device @tf belongs to
25635b649feSTejun Heo  *
25735b649feSTejun Heo  *	LOCKING:
25835b649feSTejun Heo  *	None.
25935b649feSTejun Heo  *
26035b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
26135b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
26235b649feSTejun Heo  *	flags select the address format to use.
26335b649feSTejun Heo  *
26435b649feSTejun Heo  *	RETURNS:
26535b649feSTejun Heo  *	Block address read from @tf.
26635b649feSTejun Heo  */
26735b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
26835b649feSTejun Heo {
26935b649feSTejun Heo 	u64 block = 0;
27035b649feSTejun Heo 
27135b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
27235b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
27335b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
27435b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
27535b649feSTejun Heo 			block |= tf->hob_lbal << 24;
27635b649feSTejun Heo 		} else
27735b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
27835b649feSTejun Heo 
27935b649feSTejun Heo 		block |= tf->lbah << 16;
28035b649feSTejun Heo 		block |= tf->lbam << 8;
28135b649feSTejun Heo 		block |= tf->lbal;
28235b649feSTejun Heo 	} else {
28335b649feSTejun Heo 		u32 cyl, head, sect;
28435b649feSTejun Heo 
28535b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
28635b649feSTejun Heo 		head = tf->device & 0xf;
28735b649feSTejun Heo 		sect = tf->lbal;
28835b649feSTejun Heo 
28935b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
29035b649feSTejun Heo 	}
29135b649feSTejun Heo 
29235b649feSTejun Heo 	return block;
29335b649feSTejun Heo }
29435b649feSTejun Heo 
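/*
 * Worked example for the CHS branch above (hypothetical geometry): with
 * dev->heads == 16 and dev->sectors == 63, a taskfile carrying cyl 500,
 * head 7, sect 1 decodes to block = (500 * 16 + 7) * 63 + 1 = 504442.
 */
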
29535b649feSTejun Heo /**
296bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
297bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
298bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
299bd056d7eSTejun Heo  *	@block: Block address
300bd056d7eSTejun Heo  *	@n_block: Number of blocks
301bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
302bd056d7eSTejun Heo  *	@tag: tag
303bd056d7eSTejun Heo  *
304bd056d7eSTejun Heo  *	LOCKING:
305bd056d7eSTejun Heo  *	None.
306bd056d7eSTejun Heo  *
307bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
308bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
309bd056d7eSTejun Heo  *
310bd056d7eSTejun Heo  *	RETURNS:
311bd056d7eSTejun Heo  *
312bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
313bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
314bd056d7eSTejun Heo  */
315bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
316bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
317bd056d7eSTejun Heo 		    unsigned int tag)
318bd056d7eSTejun Heo {
319bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
320bd056d7eSTejun Heo 	tf->flags |= tf_flags;
321bd056d7eSTejun Heo 
3226d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
323bd056d7eSTejun Heo 		/* yay, NCQ */
324bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
325bd056d7eSTejun Heo 			return -ERANGE;
326bd056d7eSTejun Heo 
327bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
328bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
329bd056d7eSTejun Heo 
330bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
331bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
332bd056d7eSTejun Heo 		else
333bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
334bd056d7eSTejun Heo 
335bd056d7eSTejun Heo 		tf->nsect = tag << 3;
336bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
337bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
338bd056d7eSTejun Heo 
339bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
340bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
341bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
342bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
343bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
344bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
345bd056d7eSTejun Heo 
346bd056d7eSTejun Heo 		tf->device = 1 << 6;
347bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
348bd056d7eSTejun Heo 			tf->device |= 1 << 7;
349bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
350bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
351bd056d7eSTejun Heo 
352bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
353bd056d7eSTejun Heo 			/* use LBA28 */
354bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
355bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
356bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
357bd056d7eSTejun Heo 				return -ERANGE;
358bd056d7eSTejun Heo 
359bd056d7eSTejun Heo 			/* use LBA48 */
360bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
361bd056d7eSTejun Heo 
362bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
363bd056d7eSTejun Heo 
364bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
365bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
366bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
367bd056d7eSTejun Heo 		} else
368bd056d7eSTejun Heo 			/* request too large even for LBA48 */
369bd056d7eSTejun Heo 			return -ERANGE;
370bd056d7eSTejun Heo 
371bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
372bd056d7eSTejun Heo 			return -EINVAL;
373bd056d7eSTejun Heo 
374bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
375bd056d7eSTejun Heo 
376bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
377bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
378bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
379bd056d7eSTejun Heo 
380bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
381bd056d7eSTejun Heo 	} else {
382bd056d7eSTejun Heo 		/* CHS */
383bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
384bd056d7eSTejun Heo 
385bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
386bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
387bd056d7eSTejun Heo 			return -ERANGE;
388bd056d7eSTejun Heo 
389bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
390bd056d7eSTejun Heo 			return -EINVAL;
391bd056d7eSTejun Heo 
392bd056d7eSTejun Heo 		/* Convert LBA to CHS */
393bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
394bd056d7eSTejun Heo 		cyl   = track / dev->heads;
395bd056d7eSTejun Heo 		head  = track % dev->heads;
396bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
397bd056d7eSTejun Heo 
398bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
399bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
400bd056d7eSTejun Heo 
401bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
402bd056d7eSTejun Heo 		   Cylinder: 0-65535
403bd056d7eSTejun Heo 		   Head: 0-15
404bd056d7eSTejun Heo 		   Sector: 1-255*/
405bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
406bd056d7eSTejun Heo 			return -ERANGE;
407bd056d7eSTejun Heo 
408bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
409bd056d7eSTejun Heo 		tf->lbal = sect;
410bd056d7eSTejun Heo 		tf->lbam = cyl;
411bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
412bd056d7eSTejun Heo 		tf->device |= head;
413bd056d7eSTejun Heo 	}
414bd056d7eSTejun Heo 
415bd056d7eSTejun Heo 	return 0;
416bd056d7eSTejun Heo }
417bd056d7eSTejun Heo 
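/*
 * Worked example for the CHS branch above (hypothetical geometry): with
 * dev->heads == 16 and dev->sectors == 63, block 101008 converts as
 * track = 101008 / 63 = 1603, cyl = 1603 / 16 = 100, head = 1603 % 16 = 3,
 * sect = 101008 % 63 + 1 = 20, i.e. CHS (100, 3, 20).
 *
 * Minimal call sketch (example_build_write and its literal values are
 * hypothetical; the NCQ branch is taken only when ata_ncq_enabled(dev)
 * holds, otherwise the LBA48/LBA28/CHS paths above apply):
 */
#if 0
static int example_build_write(struct ata_device *dev, struct ata_taskfile *tf)
{
	/* tf is assumed to have been initialized, e.g. via ata_tf_init() */
	/* 2048-sector (1 MiB) FUA write starting at LBA 1234567, NCQ tag 5 */
	return ata_build_rw_tf(tf, dev, 1234567ULL, 2048,
			       ATA_TFLAG_WRITE | ATA_TFLAG_FUA, 5);
}
#endif
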
418bd056d7eSTejun Heo /**
419c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
420c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
421c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
422c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
423c6fd2807SJeff Garzik  *
424c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
425c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
426c6fd2807SJeff Garzik  *
427c6fd2807SJeff Garzik  *	LOCKING:
428c6fd2807SJeff Garzik  *	None.
429c6fd2807SJeff Garzik  *
430c6fd2807SJeff Garzik  *	RETURNS:
431c6fd2807SJeff Garzik  *	Packed xfer_mask.
432c6fd2807SJeff Garzik  */
433c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
434c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
435c6fd2807SJeff Garzik 				      unsigned int udma_mask)
436c6fd2807SJeff Garzik {
437c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
438c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
439c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
440c6fd2807SJeff Garzik }
441c6fd2807SJeff Garzik 
442c6fd2807SJeff Garzik /**
443c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
444c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
445c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
446c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
447c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
448c6fd2807SJeff Garzik  *
449c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
450c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
451c6fd2807SJeff Garzik  */
452c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
453c6fd2807SJeff Garzik 				unsigned int *pio_mask,
454c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
455c6fd2807SJeff Garzik 				unsigned int *udma_mask)
456c6fd2807SJeff Garzik {
457c6fd2807SJeff Garzik 	if (pio_mask)
458c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
459c6fd2807SJeff Garzik 	if (mwdma_mask)
460c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
461c6fd2807SJeff Garzik 	if (udma_mask)
462c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
463c6fd2807SJeff Garzik }
464c6fd2807SJeff Garzik 
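/*
 * Usage sketch for the pack/unpack pair above (the literal masks are
 * hypothetical): pack PIO0-4, MWDMA0-2 and UDMA0-5 into one xfer_mask
 * and recover the individual masks again.
 */
#if 0
static void example_xfermask(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f once more */
}
#endif
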
465c6fd2807SJeff Garzik static const struct ata_xfer_ent {
466c6fd2807SJeff Garzik 	int shift, bits;
467c6fd2807SJeff Garzik 	u8 base;
468c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
469c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
470c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
471c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
472c6fd2807SJeff Garzik 	{ -1, },
473c6fd2807SJeff Garzik };
474c6fd2807SJeff Garzik 
475c6fd2807SJeff Garzik /**
476c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
477c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
478c6fd2807SJeff Garzik  *
479c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
480c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
481c6fd2807SJeff Garzik  *
482c6fd2807SJeff Garzik  *	LOCKING:
483c6fd2807SJeff Garzik  *	None.
484c6fd2807SJeff Garzik  *
485c6fd2807SJeff Garzik  *	RETURNS:
486c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
487c6fd2807SJeff Garzik  */
488c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
489c6fd2807SJeff Garzik {
490c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
491c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
492c6fd2807SJeff Garzik 
493c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
494c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
495c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
496c6fd2807SJeff Garzik 	return 0;
497c6fd2807SJeff Garzik }
498c6fd2807SJeff Garzik 
499c6fd2807SJeff Garzik /**
500c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
501c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
502c6fd2807SJeff Garzik  *
503c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
504c6fd2807SJeff Garzik  *
505c6fd2807SJeff Garzik  *	LOCKING:
506c6fd2807SJeff Garzik  *	None.
507c6fd2807SJeff Garzik  *
508c6fd2807SJeff Garzik  *	RETURNS:
509c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
510c6fd2807SJeff Garzik  */
511c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
512c6fd2807SJeff Garzik {
513c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
514c6fd2807SJeff Garzik 
515c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
517c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
518c6fd2807SJeff Garzik 	return 0;
519c6fd2807SJeff Garzik }
520c6fd2807SJeff Garzik 
521c6fd2807SJeff Garzik /**
522c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
523c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
524c6fd2807SJeff Garzik  *
525c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
526c6fd2807SJeff Garzik  *
527c6fd2807SJeff Garzik  *	LOCKING:
528c6fd2807SJeff Garzik  *	None.
529c6fd2807SJeff Garzik  *
530c6fd2807SJeff Garzik  *	RETURNS:
531c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
532c6fd2807SJeff Garzik  */
533c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
534c6fd2807SJeff Garzik {
535c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
536c6fd2807SJeff Garzik 
537c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
539c6fd2807SJeff Garzik 			return ent->shift;
540c6fd2807SJeff Garzik 	return -1;
541c6fd2807SJeff Garzik }
542c6fd2807SJeff Garzik 
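/*
 * Worked example for the three helpers above: an xfer_mask whose highest
 * set bit is UDMA5 maps to XFER_UDMA_0 + 5 == XFER_UDMA_5;
 * ata_xfer_mode2mask(XFER_UDMA_5) yields the single UDMA5 bit,
 * 1 << (ATA_SHIFT_UDMA + 5), and ata_xfer_mode2shift(XFER_UDMA_5)
 * yields ATA_SHIFT_UDMA.
 */
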
543c6fd2807SJeff Garzik /**
544c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
545c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
546c6fd2807SJeff Garzik  *
547c6fd2807SJeff Garzik  *	Determine the string which represents the highest speed
548c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
549c6fd2807SJeff Garzik  *
550c6fd2807SJeff Garzik  *	LOCKING:
551c6fd2807SJeff Garzik  *	None.
552c6fd2807SJeff Garzik  *
553c6fd2807SJeff Garzik  *	RETURNS:
554c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
555c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
556c6fd2807SJeff Garzik  */
557c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
558c6fd2807SJeff Garzik {
559c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
560c6fd2807SJeff Garzik 		"PIO0",
561c6fd2807SJeff Garzik 		"PIO1",
562c6fd2807SJeff Garzik 		"PIO2",
563c6fd2807SJeff Garzik 		"PIO3",
564c6fd2807SJeff Garzik 		"PIO4",
565b352e57dSAlan Cox 		"PIO5",
566b352e57dSAlan Cox 		"PIO6",
567c6fd2807SJeff Garzik 		"MWDMA0",
568c6fd2807SJeff Garzik 		"MWDMA1",
569c6fd2807SJeff Garzik 		"MWDMA2",
570b352e57dSAlan Cox 		"MWDMA3",
571b352e57dSAlan Cox 		"MWDMA4",
572c6fd2807SJeff Garzik 		"UDMA/16",
573c6fd2807SJeff Garzik 		"UDMA/25",
574c6fd2807SJeff Garzik 		"UDMA/33",
575c6fd2807SJeff Garzik 		"UDMA/44",
576c6fd2807SJeff Garzik 		"UDMA/66",
577c6fd2807SJeff Garzik 		"UDMA/100",
578c6fd2807SJeff Garzik 		"UDMA/133",
579c6fd2807SJeff Garzik 		"UDMA7",
580c6fd2807SJeff Garzik 	};
581c6fd2807SJeff Garzik 	int highbit;
582c6fd2807SJeff Garzik 
583c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
584c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
585c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
586c6fd2807SJeff Garzik 	return "<n/a>";
587c6fd2807SJeff Garzik }
588c6fd2807SJeff Garzik 
589c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
590c6fd2807SJeff Garzik {
591c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
592c6fd2807SJeff Garzik 		"1.5 Gbps",
593c6fd2807SJeff Garzik 		"3.0 Gbps",
594c6fd2807SJeff Garzik 	};
595c6fd2807SJeff Garzik 
596c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
597c6fd2807SJeff Garzik 		return "<unknown>";
598c6fd2807SJeff Garzik 	return spd_str[spd - 1];
599c6fd2807SJeff Garzik }
600c6fd2807SJeff Garzik 
601c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
602c6fd2807SJeff Garzik {
60309d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
60409d7f9b0STejun Heo 		if (ata_msg_drv(dev->ap))
605c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6064ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6074ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
608c6fd2807SJeff Garzik 		dev->class++;
609c6fd2807SJeff Garzik 	}
610c6fd2807SJeff Garzik }
611c6fd2807SJeff Garzik 
612c6fd2807SJeff Garzik /**
613c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
614c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
615c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
616c6fd2807SJeff Garzik  *
6170d5ff566STejun Heo  *	This technique was originally described in
6180d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
6190d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
6200d5ff566STejun Heo  *
6210d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
6220d5ff566STejun Heo  *	and if a device is present, it will respond by
6230d5ff566STejun Heo  *	correctly storing and echoing back the
6240d5ff566STejun Heo  *	ATA shadow register contents.
625c6fd2807SJeff Garzik  *
626c6fd2807SJeff Garzik  *	LOCKING:
627c6fd2807SJeff Garzik  *	caller.
628c6fd2807SJeff Garzik  */
629c6fd2807SJeff Garzik 
6300d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
631c6fd2807SJeff Garzik {
6320d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
6330d5ff566STejun Heo 	u8 nsect, lbal;
6340d5ff566STejun Heo 
6350d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
6360d5ff566STejun Heo 
6370d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6380d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6390d5ff566STejun Heo 
6400d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
6410d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
6420d5ff566STejun Heo 
6430d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6440d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6450d5ff566STejun Heo 
6460d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
6470d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
6480d5ff566STejun Heo 
6490d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
6500d5ff566STejun Heo 		return 1;	/* we found a device */
6510d5ff566STejun Heo 
6520d5ff566STejun Heo 	return 0;		/* nothing found */
653c6fd2807SJeff Garzik }
654c6fd2807SJeff Garzik 
655c6fd2807SJeff Garzik /**
656c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
657c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
658c6fd2807SJeff Garzik  *
659c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
660c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
661c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
662c6fd2807SJeff Garzik  *
663c6fd2807SJeff Garzik  *	LOCKING:
664c6fd2807SJeff Garzik  *	None.
665c6fd2807SJeff Garzik  *
666c6fd2807SJeff Garzik  *	RETURNS:
667c6fd2807SJeff Garzik  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
668c6fd2807SJeff Garzik  *	in the event of failure.
669c6fd2807SJeff Garzik  */
670c6fd2807SJeff Garzik 
671c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
672c6fd2807SJeff Garzik {
673c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
674c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
675c6fd2807SJeff Garzik 	 * so we only check those.  It's sufficient for uniqueness.
676c6fd2807SJeff Garzik 	 */
677c6fd2807SJeff Garzik 
678c6fd2807SJeff Garzik 	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
679c6fd2807SJeff Garzik 	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
680c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
681c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
682c6fd2807SJeff Garzik 	}
683c6fd2807SJeff Garzik 
684c6fd2807SJeff Garzik 	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
685c6fd2807SJeff Garzik 	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
686c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
687c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
688c6fd2807SJeff Garzik 	}
689c6fd2807SJeff Garzik 
690c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
691c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
692c6fd2807SJeff Garzik }
693c6fd2807SJeff Garzik 
694c6fd2807SJeff Garzik /**
695c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
696c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
697c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
698c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
699c6fd2807SJeff Garzik  *
700c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
701c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
702c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
703c6fd2807SJeff Garzik  *	and diagnostics.
704c6fd2807SJeff Garzik  *
705c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
706c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
707c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
708c6fd2807SJeff Garzik  *
709c6fd2807SJeff Garzik  *	LOCKING:
710c6fd2807SJeff Garzik  *	caller.
711c6fd2807SJeff Garzik  *
712c6fd2807SJeff Garzik  *	RETURNS:
713c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
714c6fd2807SJeff Garzik  */
715c6fd2807SJeff Garzik 
716a619f981SAkira Iguchi unsigned int
717c6fd2807SJeff Garzik ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
718c6fd2807SJeff Garzik {
719c6fd2807SJeff Garzik 	struct ata_taskfile tf;
720c6fd2807SJeff Garzik 	unsigned int class;
721c6fd2807SJeff Garzik 	u8 err;
722c6fd2807SJeff Garzik 
723c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
724c6fd2807SJeff Garzik 
725c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
726c6fd2807SJeff Garzik 
727c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
728c6fd2807SJeff Garzik 	err = tf.feature;
729c6fd2807SJeff Garzik 	if (r_err)
730c6fd2807SJeff Garzik 		*r_err = err;
731c6fd2807SJeff Garzik 
73293590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
73393590859SAlan Cox 	if (err == 0 && device == 0)
73493590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
73593590859SAlan Cox 		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
73693590859SAlan Cox 	else if (err == 1)
737c6fd2807SJeff Garzik 		/* do nothing */ ;
738c6fd2807SJeff Garzik 	else if ((device == 0) && (err == 0x81))
739c6fd2807SJeff Garzik 		/* do nothing */ ;
740c6fd2807SJeff Garzik 	else
741c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
742c6fd2807SJeff Garzik 
743c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
744c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
745c6fd2807SJeff Garzik 
746c6fd2807SJeff Garzik 	if (class == ATA_DEV_UNKNOWN)
747c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
748c6fd2807SJeff Garzik 	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
749c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
750c6fd2807SJeff Garzik 	return class;
751c6fd2807SJeff Garzik }
752c6fd2807SJeff Garzik 
753c6fd2807SJeff Garzik /**
754c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
755c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
756c6fd2807SJeff Garzik  *	@s: string into which data is output
757c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
758c6fd2807SJeff Garzik  *	@len: length of string to return; must be an even number.
759c6fd2807SJeff Garzik  *
760c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
761c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
762c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
763c6fd2807SJeff Garzik  *
764c6fd2807SJeff Garzik  *	LOCKING:
765c6fd2807SJeff Garzik  *	caller.
766c6fd2807SJeff Garzik  */
767c6fd2807SJeff Garzik 
768c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
769c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
770c6fd2807SJeff Garzik {
771c6fd2807SJeff Garzik 	unsigned int c;
772c6fd2807SJeff Garzik 
773c6fd2807SJeff Garzik 	while (len > 0) {
774c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
775c6fd2807SJeff Garzik 		*s = c;
776c6fd2807SJeff Garzik 		s++;
777c6fd2807SJeff Garzik 
778c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
779c6fd2807SJeff Garzik 		*s = c;
780c6fd2807SJeff Garzik 		s++;
781c6fd2807SJeff Garzik 
782c6fd2807SJeff Garzik 		ofs++;
783c6fd2807SJeff Garzik 		len -= 2;
784c6fd2807SJeff Garzik 	}
785c6fd2807SJeff Garzik }
786c6fd2807SJeff Garzik 
787c6fd2807SJeff Garzik /**
788c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
789c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
790c6fd2807SJeff Garzik  *	@s: string into which data is output
791c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
792c6fd2807SJeff Garzik  *	@len: length of string to return; must be an odd number.
793c6fd2807SJeff Garzik  *
794c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
795c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
796c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
797c6fd2807SJeff Garzik  *
798c6fd2807SJeff Garzik  *	LOCKING:
799c6fd2807SJeff Garzik  *	caller.
800c6fd2807SJeff Garzik  */
801c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
802c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
803c6fd2807SJeff Garzik {
804c6fd2807SJeff Garzik 	unsigned char *p;
805c6fd2807SJeff Garzik 
806c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
807c6fd2807SJeff Garzik 
808c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
809c6fd2807SJeff Garzik 
810c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
811c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
812c6fd2807SJeff Garzik 		p--;
813c6fd2807SJeff Garzik 	*p = '\0';
814c6fd2807SJeff Garzik }
815c6fd2807SJeff Garzik 
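/*
 * Usage sketch (example_model_string and the buffer name are
 * hypothetical): IDENTIFY words 27-46 hold the 40-byte model number, so
 * a 41-byte buffer gives the odd length required above and yields a
 * trimmed, NUL-terminated string.
 */
#if 0
static void example_model_string(const struct ata_device *dev)
{
	unsigned char model[41];

	ata_id_c_string(dev->id, model, 27, sizeof(model));
}
#endif
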
8161e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
8171e999736SAlan Cox {
8181e999736SAlan Cox 	u64 sectors = 0;
8191e999736SAlan Cox 
8201e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
8211e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
8221e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
8231e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8241e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8251e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8261e999736SAlan Cox 
8271e999736SAlan Cox 	return ++sectors;
8281e999736SAlan Cox }
8291e999736SAlan Cox 
8301e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
8311e999736SAlan Cox {
8321e999736SAlan Cox 	u64 sectors = 0;
8331e999736SAlan Cox 
8341e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
8351e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8361e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8371e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8381e999736SAlan Cox 
8391e999736SAlan Cox 	return ++sectors;
8401e999736SAlan Cox }
8411e999736SAlan Cox 
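/*
 * Note on the "++sectors" in the two helpers above: READ/SET NATIVE MAX
 * report the highest addressable LBA, so adding one converts that
 * maximum address into a sector count (e.g. a native max LBA of
 * 0x0FFFFFFE means 0x0FFFFFFF addressable sectors).
 */
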
8421e999736SAlan Cox /**
8431e999736SAlan Cox  *	ata_read_native_max_address_ext	-	LBA48 native max query
8441e999736SAlan Cox  *	@dev: Device to query
8451e999736SAlan Cox  *
8461e999736SAlan Cox  *	Perform an LBA48 size query upon the device in question. Return the
8471e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
8481e999736SAlan Cox  */
8491e999736SAlan Cox 
8501e999736SAlan Cox static u64 ata_read_native_max_address_ext(struct ata_device *dev)
8511e999736SAlan Cox {
8521e999736SAlan Cox 	unsigned int err;
8531e999736SAlan Cox 	struct ata_taskfile tf;
8541e999736SAlan Cox 
8551e999736SAlan Cox 	ata_tf_init(dev, &tf);
8561e999736SAlan Cox 
8571e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
8581e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
8591e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8601e999736SAlan Cox 	tf.device |= 0x40;
8611e999736SAlan Cox 
8621e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8631e999736SAlan Cox 	if (err)
8641e999736SAlan Cox 		return 0;
8651e999736SAlan Cox 
8661e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
8671e999736SAlan Cox }
8681e999736SAlan Cox 
8691e999736SAlan Cox /**
8701e999736SAlan Cox  *	ata_read_native_max_address	-	LBA28 native max query
8711e999736SAlan Cox  *	@dev: Device to query
8721e999736SAlan Cox  *
8731e999736SAlan Cox  *	Perform an LBA28 size query upon the device in question. Return the
8741e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
8751e999736SAlan Cox  */
8761e999736SAlan Cox 
8771e999736SAlan Cox static u64 ata_read_native_max_address(struct ata_device *dev)
8781e999736SAlan Cox {
8791e999736SAlan Cox 	unsigned int err;
8801e999736SAlan Cox 	struct ata_taskfile tf;
8811e999736SAlan Cox 
8821e999736SAlan Cox 	ata_tf_init(dev, &tf);
8831e999736SAlan Cox 
8841e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX;
8851e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
8861e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8871e999736SAlan Cox 	tf.device |= 0x40;
8881e999736SAlan Cox 
8891e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8901e999736SAlan Cox 	if (err)
8911e999736SAlan Cox 		return 0;
8921e999736SAlan Cox 
8931e999736SAlan Cox 	return ata_tf_to_lba(&tf);
8941e999736SAlan Cox }
8951e999736SAlan Cox 
8961e999736SAlan Cox /**
8971e999736SAlan Cox  *	ata_set_native_max_address_ext	-	LBA48 native max set
8981e999736SAlan Cox  *	@dev: Device to query
8996b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
9001e999736SAlan Cox  *
9011e999736SAlan Cox  *	Perform an LBA48 size set max upon the device in question. Return the
9021e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
9031e999736SAlan Cox  */
9041e999736SAlan Cox 
9051e999736SAlan Cox static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
9061e999736SAlan Cox {
9071e999736SAlan Cox 	unsigned int err;
9081e999736SAlan Cox 	struct ata_taskfile tf;
9091e999736SAlan Cox 
9101e999736SAlan Cox 	new_sectors--;
9111e999736SAlan Cox 
9121e999736SAlan Cox 	ata_tf_init(dev, &tf);
9131e999736SAlan Cox 
9141e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX_EXT;
9151e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
9161e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9171e999736SAlan Cox 	tf.device |= 0x40;
9181e999736SAlan Cox 
9191e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9201e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9211e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9221e999736SAlan Cox 
9231e999736SAlan Cox 	tf.hob_lbal = (new_sectors >> 24) & 0xff;
9241e999736SAlan Cox 	tf.hob_lbam = (new_sectors >> 32) & 0xff;
9251e999736SAlan Cox 	tf.hob_lbah = (new_sectors >> 40) & 0xff;
9261e999736SAlan Cox 
9271e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9281e999736SAlan Cox 	if (err)
9291e999736SAlan Cox 		return 0;
9301e999736SAlan Cox 
9311e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
9321e999736SAlan Cox }
9331e999736SAlan Cox 
9341e999736SAlan Cox /**
9351e999736SAlan Cox  *	ata_set_native_max_address	-	LBA28 native max set
9361e999736SAlan Cox  *	@dev: Device to query
9376b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
9381e999736SAlan Cox  *
9391e999736SAlan Cox  *	Perform an LBA28 size set max upon the device in question. Return the
9401e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
9411e999736SAlan Cox  */
9421e999736SAlan Cox 
9431e999736SAlan Cox static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
9441e999736SAlan Cox {
9451e999736SAlan Cox 	unsigned int err;
9461e999736SAlan Cox 	struct ata_taskfile tf;
9471e999736SAlan Cox 
9481e999736SAlan Cox 	new_sectors--;
9491e999736SAlan Cox 
9501e999736SAlan Cox 	ata_tf_init(dev, &tf);
9511e999736SAlan Cox 
9521e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX;
9531e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
9541e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9551e999736SAlan Cox 
9561e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9571e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9581e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9591e999736SAlan Cox 	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
9601e999736SAlan Cox 
9611e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9621e999736SAlan Cox 	if (err)
9631e999736SAlan Cox 		return 0;
9641e999736SAlan Cox 
9651e999736SAlan Cox 	return ata_tf_to_lba(&tf);
9661e999736SAlan Cox }
9671e999736SAlan Cox 
9681e999736SAlan Cox /**
9691e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
9701e999736SAlan Cox  *	@dev: Device to resize
9711e999736SAlan Cox  *
9721e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
9731e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
9741e999736SAlan Cox  *	the drive has the HPA feature set enabled.
9751e999736SAlan Cox  */
9761e999736SAlan Cox 
9771e999736SAlan Cox static u64 ata_hpa_resize(struct ata_device *dev)
9781e999736SAlan Cox {
9791e999736SAlan Cox 	u64 sectors = dev->n_sectors;
9801e999736SAlan Cox 	u64 hpa_sectors;
9811e999736SAlan Cox 
9821e999736SAlan Cox 	if (ata_id_has_lba48(dev->id))
9831e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address_ext(dev);
9841e999736SAlan Cox 	else
9851e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address(dev);
9861e999736SAlan Cox 
9871e999736SAlan Cox 	if (hpa_sectors > sectors) {
9881e999736SAlan Cox 		ata_dev_printk(dev, KERN_INFO,
9891e999736SAlan Cox 			"Host Protected Area detected:\n"
9901e999736SAlan Cox 			"\tcurrent size: %lld sectors\n"
9911e999736SAlan Cox 			"\tnative size: %lld sectors\n",
992bd1d5ec6SAndrew Morton 			(long long)sectors, (long long)hpa_sectors);
9931e999736SAlan Cox 
9941e999736SAlan Cox 		if (ata_ignore_hpa) {
9951e999736SAlan Cox 			if (ata_id_has_lba48(dev->id))
9961e999736SAlan Cox 				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
9971e999736SAlan Cox 			else
998bd1d5ec6SAndrew Morton 				hpa_sectors = ata_set_native_max_address(dev,
999bd1d5ec6SAndrew Morton 								hpa_sectors);
10001e999736SAlan Cox 
10011e999736SAlan Cox 			if (hpa_sectors) {
1002bd1d5ec6SAndrew Morton 				ata_dev_printk(dev, KERN_INFO, "native size "
1003bd1d5ec6SAndrew Morton 					"increased to %lld sectors\n",
1004bd1d5ec6SAndrew Morton 					(long long)hpa_sectors);
10051e999736SAlan Cox 				return hpa_sectors;
10061e999736SAlan Cox 			}
10071e999736SAlan Cox 		}
100837301a55STejun Heo 	} else if (hpa_sectors < sectors)
100937301a55STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
101037301a55STejun Heo 			       "is smaller than sectors (%lld)\n", __FUNCTION__,
101137301a55STejun Heo 			       (long long)hpa_sectors, (long long)sectors);
101237301a55STejun Heo 
10131e999736SAlan Cox 	return sectors;
10141e999736SAlan Cox }
10151e999736SAlan Cox 
1016c6fd2807SJeff Garzik static u64 ata_id_n_sectors(const u16 *id)
1017c6fd2807SJeff Garzik {
1018c6fd2807SJeff Garzik 	if (ata_id_has_lba(id)) {
1019c6fd2807SJeff Garzik 		if (ata_id_has_lba48(id))
1020c6fd2807SJeff Garzik 			return ata_id_u64(id, 100);
1021c6fd2807SJeff Garzik 		else
1022c6fd2807SJeff Garzik 			return ata_id_u32(id, 60);
1023c6fd2807SJeff Garzik 	} else {
1024c6fd2807SJeff Garzik 		if (ata_id_current_chs_valid(id))
1025c6fd2807SJeff Garzik 			return ata_id_u32(id, 57);
1026c6fd2807SJeff Garzik 		else
1027c6fd2807SJeff Garzik 			return id[1] * id[3] * id[6];
1028c6fd2807SJeff Garzik 	}
1029c6fd2807SJeff Garzik }
1030c6fd2807SJeff Garzik 
1031c6fd2807SJeff Garzik /**
103210305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
103310305f0fSAlan  *	@dev: device to identify
1034cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
103510305f0fSAlan  *
103610305f0fSAlan  *	Set up the timing values for the device based upon the identify
103710305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
103810305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
103910305f0fSAlan  *	mode correctly when possible.
104010305f0fSAlan  *
104110305f0fSAlan  *	In addition we emit similarly formatted messages to the default
104210305f0fSAlan  *	ata_dev_set_mode handler, in order to provide consistency of
104310305f0fSAlan  *	presentation.
104410305f0fSAlan  */
104510305f0fSAlan 
104610305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
104710305f0fSAlan {
104810305f0fSAlan 	unsigned int mask;
104910305f0fSAlan 	u8 mode;
105010305f0fSAlan 
105110305f0fSAlan 	/* Pack the DMA modes */
105210305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
105310305f0fSAlan 	if (dev->id[53] & 0x04)
105410305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
105510305f0fSAlan 
105610305f0fSAlan 	/* Select the mode in use */
105710305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
105810305f0fSAlan 
105910305f0fSAlan 	if (mode != 0) {
106010305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
106110305f0fSAlan 		       ata_mode_string(mask));
106210305f0fSAlan 	} else {
106310305f0fSAlan 		/* SWDMA perhaps ? */
106410305f0fSAlan 		mode = unknown;
106510305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
106610305f0fSAlan 	}
106710305f0fSAlan 
106810305f0fSAlan 	/* Configure the device reporting */
106910305f0fSAlan 	dev->xfer_mode = mode;
107010305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
107110305f0fSAlan }
107210305f0fSAlan 
107310305f0fSAlan /**
1074c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1075c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1076c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1077c6fd2807SJeff Garzik  *
1078c6fd2807SJeff Garzik  *	This function performs no action.
1079c6fd2807SJeff Garzik  *
1080c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1081c6fd2807SJeff Garzik  *
1082c6fd2807SJeff Garzik  *	LOCKING:
1083c6fd2807SJeff Garzik  *	caller.
1084c6fd2807SJeff Garzik  */
1085c6fd2807SJeff Garzik void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1086c6fd2807SJeff Garzik {
1087c6fd2807SJeff Garzik }
1088c6fd2807SJeff Garzik 
1089c6fd2807SJeff Garzik 
1090c6fd2807SJeff Garzik /**
1091c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1092c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1093c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1094c6fd2807SJeff Garzik  *
1095c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1096c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1097c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1098c6fd2807SJeff Garzik  *
1099c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1100c6fd2807SJeff Garzik  *
1101c6fd2807SJeff Garzik  *	LOCKING:
1102c6fd2807SJeff Garzik  *	caller.
1103c6fd2807SJeff Garzik  */
1104c6fd2807SJeff Garzik 
1105c6fd2807SJeff Garzik void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1106c6fd2807SJeff Garzik {
1107c6fd2807SJeff Garzik 	u8 tmp;
1108c6fd2807SJeff Garzik 
1109c6fd2807SJeff Garzik 	if (device == 0)
1110c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1111c6fd2807SJeff Garzik 	else
1112c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1113c6fd2807SJeff Garzik 
11140d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1115c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1116c6fd2807SJeff Garzik }
1117c6fd2807SJeff Garzik 
1118c6fd2807SJeff Garzik /**
1119c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1120c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1121c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1122c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1123c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1124c6fd2807SJeff Garzik  *
1125c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1126c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1127c6fd2807SJeff Garzik  *	ATA channel.
1128c6fd2807SJeff Garzik  *
1129c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1130c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1131c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1132c6fd2807SJeff Garzik  *
1133c6fd2807SJeff Garzik  *	LOCKING:
1134c6fd2807SJeff Garzik  *	caller.
1135c6fd2807SJeff Garzik  */
1136c6fd2807SJeff Garzik 
1137c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1138c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1139c6fd2807SJeff Garzik {
1140c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
114144877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
114244877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1143c6fd2807SJeff Garzik 
1144c6fd2807SJeff Garzik 	if (wait)
1145c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1146c6fd2807SJeff Garzik 
1147c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1148c6fd2807SJeff Garzik 
1149c6fd2807SJeff Garzik 	if (wait) {
1150c6fd2807SJeff Garzik 		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1151c6fd2807SJeff Garzik 			msleep(150);
1152c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1153c6fd2807SJeff Garzik 	}
1154c6fd2807SJeff Garzik }
1155c6fd2807SJeff Garzik 
1156c6fd2807SJeff Garzik /**
1157c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1158c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1159c6fd2807SJeff Garzik  *
1160c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1161c6fd2807SJeff Garzik  *	page.
1162c6fd2807SJeff Garzik  *
1163c6fd2807SJeff Garzik  *	LOCKING:
1164c6fd2807SJeff Garzik  *	caller.
1165c6fd2807SJeff Garzik  */
1166c6fd2807SJeff Garzik 
1167c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1168c6fd2807SJeff Garzik {
1169c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1170c6fd2807SJeff Garzik 		"53==0x%04x  "
1171c6fd2807SJeff Garzik 		"63==0x%04x  "
1172c6fd2807SJeff Garzik 		"64==0x%04x  "
1173c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1174c6fd2807SJeff Garzik 		id[49],
1175c6fd2807SJeff Garzik 		id[53],
1176c6fd2807SJeff Garzik 		id[63],
1177c6fd2807SJeff Garzik 		id[64],
1178c6fd2807SJeff Garzik 		id[75]);
1179c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1180c6fd2807SJeff Garzik 		"81==0x%04x  "
1181c6fd2807SJeff Garzik 		"82==0x%04x  "
1182c6fd2807SJeff Garzik 		"83==0x%04x  "
1183c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1184c6fd2807SJeff Garzik 		id[80],
1185c6fd2807SJeff Garzik 		id[81],
1186c6fd2807SJeff Garzik 		id[82],
1187c6fd2807SJeff Garzik 		id[83],
1188c6fd2807SJeff Garzik 		id[84]);
1189c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1190c6fd2807SJeff Garzik 		"93==0x%04x\n",
1191c6fd2807SJeff Garzik 		id[88],
1192c6fd2807SJeff Garzik 		id[93]);
1193c6fd2807SJeff Garzik }
1194c6fd2807SJeff Garzik 
1195c6fd2807SJeff Garzik /**
1196c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1197c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1198c6fd2807SJeff Garzik  *
1199c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1200c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1201c6fd2807SJeff Garzik  *
1202c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1203c6fd2807SJeff Garzik  *
1204c6fd2807SJeff Garzik  *	LOCKING:
1205c6fd2807SJeff Garzik  *	None.
1206c6fd2807SJeff Garzik  *
1207c6fd2807SJeff Garzik  *	RETURNS:
1208c6fd2807SJeff Garzik  *	Computed xfermask
1209c6fd2807SJeff Garzik  */
1210c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1211c6fd2807SJeff Garzik {
1212c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1213c6fd2807SJeff Garzik 
1214c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1215c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1216c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1217c6fd2807SJeff Garzik 		pio_mask <<= 3;
1218c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1219c6fd2807SJeff Garzik 	} else {
1220c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1221c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1222c6fd2807SJeff Garzik 		 * a mask.
1223c6fd2807SJeff Garzik 		 */
12247a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
122546767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
122646767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
122746767aebSAlan Cox 		else
122846767aebSAlan Cox 			pio_mask = 1;
1229c6fd2807SJeff Garzik 
1230c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1231c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1232c6fd2807SJeff Garzik 		 * process. However it's the speeds, not the modes, that
1233c6fd2807SJeff Garzik 		 * are supported... Note that drivers using the timing API
1234c6fd2807SJeff Garzik 		 * will get this right anyway.
1235c6fd2807SJeff Garzik 		 */
1236c6fd2807SJeff Garzik 	}
1237c6fd2807SJeff Garzik 
1238c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1239c6fd2807SJeff Garzik 
1240b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1241b352e57dSAlan Cox 		/*
1242b352e57dSAlan Cox 		 *	Process compact flash extended modes
1243b352e57dSAlan Cox 		 */
1244b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1245b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1246b352e57dSAlan Cox 
1247b352e57dSAlan Cox 		if (pio)
1248b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1249b352e57dSAlan Cox 		if (pio > 1)
1250b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1251b352e57dSAlan Cox 		if (dma)
1252b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1253b352e57dSAlan Cox 		if (dma > 1)
1254b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1255b352e57dSAlan Cox 	}
1256b352e57dSAlan Cox 
1257c6fd2807SJeff Garzik 	udma_mask = 0;
1258c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1259c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1260c6fd2807SJeff Garzik 
1261c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1262c6fd2807SJeff Garzik }
1263c6fd2807SJeff Garzik 
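/*
 * Usage sketch (illustrative only): the probe code further down uses
 * the packed mask mainly for reporting, e.g.
 *
 *	unsigned int xfer_mask = ata_id_xfermask(dev->id);
 *
 *	ata_dev_printk(dev, KERN_INFO, "max %s\n",
 *		       ata_mode_string(xfer_mask));
 *
 * ata_mode_string() is the same helper ata_dev_configure() uses below
 * to print the supported transfer modes.
 */
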
1264c6fd2807SJeff Garzik /**
1265c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1266c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1267c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
126865f27f38SDavid Howells  *	@data: data for @fn to use
1269c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1270c6fd2807SJeff Garzik  *
1271c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1272c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1273c6fd2807SJeff Garzik  *	user's (low level driver's) responsibility to make sure that only
1274c6fd2807SJeff Garzik  *	one task is active at any given time.
1275c6fd2807SJeff Garzik  *
1276c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1277c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1278c6fd2807SJeff Garzik  *	synchronization.
1279c6fd2807SJeff Garzik  *
1280c6fd2807SJeff Garzik  *	LOCKING:
1281c6fd2807SJeff Garzik  *	Inherited from caller.
1282c6fd2807SJeff Garzik  */
128365f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1284c6fd2807SJeff Garzik 			 unsigned long delay)
1285c6fd2807SJeff Garzik {
1286c6fd2807SJeff Garzik 	int rc;
1287c6fd2807SJeff Garzik 
1288c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
1289c6fd2807SJeff Garzik 		return;
1290c6fd2807SJeff Garzik 
129165f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
129265f27f38SDavid Howells 	ap->port_task_data = data;
1293c6fd2807SJeff Garzik 
1294c6fd2807SJeff Garzik 	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
1295c6fd2807SJeff Garzik 
1296c6fd2807SJeff Garzik 	/* rc == 0 means that another user is using port task */
1297c6fd2807SJeff Garzik 	WARN_ON(rc == 0);
1298c6fd2807SJeff Garzik }
1299c6fd2807SJeff Garzik 
1300c6fd2807SJeff Garzik /**
1301c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1302c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1303c6fd2807SJeff Garzik  *
1304c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1305c6fd2807SJeff Garzik  *	be running or scheduled.
1306c6fd2807SJeff Garzik  *
1307c6fd2807SJeff Garzik  *	LOCKING:
1308c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1309c6fd2807SJeff Garzik  */
1310c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1311c6fd2807SJeff Garzik {
1312c6fd2807SJeff Garzik 	unsigned long flags;
1313c6fd2807SJeff Garzik 
1314c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1315c6fd2807SJeff Garzik 
1316c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1317c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
1318c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1319c6fd2807SJeff Garzik 
1320c6fd2807SJeff Garzik 	DPRINTK("flush #1\n");
132128e53bddSOleg Nesterov 	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */
1322c6fd2807SJeff Garzik 
1323c6fd2807SJeff Garzik 	/*
1324c6fd2807SJeff Garzik 	 * At this point, if a task is running, it's guaranteed to see
1325c6fd2807SJeff Garzik 	 * the FLUSH flag; thus, it will never queue pio tasks again.
1326c6fd2807SJeff Garzik 	 * Cancel and flush.
1327c6fd2807SJeff Garzik 	 */
1328c6fd2807SJeff Garzik 	if (!cancel_delayed_work(&ap->port_task)) {
1329c6fd2807SJeff Garzik 		if (ata_msg_ctl(ap))
1330c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1331c6fd2807SJeff Garzik 					__FUNCTION__);
133228e53bddSOleg Nesterov 		cancel_work_sync(&ap->port_task.work);
1333c6fd2807SJeff Garzik 	}
1334c6fd2807SJeff Garzik 
1335c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1336c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
1337c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1338c6fd2807SJeff Garzik 
1339c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1340c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1341c6fd2807SJeff Garzik }
1342c6fd2807SJeff Garzik 
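/*
 * Lifecycle sketch (illustrative only; my_poll_fn is a hypothetical
 * work handler, not a libata symbol): a polling task is queued once and
 * torn down again before anything that must not race with it:
 *
 *	ata_port_queue_task(ap, my_poll_fn, qc, msecs_to_jiffies(10));
 *	...
 *	ata_port_flush_task(ap);
 *
 * After the flush call returns, my_poll_fn is neither running nor
 * pending on ata_wq.
 */
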
13437102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1344c6fd2807SJeff Garzik {
1345c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1346c6fd2807SJeff Garzik 
1347c6fd2807SJeff Garzik 	complete(waiting);
1348c6fd2807SJeff Garzik }
1349c6fd2807SJeff Garzik 
1350c6fd2807SJeff Garzik /**
13512432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1352c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1353c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1354c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1355c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
13562432697bSTejun Heo  *	@sg: sg list for the data buffer of the command
13572432697bSTejun Heo  *	@n_elem: Number of sg entries
1358c6fd2807SJeff Garzik  *
1359c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1360c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1361c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1362c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1363c6fd2807SJeff Garzik  *	clean up after a timeout.
1364c6fd2807SJeff Garzik  *
1365c6fd2807SJeff Garzik  *	LOCKING:
1366c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1367c6fd2807SJeff Garzik  *
1368c6fd2807SJeff Garzik  *	RETURNS:
1369c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1370c6fd2807SJeff Garzik  */
13712432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1372c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
13732432697bSTejun Heo 			      int dma_dir, struct scatterlist *sg,
13742432697bSTejun Heo 			      unsigned int n_elem)
1375c6fd2807SJeff Garzik {
1376c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1377c6fd2807SJeff Garzik 	u8 command = tf->command;
1378c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1379c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1380c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1381c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1382c6fd2807SJeff Garzik 	unsigned long flags;
1383c6fd2807SJeff Garzik 	unsigned int err_mask;
1384c6fd2807SJeff Garzik 	int rc;
1385c6fd2807SJeff Garzik 
1386c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1387c6fd2807SJeff Garzik 
1388c6fd2807SJeff Garzik 	/* no internal command while frozen */
1389c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1390c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1391c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1392c6fd2807SJeff Garzik 	}
1393c6fd2807SJeff Garzik 
1394c6fd2807SJeff Garzik 	/* initialize internal qc */
1395c6fd2807SJeff Garzik 
1396c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1397c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1398c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1399c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1400c6fd2807SJeff Garzik 	 */
1401c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1402c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1403c6fd2807SJeff Garzik 	else
1404c6fd2807SJeff Garzik 		tag = 0;
1405c6fd2807SJeff Garzik 
1406c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1407c6fd2807SJeff Garzik 		BUG();
1408c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1409c6fd2807SJeff Garzik 
1410c6fd2807SJeff Garzik 	qc->tag = tag;
1411c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1412c6fd2807SJeff Garzik 	qc->ap = ap;
1413c6fd2807SJeff Garzik 	qc->dev = dev;
1414c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1415c6fd2807SJeff Garzik 
1416c6fd2807SJeff Garzik 	preempted_tag = ap->active_tag;
1417c6fd2807SJeff Garzik 	preempted_sactive = ap->sactive;
1418c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1419c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
1420c6fd2807SJeff Garzik 	ap->sactive = 0;
1421c6fd2807SJeff Garzik 	ap->qc_active = 0;
1422c6fd2807SJeff Garzik 
1423c6fd2807SJeff Garzik 	/* prepare & issue qc */
1424c6fd2807SJeff Garzik 	qc->tf = *tf;
1425c6fd2807SJeff Garzik 	if (cdb)
1426c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1427c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1428c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1429c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
14302432697bSTejun Heo 		unsigned int i, buflen = 0;
14312432697bSTejun Heo 
14322432697bSTejun Heo 		for (i = 0; i < n_elem; i++)
14332432697bSTejun Heo 			buflen += sg[i].length;
14342432697bSTejun Heo 
14352432697bSTejun Heo 		ata_sg_init(qc, sg, n_elem);
143649c80429SBrian King 		qc->nbytes = buflen;
1437c6fd2807SJeff Garzik 	}
1438c6fd2807SJeff Garzik 
1439c6fd2807SJeff Garzik 	qc->private_data = &wait;
1440c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1441c6fd2807SJeff Garzik 
1442c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1443c6fd2807SJeff Garzik 
1444c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1445c6fd2807SJeff Garzik 
1446c6fd2807SJeff Garzik 	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1447c6fd2807SJeff Garzik 
1448c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1449c6fd2807SJeff Garzik 
1450c6fd2807SJeff Garzik 	if (!rc) {
1451c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1452c6fd2807SJeff Garzik 
1453c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1454c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1455c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1456c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1457c6fd2807SJeff Garzik 		 */
1458c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1459c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1460c6fd2807SJeff Garzik 
1461c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1462c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1463c6fd2807SJeff Garzik 			else
1464c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1465c6fd2807SJeff Garzik 
1466c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1467c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1468c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1469c6fd2807SJeff Garzik 		}
1470c6fd2807SJeff Garzik 
1471c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1472c6fd2807SJeff Garzik 	}
1473c6fd2807SJeff Garzik 
1474c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1475c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1476c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1477c6fd2807SJeff Garzik 
1478a51d644aSTejun Heo 	/* perform minimal error analysis */
1479a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1480a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1481a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1482a51d644aSTejun Heo 
1483a51d644aSTejun Heo 		if (!qc->err_mask)
1484c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1485a51d644aSTejun Heo 
1486a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1487a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1488c6fd2807SJeff Garzik 	}
1489c6fd2807SJeff Garzik 
1490c6fd2807SJeff Garzik 	/* finish up */
1491c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1492c6fd2807SJeff Garzik 
1493c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1494c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1495c6fd2807SJeff Garzik 
1496c6fd2807SJeff Garzik 	ata_qc_free(qc);
1497c6fd2807SJeff Garzik 	ap->active_tag = preempted_tag;
1498c6fd2807SJeff Garzik 	ap->sactive = preempted_sactive;
1499c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1500c6fd2807SJeff Garzik 
1501c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1502c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1503c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1504c6fd2807SJeff Garzik 	 * port.
1505c6fd2807SJeff Garzik 	 *
1506c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1507c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1508c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1509c6fd2807SJeff Garzik 	 *
1510c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1511c6fd2807SJeff Garzik 	 */
1512c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1513c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1514c6fd2807SJeff Garzik 		ata_port_probe(ap);
1515c6fd2807SJeff Garzik 	}
1516c6fd2807SJeff Garzik 
1517c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1518c6fd2807SJeff Garzik 
1519c6fd2807SJeff Garzik 	return err_mask;
1520c6fd2807SJeff Garzik }
1521c6fd2807SJeff Garzik 
1522c6fd2807SJeff Garzik /**
152333480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
15242432697bSTejun Heo  *	@dev: Device to which the command is sent
15252432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
15262432697bSTejun Heo  *	@cdb: CDB for packet command
15272432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
15282432697bSTejun Heo  *	@buf: Data buffer of the command
15292432697bSTejun Heo  *	@buflen: Length of data buffer
15302432697bSTejun Heo  *
15312432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes a simple
15322432697bSTejun Heo  *	buffer instead of an sg list.
15332432697bSTejun Heo  *
15342432697bSTejun Heo  *	LOCKING:
15352432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
15362432697bSTejun Heo  *
15372432697bSTejun Heo  *	RETURNS:
15382432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
15392432697bSTejun Heo  */
15402432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
15412432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
15422432697bSTejun Heo 			   int dma_dir, void *buf, unsigned int buflen)
15432432697bSTejun Heo {
154433480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
154533480a0eSTejun Heo 	unsigned int n_elem = 0;
15462432697bSTejun Heo 
154733480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
154833480a0eSTejun Heo 		WARN_ON(!buf);
15492432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
155033480a0eSTejun Heo 		psg = &sg;
155133480a0eSTejun Heo 		n_elem++;
155233480a0eSTejun Heo 	}
15532432697bSTejun Heo 
155433480a0eSTejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
15552432697bSTejun Heo }
15562432697bSTejun Heo 
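/*
 * Usage sketch (illustrative only): reading the IDENTIFY page through
 * the simple-buffer wrapper, much as ata_dev_read_id() does below.
 * Here "id" is assumed to be a u16 buffer of ATA_ID_WORDS entries:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, sizeof(id[0]) * ATA_ID_WORDS);
 *
 * A non-zero err_mask is an AC_ERR_* bitmask; recovery is left entirely
 * to the caller.
 */
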
15572432697bSTejun Heo /**
1558c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1559c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1560c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1561c6fd2807SJeff Garzik  *
1562c6fd2807SJeff Garzik  *	Execute a 'simple' command that consists only of the opcode
1563c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers.
1564c6fd2807SJeff Garzik  *
1565c6fd2807SJeff Garzik  *	LOCKING:
1566c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1567c6fd2807SJeff Garzik  *
1568c6fd2807SJeff Garzik  *	RETURNS:
1569c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1570c6fd2807SJeff Garzik  */
1571c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1572c6fd2807SJeff Garzik {
1573c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1574c6fd2807SJeff Garzik 
1575c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1576c6fd2807SJeff Garzik 
1577c6fd2807SJeff Garzik 	tf.command = cmd;
1578c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1579c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1580c6fd2807SJeff Garzik 
1581c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1582c6fd2807SJeff Garzik }
1583c6fd2807SJeff Garzik 
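/*
 * Usage sketch (illustrative only; ATA_CMD_FLUSH is assumed to be the
 * usual opcode from <linux/ata.h>): flushing the write cache reduces
 * to a single call,
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *
 * which returns an AC_ERR_* mask on failure instead of triggering any
 * recovery of its own.
 */
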
1584c6fd2807SJeff Garzik /**
1585c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1586c6fd2807SJeff Garzik  *	@adev: ATA device
1587c6fd2807SJeff Garzik  *
1588c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1589c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1590c6fd2807SJeff Garzik  */
1591c6fd2807SJeff Garzik 
1592c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1593c6fd2807SJeff Garzik {
1594432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check,
1595432729f0SAlan Cox 	   as the caller should know this */
1596432729f0SAlan Cox 	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
1597c6fd2807SJeff Garzik 		return 0;
1598432729f0SAlan Cox 	/* For PIO3 and higher, IORDY is mandatory */
1599432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1600c6fd2807SJeff Garzik 		return 1;
1601432729f0SAlan Cox 	/* We turn it on when possible */
1602432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1603432729f0SAlan Cox 		return 1;
1604432729f0SAlan Cox 	return 0;
1605432729f0SAlan Cox }
1606c6fd2807SJeff Garzik 
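/*
 * Usage sketch (illustrative only; MY_IORDY_EN is a hypothetical
 * controller-specific bit, not a libata symbol): a PATA driver's
 * ->set_piomode() typically folds this check into its timing setup:
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing |= MY_IORDY_EN;
 */
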
1607432729f0SAlan Cox /**
1608432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1609432729f0SAlan Cox  *	@adev: ATA device
1610432729f0SAlan Cox  *
1611432729f0SAlan Cox  *	Compute the mask of PIO modes usable when IORDY is not in use.
1612432729f0SAlan Cox  *	Modes whose timing requires IORDY are excluded from the mask.
1613432729f0SAlan Cox  */
1614432729f0SAlan Cox 
1615432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1616432729f0SAlan Cox {
1617c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1618c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1619432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1620c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1621c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows without IORDY? */
1622c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1623c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1624432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1625432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1626c6fd2807SJeff Garzik 		}
1627c6fd2807SJeff Garzik 	}
1628432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1629c6fd2807SJeff Garzik }
1630c6fd2807SJeff Garzik 
1631c6fd2807SJeff Garzik /**
1632c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1633c6fd2807SJeff Garzik  *	@dev: target device
1634c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1635bff04647STejun Heo  *	@flags: ATA_READID_* flags
1636c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1637c6fd2807SJeff Garzik  *
1638c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1639c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1640c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1641c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1642c6fd2807SJeff Garzik  *
1643c6fd2807SJeff Garzik  *	LOCKING:
1644c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1645c6fd2807SJeff Garzik  *
1646c6fd2807SJeff Garzik  *	RETURNS:
1647c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1648c6fd2807SJeff Garzik  */
1649c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1650bff04647STejun Heo 		    unsigned int flags, u16 *id)
1651c6fd2807SJeff Garzik {
1652c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1653c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1654c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1655c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1656c6fd2807SJeff Garzik 	const char *reason;
165754936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1658c6fd2807SJeff Garzik 	int rc;
1659c6fd2807SJeff Garzik 
1660c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
166144877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1662c6fd2807SJeff Garzik 
1663c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1664c6fd2807SJeff Garzik  retry:
1665c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1666c6fd2807SJeff Garzik 
1667c6fd2807SJeff Garzik 	switch (class) {
1668c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1669c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1670c6fd2807SJeff Garzik 		break;
1671c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1672c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1673c6fd2807SJeff Garzik 		break;
1674c6fd2807SJeff Garzik 	default:
1675c6fd2807SJeff Garzik 		rc = -ENODEV;
1676c6fd2807SJeff Garzik 		reason = "unsupported class";
1677c6fd2807SJeff Garzik 		goto err_out;
1678c6fd2807SJeff Garzik 	}
1679c6fd2807SJeff Garzik 
1680c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
168181afe893STejun Heo 
168281afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
168381afe893STejun Heo 	 * sure those are properly initialized.
168481afe893STejun Heo 	 */
168581afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
168681afe893STejun Heo 
168781afe893STejun Heo 	/* Device presence detection is unreliable on some
168881afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
168981afe893STejun Heo 	 */
169081afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1691c6fd2807SJeff Garzik 
1692c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1693c6fd2807SJeff Garzik 				     id, sizeof(id[0]) * ATA_ID_WORDS);
1694c6fd2807SJeff Garzik 	if (err_mask) {
1695800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
169655a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
169744877b4eSTejun Heo 				ap->print_id, dev->devno);
169855a8e2c8STejun Heo 			return -ENOENT;
169955a8e2c8STejun Heo 		}
170055a8e2c8STejun Heo 
170154936f8bSTejun Heo 		/* Device or controller might have reported the wrong
170254936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
170354936f8bSTejun Heo 		 * the current one is aborted by the device.
170454936f8bSTejun Heo 		 */
170554936f8bSTejun Heo 		if (may_fallback &&
170654936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
170754936f8bSTejun Heo 			may_fallback = 0;
170854936f8bSTejun Heo 
170954936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
171054936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
171154936f8bSTejun Heo 			else
171254936f8bSTejun Heo 				class = ATA_DEV_ATA;
171354936f8bSTejun Heo 			goto retry;
171454936f8bSTejun Heo 		}
171554936f8bSTejun Heo 
1716c6fd2807SJeff Garzik 		rc = -EIO;
1717c6fd2807SJeff Garzik 		reason = "I/O error";
1718c6fd2807SJeff Garzik 		goto err_out;
1719c6fd2807SJeff Garzik 	}
1720c6fd2807SJeff Garzik 
172154936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
172254936f8bSTejun Heo 	 * successfully at least once.
172354936f8bSTejun Heo 	 */
172454936f8bSTejun Heo 	may_fallback = 0;
172554936f8bSTejun Heo 
1726c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1727c6fd2807SJeff Garzik 
1728c6fd2807SJeff Garzik 	/* sanity check */
1729c6fd2807SJeff Garzik 	rc = -EINVAL;
17306070068bSAlan Cox 	reason = "device reports invalid type";
17314a3381feSJeff Garzik 
17324a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
17334a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
17344a3381feSJeff Garzik 			goto err_out;
17354a3381feSJeff Garzik 	} else {
17364a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1737c6fd2807SJeff Garzik 			goto err_out;
1738c6fd2807SJeff Garzik 	}
1739c6fd2807SJeff Garzik 
1740169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1741169439c2SMark Lord 		tried_spinup = 1;
1742169439c2SMark Lord 		/*
1743169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1744169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1745169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1746169439c2SMark Lord 		 */
1747169439c2SMark Lord 		ata_tf_init(dev, &tf);
1748169439c2SMark Lord 		tf.command = ATA_CMD_SET_FEATURES;
1749169439c2SMark Lord 		tf.feature = SETFEATURES_SPINUP;
1750169439c2SMark Lord 		tf.protocol = ATA_PROT_NODATA;
1751169439c2SMark Lord 		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1752169439c2SMark Lord 		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1753169439c2SMark Lord 		if (err_mask) {
1754169439c2SMark Lord 			rc = -EIO;
1755169439c2SMark Lord 			reason = "SPINUP failed";
1756169439c2SMark Lord 			goto err_out;
1757169439c2SMark Lord 		}
1758169439c2SMark Lord 		/*
1759169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1760169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1761169439c2SMark Lord 		 */
1762169439c2SMark Lord 		if (id[2] == 0x37c8)
1763169439c2SMark Lord 			goto retry;
1764169439c2SMark Lord 	}
1765169439c2SMark Lord 
1766bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1767c6fd2807SJeff Garzik 		/*
1768c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1769c6fd2807SJeff Garzik 		 * SRST RESET
1770c6fd2807SJeff Garzik 		 * IDENTIFY
1771c6fd2807SJeff Garzik 		 * INITIALIZE DEVICE PARAMETERS
1772c6fd2807SJeff Garzik 		 * anything else..
1773c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
1774c6fd2807SJeff Garzik 		 */
1775c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1776c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1777c6fd2807SJeff Garzik 			if (err_mask) {
1778c6fd2807SJeff Garzik 				rc = -EIO;
1779c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1780c6fd2807SJeff Garzik 				goto err_out;
1781c6fd2807SJeff Garzik 			}
1782c6fd2807SJeff Garzik 
1783c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1784c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1785c6fd2807SJeff Garzik 			 */
1786bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1787c6fd2807SJeff Garzik 			goto retry;
1788c6fd2807SJeff Garzik 		}
1789c6fd2807SJeff Garzik 	}
1790c6fd2807SJeff Garzik 
1791c6fd2807SJeff Garzik 	*p_class = class;
1792c6fd2807SJeff Garzik 
1793c6fd2807SJeff Garzik 	return 0;
1794c6fd2807SJeff Garzik 
1795c6fd2807SJeff Garzik  err_out:
1796c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1797c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1798c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1799c6fd2807SJeff Garzik 	return rc;
1800c6fd2807SJeff Garzik }
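
/*
 * Usage sketch (illustrative only): ata_bus_probe() below re-reads the
 * ID data right after a bus reset with
 *
 *	rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
 *			     dev->id);
 *
 * while revalidation paths issue the same call without
 * ATA_READID_POSTRESET and compare the fresh page against the cached
 * one.
 */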
1801c6fd2807SJeff Garzik 
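/* ata_dev_knobble - check for a PATA device behind a SATA-PATA bridge:
 * the port reports a SATA cable but the device does not identify
 * itself as SATA.  ata_dev_configure() uses this to apply the bridge
 * limits below.
 */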
1802c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
1803c6fd2807SJeff Garzik {
1804c6fd2807SJeff Garzik 	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1805c6fd2807SJeff Garzik }
1806c6fd2807SJeff Garzik 
1807c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
1808c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
1809c6fd2807SJeff Garzik {
1810c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1811c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1812c6fd2807SJeff Garzik 
1813c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
1814c6fd2807SJeff Garzik 		desc[0] = '\0';
1815c6fd2807SJeff Garzik 		return;
1816c6fd2807SJeff Garzik 	}
18176919a0a6SAlan Cox 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
18186919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
18196919a0a6SAlan Cox 		return;
18206919a0a6SAlan Cox 	}
1821c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
1822cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1823c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
1824c6fd2807SJeff Garzik 	}
1825c6fd2807SJeff Garzik 
1826c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
1827c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1828c6fd2807SJeff Garzik 	else
1829c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1830c6fd2807SJeff Garzik }
1831c6fd2807SJeff Garzik 
1832c6fd2807SJeff Garzik /**
1833c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1834c6fd2807SJeff Garzik  *	@dev: Target device to configure
1835c6fd2807SJeff Garzik  *
1836c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
1837c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
1838c6fd2807SJeff Garzik  *
1839c6fd2807SJeff Garzik  *	LOCKING:
1840c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1841c6fd2807SJeff Garzik  *
1842c6fd2807SJeff Garzik  *	RETURNS:
1843c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1844c6fd2807SJeff Garzik  */
1845efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
1846c6fd2807SJeff Garzik {
1847c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
18486746544cSTejun Heo 	struct ata_eh_context *ehc = &ap->eh_context;
18496746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1850c6fd2807SJeff Garzik 	const u16 *id = dev->id;
1851c6fd2807SJeff Garzik 	unsigned int xfer_mask;
1852b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
18533f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
18543f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
1855c6fd2807SJeff Garzik 	int rc;
1856c6fd2807SJeff Garzik 
1857c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
185844877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
185944877b4eSTejun Heo 			       __FUNCTION__);
1860c6fd2807SJeff Garzik 		return 0;
1861c6fd2807SJeff Garzik 	}
1862c6fd2807SJeff Garzik 
1863c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
186444877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1865c6fd2807SJeff Garzik 
18666746544cSTejun Heo 	/* let ACPI work its magic */
18676746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
18686746544cSTejun Heo 	if (rc)
18696746544cSTejun Heo 		return rc;
187008573a86SKristen Carlson Accardi 
1871c6fd2807SJeff Garzik 	/* print device capabilities */
1872c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1873c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
1874c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1875c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1876c6fd2807SJeff Garzik 			       __FUNCTION__,
1877c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
1878c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
1879c6fd2807SJeff Garzik 
1880c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
1881c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1882c6fd2807SJeff Garzik 	dev->max_sectors = 0;
1883c6fd2807SJeff Garzik 	dev->cdb_len = 0;
1884c6fd2807SJeff Garzik 	dev->n_sectors = 0;
1885c6fd2807SJeff Garzik 	dev->cylinders = 0;
1886c6fd2807SJeff Garzik 	dev->heads = 0;
1887c6fd2807SJeff Garzik 	dev->sectors = 0;
1888c6fd2807SJeff Garzik 
1889c6fd2807SJeff Garzik 	/*
1890c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
1891c6fd2807SJeff Garzik 	 */
1892c6fd2807SJeff Garzik 
1893c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
1894c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
1895c6fd2807SJeff Garzik 
1896c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1897c6fd2807SJeff Garzik 		ata_dump_id(id);
1898c6fd2807SJeff Garzik 
1899ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1900ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1901ef143d57SAlbert Lee 			sizeof(fwrevbuf));
1902ef143d57SAlbert Lee 
1903ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1904ef143d57SAlbert Lee 			sizeof(modelbuf));
1905ef143d57SAlbert Lee 
1906c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
1907c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
1908b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
1909b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
191044877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
191144877b4eSTejun Heo 					       "supports DRM functions and may "
191244877b4eSTejun Heo 					       "not be fully accessible.\n");
1913b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
1914b352e57dSAlan Cox 		}
1915b352e57dSAlan Cox 		else
1916b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
1917b352e57dSAlan Cox 
1918c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
1919c6fd2807SJeff Garzik 
19203f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
19213f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
19223f64f565SEric D. Mudama 
1923c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
1924c6fd2807SJeff Garzik 			const char *lba_desc;
1925c6fd2807SJeff Garzik 			char ncq_desc[20];
1926c6fd2807SJeff Garzik 
1927c6fd2807SJeff Garzik 			lba_desc = "LBA";
1928c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
1929c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
1930c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
1931c6fd2807SJeff Garzik 				lba_desc = "LBA48";
19326fc49adbSTejun Heo 
19336fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
19346fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
19356fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1936c6fd2807SJeff Garzik 			}
1937c6fd2807SJeff Garzik 
19381e999736SAlan Cox 			if (ata_id_hpa_enabled(dev->id))
19391e999736SAlan Cox 				dev->n_sectors = ata_hpa_resize(dev);
19401e999736SAlan Cox 
1941c6fd2807SJeff Garzik 			/* config NCQ */
1942c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1943c6fd2807SJeff Garzik 
1944c6fd2807SJeff Garzik 			/* print device info to dmesg */
19453f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
19463f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19473f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19483f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
19493f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19503f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19513f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
1952c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
19533f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
19543f64f565SEric D. Mudama 			}
1955c6fd2807SJeff Garzik 		} else {
1956c6fd2807SJeff Garzik 			/* CHS */
1957c6fd2807SJeff Garzik 
1958c6fd2807SJeff Garzik 			/* Default translation */
1959c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
1960c6fd2807SJeff Garzik 			dev->heads	= id[3];
1961c6fd2807SJeff Garzik 			dev->sectors	= id[6];
1962c6fd2807SJeff Garzik 
1963c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
1964c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
1965c6fd2807SJeff Garzik 				dev->cylinders = id[54];
1966c6fd2807SJeff Garzik 				dev->heads     = id[55];
1967c6fd2807SJeff Garzik 				dev->sectors   = id[56];
1968c6fd2807SJeff Garzik 			}
1969c6fd2807SJeff Garzik 
1970c6fd2807SJeff Garzik 			/* print device info to dmesg */
19713f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
1972c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
19733f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19743f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
19753f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19763f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19773f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
19783f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
19793f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
19803f64f565SEric D. Mudama 					dev->heads, dev->sectors);
19813f64f565SEric D. Mudama 			}
1982c6fd2807SJeff Garzik 		}
1983c6fd2807SJeff Garzik 
1984c6fd2807SJeff Garzik 		dev->cdb_len = 16;
1985c6fd2807SJeff Garzik 	}
1986c6fd2807SJeff Garzik 
1987c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
1988c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
1989c6fd2807SJeff Garzik 		char *cdb_intr_string = "";
1990c6fd2807SJeff Garzik 
1991c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
1992c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1993c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1994c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1995c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
1996c6fd2807SJeff Garzik 			rc = -EINVAL;
1997c6fd2807SJeff Garzik 			goto err_out_nosup;
1998c6fd2807SJeff Garzik 		}
1999c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2000c6fd2807SJeff Garzik 
2001c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2002c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2003c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2004c6fd2807SJeff Garzik 		}
2005c6fd2807SJeff Garzik 
2006c6fd2807SJeff Garzik 		/* print device info to dmesg */
2007c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2008ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2009ef143d57SAlbert Lee 				       "ATAPI: %s, %s, max %s%s\n",
2010ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2011c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2012c6fd2807SJeff Garzik 				       cdb_intr_string);
2013c6fd2807SJeff Garzik 	}
2014c6fd2807SJeff Garzik 
2015914ed354STejun Heo 	/* determine max_sectors */
2016914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2017914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2018914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2019914ed354STejun Heo 
202093590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
202193590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
202293590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
202393590859SAlan Cox 		   idiot */
202493590859SAlan Cox 		if (print_info) {
202593590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
202693590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
202793590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
202893590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
202993590859SAlan Cox 		}
203093590859SAlan Cox 	}
203193590859SAlan Cox 
2032c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2033c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2034c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2035c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2036c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2037c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2038c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2039c6fd2807SJeff Garzik 	}
2040c6fd2807SJeff Garzik 
204118d6e9d5SAlbert Lee 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
204203ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
204303ec52deSTejun Heo 					 dev->max_sectors);
204418d6e9d5SAlbert Lee 
2045c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2046cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2047c6fd2807SJeff Garzik 
2048c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2049c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2050c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2051c6fd2807SJeff Garzik 	return 0;
2052c6fd2807SJeff Garzik 
2053c6fd2807SJeff Garzik err_out_nosup:
2054c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2055c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2056c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2057c6fd2807SJeff Garzik 	return rc;
2058c6fd2807SJeff Garzik }
2059c6fd2807SJeff Garzik 
2060c6fd2807SJeff Garzik /**
20612e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2062be0d18dfSAlan Cox  *	@ap: port
2063be0d18dfSAlan Cox  *
20642e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2065be0d18dfSAlan Cox  *	detection.
2066be0d18dfSAlan Cox  */
2067be0d18dfSAlan Cox 
2068be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2069be0d18dfSAlan Cox {
2070be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2071be0d18dfSAlan Cox }
2072be0d18dfSAlan Cox 
2073be0d18dfSAlan Cox /**
20742e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2075be0d18dfSAlan Cox  *	@ap: port
2076be0d18dfSAlan Cox  *
20772e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2078be0d18dfSAlan Cox  *	detection.
2079be0d18dfSAlan Cox  */
2080be0d18dfSAlan Cox 
2081be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2082be0d18dfSAlan Cox {
2083be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2084be0d18dfSAlan Cox }
2085be0d18dfSAlan Cox 
2086be0d18dfSAlan Cox /**
2087be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2088be0d18dfSAlan Cox  *	@ap: port
2089be0d18dfSAlan Cox  *
2090be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2091be0d18dfSAlan Cox  */
2092be0d18dfSAlan Cox 
2093be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2094be0d18dfSAlan Cox {
2095be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2096be0d18dfSAlan Cox }
2097be0d18dfSAlan Cox 
2098be0d18dfSAlan Cox /**
2099be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2100be0d18dfSAlan Cox  *	@ap: port
2101be0d18dfSAlan Cox  *
2102be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables.
2103be0d18dfSAlan Cox  */
2104be0d18dfSAlan Cox 
2105be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2106be0d18dfSAlan Cox {
2107be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2108be0d18dfSAlan Cox }
2109be0d18dfSAlan Cox 
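/*
 * Usage sketch (illustrative only; my_pata_ops is a hypothetical
 * driver structure): these helpers are meant to be plugged directly
 * into a driver's port operations, e.g.
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		...
 *		.cable_detect	= ata_cable_40wire,
 *		...
 *	};
 *
 * so that ata_bus_probe() below can fill in ap->cbl once PDIAG- has
 * been released.
 */
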
2110be0d18dfSAlan Cox /**
2111c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2112c6fd2807SJeff Garzik  *	@ap: Bus to probe
2113c6fd2807SJeff Garzik  *
2114c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2115c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2116c6fd2807SJeff Garzik  *	the bus.
2117c6fd2807SJeff Garzik  *
2118c6fd2807SJeff Garzik  *	LOCKING:
2119c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2120c6fd2807SJeff Garzik  *
2121c6fd2807SJeff Garzik  *	RETURNS:
2122c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2123c6fd2807SJeff Garzik  */
2124c6fd2807SJeff Garzik 
2125c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2126c6fd2807SJeff Garzik {
2127c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2128c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
21294ae72a1eSTejun Heo 	int i, rc;
2130c6fd2807SJeff Garzik 	struct ata_device *dev;
2131c6fd2807SJeff Garzik 
2132c6fd2807SJeff Garzik 	ata_port_probe(ap);
2133c6fd2807SJeff Garzik 
2134c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2135c6fd2807SJeff Garzik 		tries[i] = ATA_PROBE_MAX_TRIES;
2136c6fd2807SJeff Garzik 
2137c6fd2807SJeff Garzik  retry:
2138c6fd2807SJeff Garzik 	/* reset and determine device classes */
2139c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2140c6fd2807SJeff Garzik 
2141c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2142c6fd2807SJeff Garzik 		dev = &ap->device[i];
2143c6fd2807SJeff Garzik 
2144c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2145c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2146c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2147c6fd2807SJeff Garzik 		else
2148c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2149c6fd2807SJeff Garzik 
2150c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2151c6fd2807SJeff Garzik 	}
2152c6fd2807SJeff Garzik 
2153c6fd2807SJeff Garzik 	ata_port_probe(ap);
2154c6fd2807SJeff Garzik 
2155c6fd2807SJeff Garzik 	/* after the reset the device state is PIO 0 and the controller
2156c6fd2807SJeff Garzik 	   state is undefined. Record the mode */
2157c6fd2807SJeff Garzik 
2158c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2159c6fd2807SJeff Garzik 		ap->device[i].pio_mode = XFER_PIO_0;
2160c6fd2807SJeff Garzik 
2161f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2162f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2163f31f0cc2SJeff Garzik 	   the slave device */
2164f31f0cc2SJeff Garzik 
2165f31f0cc2SJeff Garzik 	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
2166c6fd2807SJeff Garzik 		dev = &ap->device[i];
2167c6fd2807SJeff Garzik 
2168c6fd2807SJeff Garzik 		if (tries[i])
2169c6fd2807SJeff Garzik 			dev->class = classes[i];
2170c6fd2807SJeff Garzik 
2171c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2172c6fd2807SJeff Garzik 			continue;
2173c6fd2807SJeff Garzik 
2174bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2175bff04647STejun Heo 				     dev->id);
2176c6fd2807SJeff Garzik 		if (rc)
2177c6fd2807SJeff Garzik 			goto fail;
2178f31f0cc2SJeff Garzik 	}
2179f31f0cc2SJeff Garzik 
2180be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2181be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2182be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2183be0d18dfSAlan Cox 
2184f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2185f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2186f31f0cc2SJeff Garzik 
2187f31f0cc2SJeff Garzik 	for(i = 0; i < ATA_MAX_DEVICES; i++) {
2188f31f0cc2SJeff Garzik 		dev = &ap->device[i];
2189f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2190f31f0cc2SJeff Garzik 			continue;
2191c6fd2807SJeff Garzik 
2192efdaedc4STejun Heo 		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2193efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
2194efdaedc4STejun Heo 		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2195c6fd2807SJeff Garzik 		if (rc)
2196c6fd2807SJeff Garzik 			goto fail;
2197c6fd2807SJeff Garzik 	}
2198c6fd2807SJeff Garzik 
2199c6fd2807SJeff Garzik 	/* configure transfer mode */
2200c6fd2807SJeff Garzik 	rc = ata_set_mode(ap, &dev);
22014ae72a1eSTejun Heo 	if (rc)
2202c6fd2807SJeff Garzik 		goto fail;
2203c6fd2807SJeff Garzik 
2204c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2205c6fd2807SJeff Garzik 		if (ata_dev_enabled(&ap->device[i]))
2206c6fd2807SJeff Garzik 			return 0;
2207c6fd2807SJeff Garzik 
2208c6fd2807SJeff Garzik 	/* no device present, disable port */
2209c6fd2807SJeff Garzik 	ata_port_disable(ap);
2210c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
2211c6fd2807SJeff Garzik 	return -ENODEV;
2212c6fd2807SJeff Garzik 
2213c6fd2807SJeff Garzik  fail:
22144ae72a1eSTejun Heo 	tries[dev->devno]--;
22154ae72a1eSTejun Heo 
2216c6fd2807SJeff Garzik 	switch (rc) {
2217c6fd2807SJeff Garzik 	case -EINVAL:
22184ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2219c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2220c6fd2807SJeff Garzik 		break;
22214ae72a1eSTejun Heo 
22224ae72a1eSTejun Heo 	case -ENODEV:
22234ae72a1eSTejun Heo 		/* give it just one more chance */
22244ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
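		/* fall through to the -EIO handling below */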
2225c6fd2807SJeff Garzik 	case -EIO:
22264ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
22274ae72a1eSTejun Heo 			/* This is the last chance, better to slow
22284ae72a1eSTejun Heo 			 * down than lose it.
22294ae72a1eSTejun Heo 			 */
2230c6fd2807SJeff Garzik 			sata_down_spd_limit(ap);
22314ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
22324ae72a1eSTejun Heo 		}
2233c6fd2807SJeff Garzik 	}
2234c6fd2807SJeff Garzik 
22354ae72a1eSTejun Heo 	if (!tries[dev->devno])
2236c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2237c6fd2807SJeff Garzik 
2238c6fd2807SJeff Garzik 	goto retry;
2239c6fd2807SJeff Garzik }
2240c6fd2807SJeff Garzik 
2241c6fd2807SJeff Garzik /**
2242c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2243c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2244c6fd2807SJeff Garzik  *
2245c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2246c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2247c6fd2807SJeff Garzik  *
2248cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2249c6fd2807SJeff Garzik  *	serialization.
2250c6fd2807SJeff Garzik  */
2251c6fd2807SJeff Garzik 
2252c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2253c6fd2807SJeff Garzik {
2254c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2255c6fd2807SJeff Garzik }
2256c6fd2807SJeff Garzik 
2257c6fd2807SJeff Garzik /**
2258c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2259c6fd2807SJeff Garzik  *	@ap: SATA port to printk link status about
2260c6fd2807SJeff Garzik  *
2261c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2262c6fd2807SJeff Garzik  *
2263c6fd2807SJeff Garzik  *	LOCKING:
2264c6fd2807SJeff Garzik  *	None.
2265c6fd2807SJeff Garzik  */
226643727fbcSJeff Garzik void sata_print_link_status(struct ata_port *ap)
2267c6fd2807SJeff Garzik {
2268c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2269c6fd2807SJeff Garzik 
2270c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
2271c6fd2807SJeff Garzik 		return;
2272c6fd2807SJeff Garzik 	sata_scr_read(ap, SCR_CONTROL, &scontrol);
2273c6fd2807SJeff Garzik 
2274c6fd2807SJeff Garzik 	if (ata_port_online(ap)) {
2275c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2276c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2277c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2278c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2279c6fd2807SJeff Garzik 	} else {
2280c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2281c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2282c6fd2807SJeff Garzik 				sstatus, scontrol);
2283c6fd2807SJeff Garzik 	}
2284c6fd2807SJeff Garzik }
2285c6fd2807SJeff Garzik 
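/*
 * Usage sketch (illustrative only): reset paths call this once the PHY
 * has had a chance to settle, as __sata_phy_reset() does just below:
 *
 *	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
 *	msleep(200);
 *	sata_print_link_status(ap);
 */
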
2286c6fd2807SJeff Garzik /**
2287c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2288c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2289c6fd2807SJeff Garzik  *
2290c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2291c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2292c6fd2807SJeff Garzik  *	clear any reset condition.
2293c6fd2807SJeff Garzik  *
2294c6fd2807SJeff Garzik  *	LOCKING:
2295c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2296c6fd2807SJeff Garzik  *
2297c6fd2807SJeff Garzik  */
2298c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2299c6fd2807SJeff Garzik {
2300c6fd2807SJeff Garzik 	u32 sstatus;
2301c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2302c6fd2807SJeff Garzik 
2303c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2304c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2305c6fd2807SJeff Garzik 		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2306c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2307c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2308c6fd2807SJeff Garzik 		mdelay(1);
2309c6fd2807SJeff Garzik 	}
2310c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2311c6fd2807SJeff Garzik 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2312c6fd2807SJeff Garzik 
2313c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2314c6fd2807SJeff Garzik 	do {
2315c6fd2807SJeff Garzik 		msleep(200);
2316c6fd2807SJeff Garzik 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2317c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2318c6fd2807SJeff Garzik 			break;
2319c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2320c6fd2807SJeff Garzik 
2321c6fd2807SJeff Garzik 	/* print link status */
2322c6fd2807SJeff Garzik 	sata_print_link_status(ap);
2323c6fd2807SJeff Garzik 
2324c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2325c6fd2807SJeff Garzik 	if (!ata_port_offline(ap))
2326c6fd2807SJeff Garzik 		ata_port_probe(ap);
2327c6fd2807SJeff Garzik 	else
2328c6fd2807SJeff Garzik 		ata_port_disable(ap);
2329c6fd2807SJeff Garzik 
2330c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2331c6fd2807SJeff Garzik 		return;
2332c6fd2807SJeff Garzik 
2333c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2334c6fd2807SJeff Garzik 		ata_port_disable(ap);
2335c6fd2807SJeff Garzik 		return;
2336c6fd2807SJeff Garzik 	}
2337c6fd2807SJeff Garzik 
2338c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2339c6fd2807SJeff Garzik }
2340c6fd2807SJeff Garzik 
2341c6fd2807SJeff Garzik /**
2342c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2343c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2344c6fd2807SJeff Garzik  *
2345c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2346c6fd2807SJeff Garzik  *	the bus for devices.
2347c6fd2807SJeff Garzik  *
2348c6fd2807SJeff Garzik  *	LOCKING:
2349c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2350c6fd2807SJeff Garzik  *
2351c6fd2807SJeff Garzik  */
2352c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2353c6fd2807SJeff Garzik {
2354c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2355c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2356c6fd2807SJeff Garzik 		return;
2357c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2358c6fd2807SJeff Garzik }
2359c6fd2807SJeff Garzik 
2360c6fd2807SJeff Garzik /**
2361c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2362c6fd2807SJeff Garzik  *	@adev: device
2363c6fd2807SJeff Garzik  *
2364c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or NULL if
2365c6fd2807SJeff Garzik  *	none is present.
2366c6fd2807SJeff Garzik  */
2367c6fd2807SJeff Garzik 
2368c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2369c6fd2807SJeff Garzik {
2370c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
2371c6fd2807SJeff Garzik 	struct ata_device *pair = &ap->device[1 - adev->devno];
2372c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2373c6fd2807SJeff Garzik 		return NULL;
2374c6fd2807SJeff Garzik 	return pair;
2375c6fd2807SJeff Garzik }
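
/*
 * Usage example (illustrative sketch; the helper is hypothetical): a
 * driver that cares whether the companion device on the same cable is
 * in use can simply test the return value for NULL.
 */
static int ata_example_has_enabled_pair(struct ata_device *adev)
{
	/* NULL means the other device is absent or disabled */
	return ata_dev_pair(adev) != NULL;
}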
2376c6fd2807SJeff Garzik 
2377c6fd2807SJeff Garzik /**
2378c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2379c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2380c6fd2807SJeff Garzik  *
2381c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2382c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2383c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2384c6fd2807SJeff Garzik  *	on this port.
2385c6fd2807SJeff Garzik  *
2386cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2387c6fd2807SJeff Garzik  *	serialization.
2388c6fd2807SJeff Garzik  */
2389c6fd2807SJeff Garzik 
2390c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2391c6fd2807SJeff Garzik {
2392c6fd2807SJeff Garzik 	ap->device[0].class = ATA_DEV_NONE;
2393c6fd2807SJeff Garzik 	ap->device[1].class = ATA_DEV_NONE;
2394c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2395c6fd2807SJeff Garzik }
2396c6fd2807SJeff Garzik 
2397c6fd2807SJeff Garzik /**
2398c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2399c6fd2807SJeff Garzik  *	@ap: Port to adjust SATA spd limit for
2400c6fd2807SJeff Garzik  *
2401c6fd2807SJeff Garzik  *	Adjust SATA spd limit of @ap downward.  Note that this
2402c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2403c6fd2807SJeff Garzik  *	using sata_set_spd().
2404c6fd2807SJeff Garzik  *
2405c6fd2807SJeff Garzik  *	LOCKING:
2406c6fd2807SJeff Garzik  *	Inherited from caller.
2407c6fd2807SJeff Garzik  *
2408c6fd2807SJeff Garzik  *	RETURNS:
2409c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2410c6fd2807SJeff Garzik  */
2411c6fd2807SJeff Garzik int sata_down_spd_limit(struct ata_port *ap)
2412c6fd2807SJeff Garzik {
2413c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2414c6fd2807SJeff Garzik 	int rc, highbit;
2415c6fd2807SJeff Garzik 
2416c6fd2807SJeff Garzik 	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2417c6fd2807SJeff Garzik 	if (rc)
2418c6fd2807SJeff Garzik 		return rc;
2419c6fd2807SJeff Garzik 
2420c6fd2807SJeff Garzik 	mask = ap->sata_spd_limit;
2421c6fd2807SJeff Garzik 	if (mask <= 1)
2422c6fd2807SJeff Garzik 		return -EINVAL;
2423c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2424c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2425c6fd2807SJeff Garzik 
2426c6fd2807SJeff Garzik 	spd = (sstatus >> 4) & 0xf;
2427c6fd2807SJeff Garzik 	if (spd <= 1)
2428c6fd2807SJeff Garzik 		return -EINVAL;
2429c6fd2807SJeff Garzik 	spd--;
2430c6fd2807SJeff Garzik 	mask &= (1 << spd) - 1;
2431c6fd2807SJeff Garzik 	if (!mask)
2432c6fd2807SJeff Garzik 		return -EINVAL;
2433c6fd2807SJeff Garzik 
2434c6fd2807SJeff Garzik 	ap->sata_spd_limit = mask;
2435c6fd2807SJeff Garzik 
2436c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2437c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2438c6fd2807SJeff Garzik 
2439c6fd2807SJeff Garzik 	return 0;
2440c6fd2807SJeff Garzik }
2441c6fd2807SJeff Garzik 
2442c6fd2807SJeff Garzik static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2443c6fd2807SJeff Garzik {
2444c6fd2807SJeff Garzik 	u32 spd, limit;
2445c6fd2807SJeff Garzik 
2446c6fd2807SJeff Garzik 	if (ap->sata_spd_limit == UINT_MAX)
2447c6fd2807SJeff Garzik 		limit = 0;
2448c6fd2807SJeff Garzik 	else
2449c6fd2807SJeff Garzik 		limit = fls(ap->sata_spd_limit);
2450c6fd2807SJeff Garzik 
2451c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
2452c6fd2807SJeff Garzik 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2453c6fd2807SJeff Garzik 
2454c6fd2807SJeff Garzik 	return spd != limit;
2455c6fd2807SJeff Garzik }
2456c6fd2807SJeff Garzik 
2457c6fd2807SJeff Garzik /**
2458c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2459c6fd2807SJeff Garzik  *	@ap: Port in question
2460c6fd2807SJeff Garzik  *
2461c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2462c6fd2807SJeff Garzik  *	@ap->sata_spd_limit.  This function is used to determine
2463c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2464c6fd2807SJeff Garzik  *	configuration.
2465c6fd2807SJeff Garzik  *
2466c6fd2807SJeff Garzik  *	LOCKING:
2467c6fd2807SJeff Garzik  *	Inherited from caller.
2468c6fd2807SJeff Garzik  *
2469c6fd2807SJeff Garzik  *	RETURNS:
2470c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2471c6fd2807SJeff Garzik  */
2472c6fd2807SJeff Garzik int sata_set_spd_needed(struct ata_port *ap)
2473c6fd2807SJeff Garzik {
2474c6fd2807SJeff Garzik 	u32 scontrol;
2475c6fd2807SJeff Garzik 
2476c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2477c6fd2807SJeff Garzik 		return 0;
2478c6fd2807SJeff Garzik 
2479c6fd2807SJeff Garzik 	return __sata_set_spd_needed(ap, &scontrol);
2480c6fd2807SJeff Garzik }
2481c6fd2807SJeff Garzik 
2482c6fd2807SJeff Garzik /**
2483c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2484c6fd2807SJeff Garzik  *	@ap: Port to set SATA spd for
2485c6fd2807SJeff Garzik  *
2486c6fd2807SJeff Garzik  *	Set SATA spd of @ap according to sata_spd_limit.
2487c6fd2807SJeff Garzik  *
2488c6fd2807SJeff Garzik  *	LOCKING:
2489c6fd2807SJeff Garzik  *	Inherited from caller.
2490c6fd2807SJeff Garzik  *
2491c6fd2807SJeff Garzik  *	RETURNS:
2492c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2493c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2494c6fd2807SJeff Garzik  */
2495c6fd2807SJeff Garzik int sata_set_spd(struct ata_port *ap)
2496c6fd2807SJeff Garzik {
2497c6fd2807SJeff Garzik 	u32 scontrol;
2498c6fd2807SJeff Garzik 	int rc;
2499c6fd2807SJeff Garzik 
2500c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2501c6fd2807SJeff Garzik 		return rc;
2502c6fd2807SJeff Garzik 
2503c6fd2807SJeff Garzik 	if (!__sata_set_spd_needed(ap, &scontrol))
2504c6fd2807SJeff Garzik 		return 0;
2505c6fd2807SJeff Garzik 
2506c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2507c6fd2807SJeff Garzik 		return rc;
2508c6fd2807SJeff Garzik 
2509c6fd2807SJeff Garzik 	return 1;
2510c6fd2807SJeff Garzik }
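
/*
 * Usage example (illustrative sketch; the helper is hypothetical): the
 * usual pattern is to lower the stored limit with sata_down_spd_limit()
 * and then program it with sata_set_spd().  A return value of 1 from
 * sata_set_spd() means SControl was changed, so the link has to be
 * hardreset before the new speed takes effect.
 */
static int ata_example_step_down_link_speed(struct ata_port *ap)
{
	int rc;

	/* adjust ap->sata_spd_limit only; nothing is written yet */
	rc = sata_down_spd_limit(ap);
	if (rc)
		return rc;

	/* write the new limit into SControl */
	rc = sata_set_spd(ap);
	if (rc == 1)
		ata_port_printk(ap, KERN_INFO,
				"link speed limited, hardreset needed\n");

	return rc < 0 ? rc : 0;
}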
2511c6fd2807SJeff Garzik 
2512c6fd2807SJeff Garzik /*
2513c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2514c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2515c6fd2807SJeff Garzik  */
2516c6fd2807SJeff Garzik /*
2517b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2518c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2519b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2520b352e57dSAlan Cox  *
2521b352e57dSAlan Cox  * For PIO 5/6 and MWDMA 3/4 see the CFA specification 3.0.
2522c6fd2807SJeff Garzik  */
2523c6fd2807SJeff Garzik 
2524c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2525c6fd2807SJeff Garzik 
2526c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2527c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2528c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2529c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2530c6fd2807SJeff Garzik 
2531b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2532b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2533c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2534c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2535c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2536c6fd2807SJeff Garzik 
2537c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2538c6fd2807SJeff Garzik 
2539c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2540c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2541c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2542c6fd2807SJeff Garzik 
2543c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2544c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2545c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2546c6fd2807SJeff Garzik 
2547b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2548b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2549c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2550c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2551c6fd2807SJeff Garzik 
2552c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2553c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2554c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2555c6fd2807SJeff Garzik 
2556c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2557c6fd2807SJeff Garzik 
2558c6fd2807SJeff Garzik 	{ 0xFF }
2559c6fd2807SJeff Garzik };
2560c6fd2807SJeff Garzik 
2561c6fd2807SJeff Garzik #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
2562c6fd2807SJeff Garzik #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
2563c6fd2807SJeff Garzik 
2564c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2565c6fd2807SJeff Garzik {
2566c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2567c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2568c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2569c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2570c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2571c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2572c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2573c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2574c6fd2807SJeff Garzik }
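
/*
 * Worked example (illustrative, with an assumed 33 MHz command clock):
 * T is the clock period in picoseconds, roughly 30000 ps, and the
 * timings above are in nanoseconds.  Quantizing the 70 ns PIO_0 setup
 * time gives
 *
 *	EZ(70 * 1000, 30000) = ENOUGH(70000, 30000)
 *			     = (70000 - 1) / 30000 + 1 = 3 clocks
 *
 * i.e. every timing is rounded up to a whole number of bus clocks,
 * while zero entries stay zero.
 */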
2575c6fd2807SJeff Garzik 
2576c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2577c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2578c6fd2807SJeff Garzik {
2579c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2580c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2581c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2582c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2583c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2584c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2585c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2586c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2587c6fd2807SJeff Garzik }
2588c6fd2807SJeff Garzik 
2589c6fd2807SJeff Garzik static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2590c6fd2807SJeff Garzik {
2591c6fd2807SJeff Garzik 	const struct ata_timing *t;
2592c6fd2807SJeff Garzik 
2593c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2594c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2595c6fd2807SJeff Garzik 			return NULL;
2596c6fd2807SJeff Garzik 	return t;
2597c6fd2807SJeff Garzik }
2598c6fd2807SJeff Garzik 
2599c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2600c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2601c6fd2807SJeff Garzik {
2602c6fd2807SJeff Garzik 	const struct ata_timing *s;
2603c6fd2807SJeff Garzik 	struct ata_timing p;
2604c6fd2807SJeff Garzik 
2605c6fd2807SJeff Garzik 	/*
2606c6fd2807SJeff Garzik 	 * Find the mode.
2607c6fd2807SJeff Garzik 	 */
2608c6fd2807SJeff Garzik 
2609c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2610c6fd2807SJeff Garzik 		return -EINVAL;
2611c6fd2807SJeff Garzik 
2612c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2613c6fd2807SJeff Garzik 
2614c6fd2807SJeff Garzik 	/*
2615c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2616c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2617c6fd2807SJeff Garzik 	 */
2618c6fd2807SJeff Garzik 
2619c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2620c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2621c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2622c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2623c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2624c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2625c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2626c6fd2807SJeff Garzik 		}
2627c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2628c6fd2807SJeff Garzik 	}
2629c6fd2807SJeff Garzik 
2630c6fd2807SJeff Garzik 	/*
2631c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2632c6fd2807SJeff Garzik 	 */
2633c6fd2807SJeff Garzik 
2634c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2635c6fd2807SJeff Garzik 
2636c6fd2807SJeff Garzik 	/*
2637c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2638c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands.  We have to ensure that the
2639c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2640c6fd2807SJeff Garzik 	 */
2641c6fd2807SJeff Garzik 
2642fd3367afSAlan 	if (speed > XFER_PIO_6) {
2643c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2644c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2645c6fd2807SJeff Garzik 	}
2646c6fd2807SJeff Garzik 
2647c6fd2807SJeff Garzik 	/*
2648c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2649c6fd2807SJeff Garzik 	 */
2650c6fd2807SJeff Garzik 
2651c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2652c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2653c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2654c6fd2807SJeff Garzik 	}
2655c6fd2807SJeff Garzik 
2656c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2657c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2658c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2659c6fd2807SJeff Garzik 	}
26604f701d1eSAlan Cox 
26614f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
26624f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
26634f701d1eSAlan Cox 	   if so, we must correct this. */
26644f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
26654f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2666c6fd2807SJeff Garzik 
2667c6fd2807SJeff Garzik 	return 0;
2668c6fd2807SJeff Garzik }
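
/*
 * Usage example (illustrative sketch; the helper and the clock figures
 * are assumptions, not taken from any particular controller): a PATA
 * driver clocked at 33 MHz could convert the device's current PIO mode
 * into clock counts and then program those counts into its timing
 * registers.
 */
static int ata_example_pio_clocks(struct ata_device *adev,
				  struct ata_timing *t)
{
	/* T and UT are clock periods in picoseconds; the UDMA entries
	 * are zero for PIO modes, so UT only matters for UDMA timings.
	 */
	return ata_timing_compute(adev, adev->pio_mode, t, 30000, 15000);
}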
2669c6fd2807SJeff Garzik 
2670c6fd2807SJeff Garzik /**
2671c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2672c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2673458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2674c6fd2807SJeff Garzik  *
2675c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2676c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2677c6fd2807SJeff Garzik  *	will apply the limit.
2678c6fd2807SJeff Garzik  *
2679c6fd2807SJeff Garzik  *	LOCKING:
2680c6fd2807SJeff Garzik  *	Inherited from caller.
2681c6fd2807SJeff Garzik  *
2682c6fd2807SJeff Garzik  *	RETURNS:
2683c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2684c6fd2807SJeff Garzik  */
2685458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2686c6fd2807SJeff Garzik {
2687458337dbSTejun Heo 	char buf[32];
2688458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2689458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2690458337dbSTejun Heo 	int quiet, highbit;
2691c6fd2807SJeff Garzik 
2692458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2693458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2694458337dbSTejun Heo 
2695458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2696458337dbSTejun Heo 						  dev->mwdma_mask,
2697c6fd2807SJeff Garzik 						  dev->udma_mask);
2698458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2699c6fd2807SJeff Garzik 
2700458337dbSTejun Heo 	switch (sel) {
2701458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2702458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2703458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2704458337dbSTejun Heo 		break;
2705458337dbSTejun Heo 
2706458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2707458337dbSTejun Heo 		if (udma_mask) {
2708458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2709458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2710458337dbSTejun Heo 			if (!udma_mask)
2711458337dbSTejun Heo 				return -ENOENT;
2712458337dbSTejun Heo 		} else if (mwdma_mask) {
2713458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2714458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2715458337dbSTejun Heo 			if (!mwdma_mask)
2716458337dbSTejun Heo 				return -ENOENT;
2717458337dbSTejun Heo 		}
2718458337dbSTejun Heo 		break;
2719458337dbSTejun Heo 
2720458337dbSTejun Heo 	case ATA_DNXFER_40C:
2721458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2722458337dbSTejun Heo 		break;
2723458337dbSTejun Heo 
2724458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2725458337dbSTejun Heo 		pio_mask &= 1;
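		/* fall through */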
2726458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2727458337dbSTejun Heo 		mwdma_mask = 0;
2728458337dbSTejun Heo 		udma_mask = 0;
2729458337dbSTejun Heo 		break;
2730458337dbSTejun Heo 
2731458337dbSTejun Heo 	default:
2732458337dbSTejun Heo 		BUG();
2733458337dbSTejun Heo 	}
2734458337dbSTejun Heo 
2735458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2736458337dbSTejun Heo 
2737458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2738458337dbSTejun Heo 		return -ENOENT;
2739458337dbSTejun Heo 
2740458337dbSTejun Heo 	if (!quiet) {
2741458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2742458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
2743458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
2744458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2745458337dbSTejun Heo 		else
2746458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
2747458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
2748458337dbSTejun Heo 
2749458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
2750458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
2751458337dbSTejun Heo 	}
2752c6fd2807SJeff Garzik 
2753c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2754c6fd2807SJeff Garzik 			    &dev->udma_mask);
2755c6fd2807SJeff Garzik 
2756c6fd2807SJeff Garzik 	return 0;
2757c6fd2807SJeff Garzik }
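
/*
 * Usage example (illustrative sketch; the helper is hypothetical):
 * after repeated DMA errors a driver could knock the top DMA mode off
 * the device's masks and then reapply the configuration.  Note that
 * ata_down_xfermask_limit() only edits the masks; ata_set_mode() has
 * to be called for the change to reach the hardware.
 */
static int ata_example_slow_down_dma(struct ata_device *dev)
{
	struct ata_device *failed_dev;
	int rc;

	rc = ata_down_xfermask_limit(dev, ATA_DNXFER_DMA);
	if (rc)		/* -ENOENT: no slower mode left */
		return rc;

	return ata_set_mode(dev->ap, &failed_dev);
}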
2758c6fd2807SJeff Garzik 
2759c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
2760c6fd2807SJeff Garzik {
2761baa1e78aSTejun Heo 	struct ata_eh_context *ehc = &dev->ap->eh_context;
2762c6fd2807SJeff Garzik 	unsigned int err_mask;
2763c6fd2807SJeff Garzik 	int rc;
2764c6fd2807SJeff Garzik 
2765c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
2766c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2767c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
2768c6fd2807SJeff Garzik 
2769c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
277011750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
277111750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
277311750a40SAlan 		err_mask &= ~AC_ERR_DEV;
277311750a40SAlan 
2774c6fd2807SJeff Garzik 	if (err_mask) {
2775c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2776c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
2777c6fd2807SJeff Garzik 		return -EIO;
2778c6fd2807SJeff Garzik 	}
2779c6fd2807SJeff Garzik 
2780baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2781c6fd2807SJeff Garzik 	rc = ata_dev_revalidate(dev, 0);
2782baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2783c6fd2807SJeff Garzik 	if (rc)
2784c6fd2807SJeff Garzik 		return rc;
2785c6fd2807SJeff Garzik 
2786c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2787c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
2788c6fd2807SJeff Garzik 
2789c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2790c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2791c6fd2807SJeff Garzik 	return 0;
2792c6fd2807SJeff Garzik }
2793c6fd2807SJeff Garzik 
2794c6fd2807SJeff Garzik /**
279504351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2796c6fd2807SJeff Garzik  *	@ap: port on which timings will be programmed
2797c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
2798c6fd2807SJeff Garzik  *
279904351821SAlan  *	Standard implementation of the function used to tune and set
280004351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
280104351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
2802c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
2803c6fd2807SJeff Garzik  *
2804c6fd2807SJeff Garzik  *	LOCKING:
2805c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2806c6fd2807SJeff Garzik  *
2807c6fd2807SJeff Garzik  *	RETURNS:
2808c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
2809c6fd2807SJeff Garzik  */
281004351821SAlan 
281104351821SAlan int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2812c6fd2807SJeff Garzik {
2813c6fd2807SJeff Garzik 	struct ata_device *dev;
2814c6fd2807SJeff Garzik 	int i, rc = 0, used_dma = 0, found = 0;
2815c6fd2807SJeff Garzik 
2816c6fd2807SJeff Garzik 
2817c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
2818c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2819c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
2820c6fd2807SJeff Garzik 
2821c6fd2807SJeff Garzik 		dev = &ap->device[i];
2822c6fd2807SJeff Garzik 
2823c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2824c6fd2807SJeff Garzik 			continue;
2825c6fd2807SJeff Garzik 
2826c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
2827c6fd2807SJeff Garzik 
2828c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2829c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2830c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2831c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2832c6fd2807SJeff Garzik 
2833c6fd2807SJeff Garzik 		found = 1;
2834c6fd2807SJeff Garzik 		if (dev->dma_mode)
2835c6fd2807SJeff Garzik 			used_dma = 1;
2836c6fd2807SJeff Garzik 	}
2837c6fd2807SJeff Garzik 	if (!found)
2838c6fd2807SJeff Garzik 		goto out;
2839c6fd2807SJeff Garzik 
2840c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
2841c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2842c6fd2807SJeff Garzik 		dev = &ap->device[i];
2843c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2844c6fd2807SJeff Garzik 			continue;
2845c6fd2807SJeff Garzik 
2846c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
2847c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2848c6fd2807SJeff Garzik 			rc = -EINVAL;
2849c6fd2807SJeff Garzik 			goto out;
2850c6fd2807SJeff Garzik 		}
2851c6fd2807SJeff Garzik 
2852c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
2853c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
2854c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
2855c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
2856c6fd2807SJeff Garzik 	}
2857c6fd2807SJeff Garzik 
2858c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
2859c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2860c6fd2807SJeff Garzik 		dev = &ap->device[i];
2861c6fd2807SJeff Garzik 
2862c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2863c6fd2807SJeff Garzik 			continue;
2864c6fd2807SJeff Garzik 
2865c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
2866c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2867c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
2868c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
2869c6fd2807SJeff Garzik 	}
2870c6fd2807SJeff Garzik 
2871c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
2872c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2873c6fd2807SJeff Garzik 		dev = &ap->device[i];
2874c6fd2807SJeff Garzik 
287518d90debSAlan 		/* skip devices that are not enabled */
28769666f400STejun Heo 		if (!ata_dev_enabled(dev))
2877c6fd2807SJeff Garzik 			continue;
2878c6fd2807SJeff Garzik 
2879c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
2880c6fd2807SJeff Garzik 		if (rc)
2881c6fd2807SJeff Garzik 			goto out;
2882c6fd2807SJeff Garzik 	}
2883c6fd2807SJeff Garzik 
2884c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
2885c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
2886c6fd2807SJeff Garzik 	 */
2887cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2888032af1ceSAlan 		ap->host->simplex_claimed = ap;
2889c6fd2807SJeff Garzik 
2890c6fd2807SJeff Garzik  out:
2891c6fd2807SJeff Garzik 	if (rc)
2892c6fd2807SJeff Garzik 		*r_failed_dev = dev;
2893c6fd2807SJeff Garzik 	return rc;
2894c6fd2807SJeff Garzik }
2895c6fd2807SJeff Garzik 
2896c6fd2807SJeff Garzik /**
289704351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
289804351821SAlan  *	@ap: port on which timings will be programmed
289904351821SAlan  *	@r_failed_dev: out parameter for failed device
290004351821SAlan  *
290104351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
290204351821SAlan  *	ata_set_mode() fails, pointer to the failing device is
290304351821SAlan  *	returned in @r_failed_dev.
290404351821SAlan  *
290504351821SAlan  *	LOCKING:
290604351821SAlan  *	PCI/etc. bus probe sem.
290704351821SAlan  *
290804351821SAlan  *	RETURNS:
290904351821SAlan  *	0 on success, negative errno otherwise
291004351821SAlan  */
291104351821SAlan int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
291204351821SAlan {
291304351821SAlan 	/* has private set_mode? */
291404351821SAlan 	if (ap->ops->set_mode)
291504351821SAlan 		return ap->ops->set_mode(ap, r_failed_dev);
291604351821SAlan 	return ata_do_set_mode(ap, r_failed_dev);
291704351821SAlan }
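
/*
 * Example (illustrative sketch; the function is hypothetical): a driver
 * with special mode-setting requirements can install its own hook as
 * .set_mode in struct ata_port_operations, and ata_set_mode() will call
 * it instead of ata_do_set_mode().  Such hooks commonly do their
 * driver-specific fix-ups and then fall back to the standard helper,
 * roughly like this:
 */
static int ata_example_set_mode(struct ata_port *ap,
				struct ata_device **r_failed_dev)
{
	/* driver-specific preparation would go here */
	return ata_do_set_mode(ap, r_failed_dev);
}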
291804351821SAlan 
291904351821SAlan /**
2920c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
2921c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
2922c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
2923c6fd2807SJeff Garzik  *
2924c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
2925c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
2926c6fd2807SJeff Garzik  *	other threads.
2927c6fd2807SJeff Garzik  *
2928c6fd2807SJeff Garzik  *	LOCKING:
2929cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
2930c6fd2807SJeff Garzik  */
2931c6fd2807SJeff Garzik 
2932c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
2933c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
2934c6fd2807SJeff Garzik {
2935c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
2936c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
2937c6fd2807SJeff Garzik }
2938c6fd2807SJeff Garzik 
2939c6fd2807SJeff Garzik /**
2940c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
2941c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
2942c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
2943c6fd2807SJeff Garzik  *	@tmout: overall timeout
2944c6fd2807SJeff Garzik  *
2945c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
2946c6fd2807SJeff Garzik  *	or a timeout occurs.
2947c6fd2807SJeff Garzik  *
2948d1adc1bbSTejun Heo  *	LOCKING:
2949d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
2950d1adc1bbSTejun Heo  *
2951d1adc1bbSTejun Heo  *	RETURNS:
2952d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
2953c6fd2807SJeff Garzik  */
2954d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
2955c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
2956c6fd2807SJeff Garzik {
2957c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
2958c6fd2807SJeff Garzik 	u8 status;
2959c6fd2807SJeff Garzik 
2960c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
2961c6fd2807SJeff Garzik 	timer_start = jiffies;
2962c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
2963d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2964d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2965c6fd2807SJeff Garzik 		msleep(50);
2966c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
2967c6fd2807SJeff Garzik 	}
2968c6fd2807SJeff Garzik 
2969d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
2970c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
297135aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
297235aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
2973c6fd2807SJeff Garzik 
2974c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
2975d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2976d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2977c6fd2807SJeff Garzik 		msleep(50);
2978c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
2979c6fd2807SJeff Garzik 	}
2980c6fd2807SJeff Garzik 
2981d1adc1bbSTejun Heo 	if (status == 0xff)
2982d1adc1bbSTejun Heo 		return -ENODEV;
2983d1adc1bbSTejun Heo 
2984c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
2985c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
298635aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
298735aa7a43SJeff Garzik 				tmout / HZ, status);
2988d1adc1bbSTejun Heo 		return -EBUSY;
2989c6fd2807SJeff Garzik 	}
2990c6fd2807SJeff Garzik 
2991c6fd2807SJeff Garzik 	return 0;
2992c6fd2807SJeff Garzik }
2993c6fd2807SJeff Garzik 
2994d4b2bab4STejun Heo /**
2995d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
2996d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
2997d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
2998d4b2bab4STejun Heo  *
2999d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3000d4b2bab4STejun Heo  *	occurs.
3001d4b2bab4STejun Heo  *
3002d4b2bab4STejun Heo  *	LOCKING:
3003d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3004d4b2bab4STejun Heo  *
3005d4b2bab4STejun Heo  *	RETURNS:
3006d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3007d4b2bab4STejun Heo  */
3008d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3009d4b2bab4STejun Heo {
3010d4b2bab4STejun Heo 	unsigned long start = jiffies;
3011d4b2bab4STejun Heo 	int warned = 0;
3012d4b2bab4STejun Heo 
3013d4b2bab4STejun Heo 	while (1) {
3014d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3015d4b2bab4STejun Heo 		unsigned long now = jiffies;
3016d4b2bab4STejun Heo 
3017d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3018d4b2bab4STejun Heo 			return 0;
3019fd7fe701STejun Heo 		if (!ata_port_online(ap) && status == 0xff)
3020d4b2bab4STejun Heo 			return -ENODEV;
3021d4b2bab4STejun Heo 		if (time_after(now, deadline))
3022d4b2bab4STejun Heo 			return -EBUSY;
3023d4b2bab4STejun Heo 
3024d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3025d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3026d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3027d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3028d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3029d4b2bab4STejun Heo 			warned = 1;
3030d4b2bab4STejun Heo 		}
3031d4b2bab4STejun Heo 
3032d4b2bab4STejun Heo 		msleep(50);
3033d4b2bab4STejun Heo 	}
3034d4b2bab4STejun Heo }
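
/*
 * Usage example (illustrative sketch; the helper is hypothetical):
 * callers pass an absolute deadline in jiffies, e.g. "give the device
 * up to two seconds from now to drop BSY".
 */
static int ata_example_wait_two_seconds(struct ata_port *ap)
{
	return ata_wait_ready(ap, jiffies + 2 * HZ);
}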
3035d4b2bab4STejun Heo 
3036d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3037d4b2bab4STejun Heo 			      unsigned long deadline)
3038c6fd2807SJeff Garzik {
3039c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3040c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3041c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
30429b89391cSTejun Heo 	int rc, ret = 0;
3043c6fd2807SJeff Garzik 
3044c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3045c6fd2807SJeff Garzik 	 * BSY bit to clear
3046c6fd2807SJeff Garzik 	 */
3047d4b2bab4STejun Heo 	if (dev0) {
3048d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30499b89391cSTejun Heo 		if (rc) {
30509b89391cSTejun Heo 			if (rc != -ENODEV)
3051d4b2bab4STejun Heo 				return rc;
30529b89391cSTejun Heo 			ret = rc;
30539b89391cSTejun Heo 		}
3054d4b2bab4STejun Heo 	}
3055c6fd2807SJeff Garzik 
3056e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3057e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3058c6fd2807SJeff Garzik 	 */
3059e141d999STejun Heo 	if (dev1) {
3060e141d999STejun Heo 		int i;
3061c6fd2807SJeff Garzik 
3062c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3063e141d999STejun Heo 
3064e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3065e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3066e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3067e141d999STejun Heo 		 */
3068e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3069e141d999STejun Heo 			u8 nsect, lbal;
3070e141d999STejun Heo 
30710d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
30720d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3073c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3074c6fd2807SJeff Garzik 				break;
3075c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3076c6fd2807SJeff Garzik 		}
3077e141d999STejun Heo 
3078d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30799b89391cSTejun Heo 		if (rc) {
30809b89391cSTejun Heo 			if (rc != -ENODEV)
3081d4b2bab4STejun Heo 				return rc;
30829b89391cSTejun Heo 			ret = rc;
30839b89391cSTejun Heo 		}
3084d4b2bab4STejun Heo 	}
3085c6fd2807SJeff Garzik 
3086c6fd2807SJeff Garzik 	/* is all this really necessary? */
3087c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3088c6fd2807SJeff Garzik 	if (dev1)
3089c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3090c6fd2807SJeff Garzik 	if (dev0)
3091c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3092d4b2bab4STejun Heo 
30939b89391cSTejun Heo 	return ret;
3094c6fd2807SJeff Garzik }
3095c6fd2807SJeff Garzik 
3096d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3097d4b2bab4STejun Heo 			     unsigned long deadline)
3098c6fd2807SJeff Garzik {
3099c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3100c6fd2807SJeff Garzik 
310144877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3102c6fd2807SJeff Garzik 
3103c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
31040d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3105c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
31060d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3107c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
31080d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3109c6fd2807SJeff Garzik 
3110c6fd2807SJeff Garzik 	/* spec mandates ">= 2ms" before checking status.
3111c6fd2807SJeff Garzik 	 * We wait 150ms, because that was the magic delay used for
3112c6fd2807SJeff Garzik 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3113c6fd2807SJeff Garzik 	 * between when the ATA command register is written, and then
3114c6fd2807SJeff Garzik 	 * status is checked.  Because waiting for "a while" before
3115c6fd2807SJeff Garzik 	 * checking status is fine, post SRST, we perform this magic
3116c6fd2807SJeff Garzik 	 * delay here as well.
3117c6fd2807SJeff Garzik 	 *
3118c6fd2807SJeff Garzik 	 * Old drivers/ide uses the 2 ms rule and then waits for ready
3119c6fd2807SJeff Garzik 	 */
3120c6fd2807SJeff Garzik 	msleep(150);
3121c6fd2807SJeff Garzik 
3122c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3123c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3124c6fd2807SJeff Garzik 	 * pulldown resistor.
3125c6fd2807SJeff Garzik 	 */
3126d1adc1bbSTejun Heo 	if (ata_check_status(ap) == 0xFF)
31279b89391cSTejun Heo 		return -ENODEV;
3128c6fd2807SJeff Garzik 
3129d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3130c6fd2807SJeff Garzik }
3131c6fd2807SJeff Garzik 
3132c6fd2807SJeff Garzik /**
3133c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3134c6fd2807SJeff Garzik  *	@ap: port to reset
3135c6fd2807SJeff Garzik  *
3136c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3137c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3138c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3139c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3140c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3141c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3142c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3143c6fd2807SJeff Garzik  *
3144c6fd2807SJeff Garzik  *	LOCKING:
3145c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3146cca3974eSJeff Garzik  *	Obtains host lock.
3147c6fd2807SJeff Garzik  *
3148c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3149c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3150c6fd2807SJeff Garzik  */
3151c6fd2807SJeff Garzik 
3152c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3153c6fd2807SJeff Garzik {
3154c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3155c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3156c6fd2807SJeff Garzik 	u8 err;
3157c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
31589b89391cSTejun Heo 	int rc;
3159c6fd2807SJeff Garzik 
316044877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3161c6fd2807SJeff Garzik 
3162c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3163c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3164c6fd2807SJeff Garzik 		dev0 = 1;
3165c6fd2807SJeff Garzik 	else {
3166c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3167c6fd2807SJeff Garzik 		if (slave_possible)
3168c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3169c6fd2807SJeff Garzik 	}
3170c6fd2807SJeff Garzik 
3171c6fd2807SJeff Garzik 	if (dev0)
3172c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3173c6fd2807SJeff Garzik 	if (dev1)
3174c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3175c6fd2807SJeff Garzik 
3176c6fd2807SJeff Garzik 	/* select device 0 again */
3177c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3178c6fd2807SJeff Garzik 
3179c6fd2807SJeff Garzik 	/* issue bus reset */
31809b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
31819b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
31829b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3183c6fd2807SJeff Garzik 			goto err_out;
31849b89391cSTejun Heo 	}
3185c6fd2807SJeff Garzik 
3186c6fd2807SJeff Garzik 	/*
3187c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3188c6fd2807SJeff Garzik 	 */
3189c6fd2807SJeff Garzik 	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
3190c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
3191c6fd2807SJeff Garzik 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
3192c6fd2807SJeff Garzik 
3193c6fd2807SJeff Garzik 	/* re-enable interrupts */
319483625006SAkira Iguchi 	ap->ops->irq_on(ap);
3195c6fd2807SJeff Garzik 
3196c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3197c6fd2807SJeff Garzik 	if (ap->device[1].class != ATA_DEV_NONE)
3198c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3199c6fd2807SJeff Garzik 	if (ap->device[0].class != ATA_DEV_NONE)
3200c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3201c6fd2807SJeff Garzik 
3202c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
3203c6fd2807SJeff Garzik 	if ((ap->device[0].class == ATA_DEV_NONE) &&
3204c6fd2807SJeff Garzik 	    (ap->device[1].class == ATA_DEV_NONE))
3205c6fd2807SJeff Garzik 		goto err_out;
3206c6fd2807SJeff Garzik 
3207c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3208c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
32090d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3210c6fd2807SJeff Garzik 	}
3211c6fd2807SJeff Garzik 
3212c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3213c6fd2807SJeff Garzik 	return;
3214c6fd2807SJeff Garzik 
3215c6fd2807SJeff Garzik err_out:
3216c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3217c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
3218c6fd2807SJeff Garzik 
3219c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3220c6fd2807SJeff Garzik }
3221c6fd2807SJeff Garzik 
3222c6fd2807SJeff Garzik /**
3223c6fd2807SJeff Garzik  *	sata_phy_debounce - debounce SATA phy status
3224c6fd2807SJeff Garzik  *	@ap: ATA port to debounce SATA phy status for
3225c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3226d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3227c6fd2807SJeff Garzik  *
3228c6fd2807SJeff Garzik  *	Make sure SStatus of @ap reaches stable state, determined by
3229c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3230c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3231d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3232d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3233c6fd2807SJeff Garzik  *	until timeout and then returns 0 if DET is stable at 1.
3234c6fd2807SJeff Garzik  *
3235d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3236d4b2bab4STejun Heo  *	two is used.
3237d4b2bab4STejun Heo  *
3238c6fd2807SJeff Garzik  *	LOCKING:
3239c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3240c6fd2807SJeff Garzik  *
3241c6fd2807SJeff Garzik  *	RETURNS:
3242c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3243c6fd2807SJeff Garzik  */
3244d4b2bab4STejun Heo int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3245d4b2bab4STejun Heo 		      unsigned long deadline)
3246c6fd2807SJeff Garzik {
3247c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3248d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3249d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3250c6fd2807SJeff Garzik 	u32 last, cur;
3251c6fd2807SJeff Garzik 	int rc;
3252c6fd2807SJeff Garzik 
3253d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3254d4b2bab4STejun Heo 	if (time_before(t, deadline))
3255d4b2bab4STejun Heo 		deadline = t;
3256d4b2bab4STejun Heo 
3257c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3258c6fd2807SJeff Garzik 		return rc;
3259c6fd2807SJeff Garzik 	cur &= 0xf;
3260c6fd2807SJeff Garzik 
3261c6fd2807SJeff Garzik 	last = cur;
3262c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3263c6fd2807SJeff Garzik 
3264c6fd2807SJeff Garzik 	while (1) {
3265c6fd2807SJeff Garzik 		msleep(interval_msec);
3266c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3267c6fd2807SJeff Garzik 			return rc;
3268c6fd2807SJeff Garzik 		cur &= 0xf;
3269c6fd2807SJeff Garzik 
3270c6fd2807SJeff Garzik 		/* DET stable? */
3271c6fd2807SJeff Garzik 		if (cur == last) {
3272d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3273c6fd2807SJeff Garzik 				continue;
3274c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3275c6fd2807SJeff Garzik 				return 0;
3276c6fd2807SJeff Garzik 			continue;
3277c6fd2807SJeff Garzik 		}
3278c6fd2807SJeff Garzik 
3279c6fd2807SJeff Garzik 		/* unstable, start over */
3280c6fd2807SJeff Garzik 		last = cur;
3281c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3282c6fd2807SJeff Garzik 
3283d4b2bab4STejun Heo 		/* check deadline */
3284d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3285c6fd2807SJeff Garzik 			return -EBUSY;
3286c6fd2807SJeff Garzik 	}
3287c6fd2807SJeff Garzik }
3288c6fd2807SJeff Garzik 
3289c6fd2807SJeff Garzik /**
3290c6fd2807SJeff Garzik  *	sata_phy_resume - resume SATA phy
3291c6fd2807SJeff Garzik  *	@ap: ATA port to resume SATA phy for
3292c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3293d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3294c6fd2807SJeff Garzik  *
3295c6fd2807SJeff Garzik  *	Resume SATA phy of @ap and debounce it.
3296c6fd2807SJeff Garzik  *
3297c6fd2807SJeff Garzik  *	LOCKING:
3298c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3299c6fd2807SJeff Garzik  *
3300c6fd2807SJeff Garzik  *	RETURNS:
3301c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3302c6fd2807SJeff Garzik  */
3303d4b2bab4STejun Heo int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3304d4b2bab4STejun Heo 		    unsigned long deadline)
3305c6fd2807SJeff Garzik {
3306c6fd2807SJeff Garzik 	u32 scontrol;
3307c6fd2807SJeff Garzik 	int rc;
3308c6fd2807SJeff Garzik 
3309c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3310c6fd2807SJeff Garzik 		return rc;
3311c6fd2807SJeff Garzik 
3312c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3313c6fd2807SJeff Garzik 
3314c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3315c6fd2807SJeff Garzik 		return rc;
3316c6fd2807SJeff Garzik 
3317c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3318c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3319c6fd2807SJeff Garzik 	 */
3320c6fd2807SJeff Garzik 	msleep(200);
3321c6fd2807SJeff Garzik 
3322d4b2bab4STejun Heo 	return sata_phy_debounce(ap, params, deadline);
3323c6fd2807SJeff Garzik }
3324c6fd2807SJeff Garzik 
3325c6fd2807SJeff Garzik /**
3326c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3327c6fd2807SJeff Garzik  *	@ap: ATA port to be reset
3328d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3329c6fd2807SJeff Garzik  *
3330b8cffc6aSTejun Heo  *	@ap is about to be reset.  Initialize it.  Failure from
3331b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give
3332b8cffc6aSTejun Heo  *	up that port, so prereset should be best-effort.  It does its
3333b8cffc6aSTejun Heo  *	best to prepare for the reset sequence, but if things go
3334b8cffc6aSTejun Heo  *	wrong it should just whine, not fail.
3335c6fd2807SJeff Garzik  *
3336c6fd2807SJeff Garzik  *	LOCKING:
3337c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3338c6fd2807SJeff Garzik  *
3339c6fd2807SJeff Garzik  *	RETURNS:
3340c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3341c6fd2807SJeff Garzik  */
3342d4b2bab4STejun Heo int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3343c6fd2807SJeff Garzik {
3344c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
3345c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3346c6fd2807SJeff Garzik 	int rc;
3347c6fd2807SJeff Garzik 
334831daabdaSTejun Heo 	/* handle link resume */
3349c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3350c6fd2807SJeff Garzik 	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3351c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3352c6fd2807SJeff Garzik 
3353c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3354c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3355c6fd2807SJeff Garzik 		return 0;
3356c6fd2807SJeff Garzik 
3357c6fd2807SJeff Garzik 	/* if SATA, resume phy */
3358c6fd2807SJeff Garzik 	if (ap->cbl == ATA_CBL_SATA) {
3359d4b2bab4STejun Heo 		rc = sata_phy_resume(ap, timing, deadline);
3360b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3361b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3362c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_WARNING, "failed to resume "
3363c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3364c6fd2807SJeff Garzik 	}
3365c6fd2807SJeff Garzik 
3366c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3367c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3368c6fd2807SJeff Garzik 	 */
3369b8cffc6aSTejun Heo 	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3370b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
33716dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3372b8cffc6aSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "device not ready "
3373b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3374b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3375b8cffc6aSTejun Heo 		}
3376b8cffc6aSTejun Heo 	}
3377c6fd2807SJeff Garzik 
3378c6fd2807SJeff Garzik 	return 0;
3379c6fd2807SJeff Garzik }
3380c6fd2807SJeff Garzik 
3381c6fd2807SJeff Garzik /**
3382c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3383c6fd2807SJeff Garzik  *	@ap: port to reset
3384c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3385d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3386c6fd2807SJeff Garzik  *
3387c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3388c6fd2807SJeff Garzik  *
3389c6fd2807SJeff Garzik  *	LOCKING:
3390c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3391c6fd2807SJeff Garzik  *
3392c6fd2807SJeff Garzik  *	RETURNS:
3393c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3394c6fd2807SJeff Garzik  */
3395d4b2bab4STejun Heo int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3396d4b2bab4STejun Heo 		      unsigned long deadline)
3397c6fd2807SJeff Garzik {
3398c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3399d4b2bab4STejun Heo 	unsigned int devmask = 0;
3400d4b2bab4STejun Heo 	int rc;
3401c6fd2807SJeff Garzik 	u8 err;
3402c6fd2807SJeff Garzik 
3403c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3404c6fd2807SJeff Garzik 
3405c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3406c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3407c6fd2807SJeff Garzik 		goto out;
3408c6fd2807SJeff Garzik 	}
3409c6fd2807SJeff Garzik 
3410c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3411c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3412c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3413c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3414c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3415c6fd2807SJeff Garzik 
3416c6fd2807SJeff Garzik 	/* select device 0 again */
3417c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3418c6fd2807SJeff Garzik 
3419c6fd2807SJeff Garzik 	/* issue bus reset */
3420c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3421d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
34229b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
34239b89391cSTejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
3424d4b2bab4STejun Heo 		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3425d4b2bab4STejun Heo 		return rc;
3426c6fd2807SJeff Garzik 	}
3427c6fd2807SJeff Garzik 
3428c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
3429c6fd2807SJeff Garzik 	classes[0] = ata_dev_try_classify(ap, 0, &err);
3430c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
3431c6fd2807SJeff Garzik 		classes[1] = ata_dev_try_classify(ap, 1, &err);
3432c6fd2807SJeff Garzik 
3433c6fd2807SJeff Garzik  out:
3434c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3435c6fd2807SJeff Garzik 	return 0;
3436c6fd2807SJeff Garzik }
3437c6fd2807SJeff Garzik 
3438c6fd2807SJeff Garzik /**
3439b6103f6dSTejun Heo  *	sata_port_hardreset - reset port via SATA phy reset
3440c6fd2807SJeff Garzik  *	@ap: port to reset
3441b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3442d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3443c6fd2807SJeff Garzik  *
3444c6fd2807SJeff Garzik  *	SATA phy-reset host port using DET bits of SControl register.
3445c6fd2807SJeff Garzik  *
3446c6fd2807SJeff Garzik  *	LOCKING:
3447c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3448c6fd2807SJeff Garzik  *
3449c6fd2807SJeff Garzik  *	RETURNS:
3450c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3451c6fd2807SJeff Garzik  */
3452d4b2bab4STejun Heo int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3453d4b2bab4STejun Heo 			unsigned long deadline)
3454c6fd2807SJeff Garzik {
3455c6fd2807SJeff Garzik 	u32 scontrol;
3456c6fd2807SJeff Garzik 	int rc;
3457c6fd2807SJeff Garzik 
3458c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3459c6fd2807SJeff Garzik 
3460c6fd2807SJeff Garzik 	if (sata_set_spd_needed(ap)) {
3461c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3462c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3463c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3464c6fd2807SJeff Garzik 		 * and Sil3124.
3465c6fd2807SJeff Garzik 		 */
3466c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3467b6103f6dSTejun Heo 			goto out;
3468c6fd2807SJeff Garzik 
3469cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3470c6fd2807SJeff Garzik 
3471c6fd2807SJeff Garzik 		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3472b6103f6dSTejun Heo 			goto out;
3473c6fd2807SJeff Garzik 
3474c6fd2807SJeff Garzik 		sata_set_spd(ap);
3475c6fd2807SJeff Garzik 	}
3476c6fd2807SJeff Garzik 
3477c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3478c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3479b6103f6dSTejun Heo 		goto out;
3480c6fd2807SJeff Garzik 
3481c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3482c6fd2807SJeff Garzik 
3483c6fd2807SJeff Garzik 	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3484b6103f6dSTejun Heo 		goto out;
3485c6fd2807SJeff Garzik 
3486c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3487c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3488c6fd2807SJeff Garzik 	 */
3489c6fd2807SJeff Garzik 	msleep(1);
3490c6fd2807SJeff Garzik 
3491c6fd2807SJeff Garzik 	/* bring phy back */
3492d4b2bab4STejun Heo 	rc = sata_phy_resume(ap, timing, deadline);
3493b6103f6dSTejun Heo  out:
3494b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3495b6103f6dSTejun Heo 	return rc;
3496b6103f6dSTejun Heo }
3497b6103f6dSTejun Heo 
3498b6103f6dSTejun Heo /**
3499b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3500b6103f6dSTejun Heo  *	@ap: port to reset
3501b6103f6dSTejun Heo  *	@class: resulting class of attached device
3502d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3503b6103f6dSTejun Heo  *
3504b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3505b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3506b6103f6dSTejun Heo  *
3507b6103f6dSTejun Heo  *	LOCKING:
3508b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3509b6103f6dSTejun Heo  *
3510b6103f6dSTejun Heo  *	RETURNS:
3511b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3512b6103f6dSTejun Heo  */
3513d4b2bab4STejun Heo int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3514d4b2bab4STejun Heo 		       unsigned long deadline)
3515b6103f6dSTejun Heo {
3516b6103f6dSTejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3517b6103f6dSTejun Heo 	int rc;
3518b6103f6dSTejun Heo 
3519b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3520b6103f6dSTejun Heo 
3521b6103f6dSTejun Heo 	/* do hardreset */
3522d4b2bab4STejun Heo 	rc = sata_port_hardreset(ap, timing, deadline);
3523b6103f6dSTejun Heo 	if (rc) {
3524b6103f6dSTejun Heo 		ata_port_printk(ap, KERN_ERR,
3525b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3526b6103f6dSTejun Heo 		return rc;
3527b6103f6dSTejun Heo 	}
3528c6fd2807SJeff Garzik 
3529c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3530c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3531c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3532c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3533c6fd2807SJeff Garzik 		return 0;
3534c6fd2807SJeff Garzik 	}
3535c6fd2807SJeff Garzik 
353634fee227STejun Heo 	/* wait a while before checking status, see SRST for more info */
353734fee227STejun Heo 	msleep(150);
353834fee227STejun Heo 
3539d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
35409b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
35419b89391cSTejun Heo 	if (rc) {
3542c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR,
3543d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3544d4b2bab4STejun Heo 		return rc;
3545c6fd2807SJeff Garzik 	}
3546c6fd2807SJeff Garzik 
3547c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3548c6fd2807SJeff Garzik 
3549c6fd2807SJeff Garzik 	*class = ata_dev_try_classify(ap, 0, NULL);
3550c6fd2807SJeff Garzik 
3551c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3552c6fd2807SJeff Garzik 	return 0;
3553c6fd2807SJeff Garzik }
3554c6fd2807SJeff Garzik 
3555c6fd2807SJeff Garzik /**
3556c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3557c6fd2807SJeff Garzik  *	@ap: the target ata_port
3558c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3559c6fd2807SJeff Garzik  *
3560c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3561c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3562c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3563c6fd2807SJeff Garzik  *
3564c6fd2807SJeff Garzik  *	LOCKING:
3565c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3566c6fd2807SJeff Garzik  */
3567c6fd2807SJeff Garzik void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3568c6fd2807SJeff Garzik {
3569c6fd2807SJeff Garzik 	u32 serror;
3570c6fd2807SJeff Garzik 
3571c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3572c6fd2807SJeff Garzik 
3573c6fd2807SJeff Garzik 	/* print link status */
3574c6fd2807SJeff Garzik 	sata_print_link_status(ap);
3575c6fd2807SJeff Garzik 
3576c6fd2807SJeff Garzik 	/* clear SError */
3577c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3578c6fd2807SJeff Garzik 		sata_scr_write(ap, SCR_ERROR, serror);
3579c6fd2807SJeff Garzik 
3580c6fd2807SJeff Garzik 	/* re-enable interrupts */
358183625006SAkira Iguchi 	if (!ap->ops->error_handler)
358283625006SAkira Iguchi 		ap->ops->irq_on(ap);
3583c6fd2807SJeff Garzik 
3584c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3585c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3586c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3587c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3588c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3589c6fd2807SJeff Garzik 
3590c6fd2807SJeff Garzik 	/* bail out if no device is present */
3591c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3592c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3593c6fd2807SJeff Garzik 		return;
3594c6fd2807SJeff Garzik 	}
3595c6fd2807SJeff Garzik 
3596c6fd2807SJeff Garzik 	/* set up device control */
35970d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
35980d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3599c6fd2807SJeff Garzik 
3600c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3601c6fd2807SJeff Garzik }
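
/*
 * Illustrative sketch only -- not part of libata-core.c.  It shows how an
 * LLDD of this era typically wires the standard reset helpers above into
 * its ->error_handler() callback.  The function name is made up; it assumes
 * ata_do_eh(), ata_std_prereset() and ata_std_softreset() as declared in
 * <linux/libata.h> for this kernel version.
 */
static void example_error_handler(struct ata_port *ap)
{
	/* prereset -> softreset/hardreset -> postreset pipeline */
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}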
3602c6fd2807SJeff Garzik 
3603c6fd2807SJeff Garzik /**
3604c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3605c6fd2807SJeff Garzik  *	@dev: device to compare against
3606c6fd2807SJeff Garzik  *	@new_class: class of the new device
3607c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3608c6fd2807SJeff Garzik  *
3609c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3610c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3611c6fd2807SJeff Garzik  *	@new_id.
3612c6fd2807SJeff Garzik  *
3613c6fd2807SJeff Garzik  *	LOCKING:
3614c6fd2807SJeff Garzik  *	None.
3615c6fd2807SJeff Garzik  *
3616c6fd2807SJeff Garzik  *	RETURNS:
3617c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3618c6fd2807SJeff Garzik  */
3619c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3620c6fd2807SJeff Garzik 			       const u16 *new_id)
3621c6fd2807SJeff Garzik {
3622c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3623a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3624a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3625c6fd2807SJeff Garzik 
3626c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3627c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3628c6fd2807SJeff Garzik 			       dev->class, new_class);
3629c6fd2807SJeff Garzik 		return 0;
3630c6fd2807SJeff Garzik 	}
3631c6fd2807SJeff Garzik 
3632a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3633a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3634a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3635a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3636c6fd2807SJeff Garzik 
3637c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3638c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3639c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3640c6fd2807SJeff Garzik 		return 0;
3641c6fd2807SJeff Garzik 	}
3642c6fd2807SJeff Garzik 
3643c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3644c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3645c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3646c6fd2807SJeff Garzik 		return 0;
3647c6fd2807SJeff Garzik 	}
3648c6fd2807SJeff Garzik 
3649c6fd2807SJeff Garzik 	return 1;
3650c6fd2807SJeff Garzik }
3651c6fd2807SJeff Garzik 
3652c6fd2807SJeff Garzik /**
3653fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
36543fae450cSHenrik Kretzschmar  *	@dev: target ATA device
3655bff04647STejun Heo  *	@readid_flags: read ID flags
3656c6fd2807SJeff Garzik  *
3657c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
3658c6fd2807SJeff Garzik  *	the port.
3659c6fd2807SJeff Garzik  *
3660c6fd2807SJeff Garzik  *	LOCKING:
3661c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3662c6fd2807SJeff Garzik  *
3663c6fd2807SJeff Garzik  *	RETURNS:
3664c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3665c6fd2807SJeff Garzik  */
3666fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3667c6fd2807SJeff Garzik {
3668c6fd2807SJeff Garzik 	unsigned int class = dev->class;
3669c6fd2807SJeff Garzik 	u16 *id = (void *)dev->ap->sector_buf;
3670c6fd2807SJeff Garzik 	int rc;
3671c6fd2807SJeff Garzik 
3672c6fd2807SJeff Garzik 	/* read ID data */
3673bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3674c6fd2807SJeff Garzik 	if (rc)
3675fe30911bSTejun Heo 		return rc;
3676c6fd2807SJeff Garzik 
3677c6fd2807SJeff Garzik 	/* is the device still there? */
3678fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
3679fe30911bSTejun Heo 		return -ENODEV;
3680c6fd2807SJeff Garzik 
3681c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3682fe30911bSTejun Heo 	return 0;
3683fe30911bSTejun Heo }
3684fe30911bSTejun Heo 
3685fe30911bSTejun Heo /**
3686fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
3687fe30911bSTejun Heo  *	@dev: device to revalidate
3688fe30911bSTejun Heo  *	@readid_flags: read ID flags
3689fe30911bSTejun Heo  *
3690fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3691fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
3692fe30911bSTejun Heo  *
3693fe30911bSTejun Heo  *	LOCKING:
3694fe30911bSTejun Heo  *	Kernel thread context (may sleep)
3695fe30911bSTejun Heo  *
3696fe30911bSTejun Heo  *	RETURNS:
3697fe30911bSTejun Heo  *	0 on success, negative errno otherwise
3698fe30911bSTejun Heo  */
3699fe30911bSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3700fe30911bSTejun Heo {
37016ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
3702fe30911bSTejun Heo 	int rc;
3703fe30911bSTejun Heo 
3704fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
3705fe30911bSTejun Heo 		return -ENODEV;
3706fe30911bSTejun Heo 
3707fe30911bSTejun Heo 	/* re-read ID */
3708fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
3709fe30911bSTejun Heo 	if (rc)
3710fe30911bSTejun Heo 		goto fail;
3711c6fd2807SJeff Garzik 
3712c6fd2807SJeff Garzik 	/* configure device according to the new ID */
3713efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
37146ddcd3b0STejun Heo 	if (rc)
37156ddcd3b0STejun Heo 		goto fail;
37166ddcd3b0STejun Heo 
37176ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
37186ddcd3b0STejun Heo 	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
37196ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
37206ddcd3b0STejun Heo 			       "%llu != %llu\n",
37216ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
37226ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
37236ddcd3b0STejun Heo 		rc = -ENODEV;
37246ddcd3b0STejun Heo 		goto fail;
37256ddcd3b0STejun Heo 	}
37266ddcd3b0STejun Heo 
3727c6fd2807SJeff Garzik 	return 0;
3728c6fd2807SJeff Garzik 
3729c6fd2807SJeff Garzik  fail:
3730c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3731c6fd2807SJeff Garzik 	return rc;
3732c6fd2807SJeff Garzik }
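
/*
 * Illustrative sketch only -- not referenced anywhere in this file.  It shows
 * how EH-side code is expected to revalidate a device after a reset.  The
 * function name is made up; ATA_READID_POSTRESET comes from <linux/libata.h>.
 */
static int example_revalidate_after_reset(struct ata_device *dev)
{
	/* re-read IDENTIFY and reconfigure; nonzero means the device is
	 * gone or has been swapped and must be re-probed by EH
	 */
	return ata_dev_revalidate(dev, ATA_READID_POSTRESET);
}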
3733c6fd2807SJeff Garzik 
37346919a0a6SAlan Cox struct ata_blacklist_entry {
37356919a0a6SAlan Cox 	const char *model_num;
37366919a0a6SAlan Cox 	const char *model_rev;
37376919a0a6SAlan Cox 	unsigned long horkage;
37386919a0a6SAlan Cox };
37396919a0a6SAlan Cox 
37406919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
37416919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
37426919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
37436919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
37446919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
37456919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
37466919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
37476919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
37486919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
37496919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
37506919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
37516919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
37526919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
37536919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
37546919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
37556919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
37566919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
37576919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
37586919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
37596919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
37606919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
37616919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
37626919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
37636919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
37646919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
37656919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
37666919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
37676919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
37686919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
37696919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
37706919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
377139f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
37725acd50f6STejun Heo 	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
37736919a0a6SAlan Cox 
377418d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
377540a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
377618d6e9d5SAlbert Lee 
37776919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
37786919a0a6SAlan Cox 
37796919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
37806919a0a6SAlan Cox 	/* NCQ is slow */
37816919a0a6SAlan Cox         { "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },
378209125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
378309125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
37847acfaf30SPaul Rolland 	/* NCQ is broken */
37857acfaf30SPaul Rolland 	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
3786471e44b2SJeff Garzik 	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
378796442925SJens Axboe 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
378896442925SJens Axboe 	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
378936e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
379036e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
379136e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
379236e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
379336e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3794bd9c5a39STejun Heo 	/* Drives which do spurious command completion */
3795bd9c5a39STejun Heo 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
37962f8fcebbSTejun Heo 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
3797e14cbfa6STejun Heo 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
37982f8fcebbSTejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
37996919a0a6SAlan Cox 
38006919a0a6SAlan Cox 	/* Devices with NCQ limits */
38016919a0a6SAlan Cox 
38026919a0a6SAlan Cox 	/* End Marker */
38036919a0a6SAlan Cox 	{ }
3804c6fd2807SJeff Garzik };
3805c6fd2807SJeff Garzik 
38066919a0a6SAlan Cox unsigned long ata_device_blacklisted(const struct ata_device *dev)
3807c6fd2807SJeff Garzik {
38088bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
38098bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
38106919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3811c6fd2807SJeff Garzik 
38128bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
38138bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3814c6fd2807SJeff Garzik 
38156919a0a6SAlan Cox 	while (ad->model_num) {
38168bfa79fcSTejun Heo 		if (!strcmp(ad->model_num, model_num)) {
38176919a0a6SAlan Cox 			if (ad->model_rev == NULL)
38186919a0a6SAlan Cox 				return ad->horkage;
38198bfa79fcSTejun Heo 			if (!strcmp(ad->model_rev, model_rev))
38206919a0a6SAlan Cox 				return ad->horkage;
3821c6fd2807SJeff Garzik 		}
38226919a0a6SAlan Cox 		ad++;
3823c6fd2807SJeff Garzik 	}
3824c6fd2807SJeff Garzik 	return 0;
3825c6fd2807SJeff Garzik }
3826c6fd2807SJeff Garzik 
38276919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
38286919a0a6SAlan Cox {
38296919a0a6SAlan Cox 	/* We don't support polling DMA.
38306919a0a6SAlan Cox 	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
38316919a0a6SAlan Cox 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
38326919a0a6SAlan Cox 	 */
38336919a0a6SAlan Cox 	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
38346919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
38356919a0a6SAlan Cox 		return 1;
38366919a0a6SAlan Cox 	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
38376919a0a6SAlan Cox }
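
/*
 * Illustrative sketch only -- a made-up helper showing how other libata code
 * consults the horkage bits returned by ata_device_blacklisted(), e.g. to
 * decide whether to advertise NCQ.  ATA_HORKAGE_NONCQ is from <linux/libata.h>.
 */
static int example_ncq_blacklisted(const struct ata_device *dev)
{
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) ? 1 : 0;
}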
38386919a0a6SAlan Cox 
3839c6fd2807SJeff Garzik /**
3840c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
3841c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
3842c6fd2807SJeff Garzik  *
3843c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
3844c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
3845c6fd2807SJeff Garzik  *	known limits including host controller limits, device
3846c6fd2807SJeff Garzik  *	blacklist, etc...
3847c6fd2807SJeff Garzik  *
3848c6fd2807SJeff Garzik  *	LOCKING:
3849c6fd2807SJeff Garzik  *	None.
3850c6fd2807SJeff Garzik  */
3851c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
3852c6fd2807SJeff Garzik {
3853c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
3854cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
3855c6fd2807SJeff Garzik 	unsigned long xfer_mask;
3856c6fd2807SJeff Garzik 
3857c6fd2807SJeff Garzik 	/* controller modes available */
3858c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
3859c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
3860c6fd2807SJeff Garzik 
38618343f889SRobert Hancock 	/* drive modes available */
3862c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3863c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
3864c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
3865c6fd2807SJeff Garzik 
3866b352e57dSAlan Cox 	/*
3867b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
3868b352e57dSAlan Cox 	 *	cable
3869b352e57dSAlan Cox 	 */
3870b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
3871b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
3872b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3873b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
3874b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3875b352e57dSAlan Cox 	}
3876b352e57dSAlan Cox 
3877c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
3878c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3879c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
3880c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
3881c6fd2807SJeff Garzik 	}
3882c6fd2807SJeff Garzik 
388314d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
388414d66ab7SPetr Vandrovec             host->simplex_claimed && host->simplex_claimed != ap) {
3885c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3886c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3887c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
3888c6fd2807SJeff Garzik 	}
3889c6fd2807SJeff Garzik 
3890e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
3891e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
3892e424675fSJeff Garzik 
3893c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
3894a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3895c6fd2807SJeff Garzik 
38968343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
38978343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
38988343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
38998343f889SRobert Hancock 	 * solely limited by the cable.
39008343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
39018343f889SRobert Hancock 	 * drive side as well. Cases where we know a 40wire cable
39028343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
39038343f889SRobert Hancock 	 */
39048343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
39058343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
39068343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
39078343f889SRobert Hancock 		    (ata_drive_40wire(dev->id) &&
39088343f889SRobert Hancock 		     (ap->cbl == ATA_CBL_PATA_UNK ||
39098343f889SRobert Hancock 		      ap->cbl == ATA_CBL_PATA80))) {
39108343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
39118343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
39128343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
39138343f889SRobert Hancock 		}
39148343f889SRobert Hancock 
3915c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3916c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
3917c6fd2807SJeff Garzik }
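
/*
 * Worked example, illustrative only (made-up helper name): the cable rule
 * above clears the mask bits for UDMA3..UDMA7 (UDMA/44 and faster), leaving
 * only UDMA/33 and below.  ATA_SHIFT_UDMA comes from <linux/ata.h>.
 */
static unsigned long example_limit_to_udma33(unsigned long xfer_mask)
{
	return xfer_mask & ~(0xF8 << ATA_SHIFT_UDMA);	/* keep UDMA0..2 only */
}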
3918c6fd2807SJeff Garzik 
3919c6fd2807SJeff Garzik /**
3920c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3921c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3922c6fd2807SJeff Garzik  *
3923c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
3924c6fd2807SJeff Garzik  *	on the port to which it is attached.
3925c6fd2807SJeff Garzik  *
3926c6fd2807SJeff Garzik  *	LOCKING:
3927c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3928c6fd2807SJeff Garzik  *
3929c6fd2807SJeff Garzik  *	RETURNS:
3930c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3931c6fd2807SJeff Garzik  */
3932c6fd2807SJeff Garzik 
3933c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3934c6fd2807SJeff Garzik {
3935c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3936c6fd2807SJeff Garzik 	unsigned int err_mask;
3937c6fd2807SJeff Garzik 
3938c6fd2807SJeff Garzik 	/* set up set-features taskfile */
3939c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
3940c6fd2807SJeff Garzik 
3941464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
3942464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
3943464cf177STejun Heo 	 */
3944c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3945c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
3946c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
3947464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3948c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3949c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
3950c6fd2807SJeff Garzik 
3951c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3952c6fd2807SJeff Garzik 
3953c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3954c6fd2807SJeff Garzik 	return err_mask;
3955c6fd2807SJeff Garzik }
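
/*
 * Note (illustrative): the dev->xfer_mode value programmed into tf.nsect
 * above is one of the XFER_* codes from <linux/ata.h>, e.g. XFER_PIO_4
 * (0x0c), XFER_MW_DMA_2 (0x22) or XFER_UDMA_5 (0x45, UDMA/100).
 */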
3956c6fd2807SJeff Garzik 
3957c6fd2807SJeff Garzik /**
3958c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
3959c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3960c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
3961c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
3962c6fd2807SJeff Garzik  *
3963c6fd2807SJeff Garzik  *	LOCKING:
3964c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3965c6fd2807SJeff Garzik  *
3966c6fd2807SJeff Garzik  *	RETURNS:
3967c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3968c6fd2807SJeff Garzik  */
3969c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
3970c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
3971c6fd2807SJeff Garzik {
3972c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3973c6fd2807SJeff Garzik 	unsigned int err_mask;
3974c6fd2807SJeff Garzik 
3975c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
3976c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3977c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
3978c6fd2807SJeff Garzik 
3979c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
3980c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
3981c6fd2807SJeff Garzik 
3982c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3983c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
3984c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3985c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3986c6fd2807SJeff Garzik 	tf.nsect = sectors;
3987c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3988c6fd2807SJeff Garzik 
3989c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3990c6fd2807SJeff Garzik 
3991c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3992c6fd2807SJeff Garzik 	return err_mask;
3993c6fd2807SJeff Garzik }
3994c6fd2807SJeff Garzik 
3995c6fd2807SJeff Garzik /**
3996c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
3997c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
3998c6fd2807SJeff Garzik  *
3999c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4000c6fd2807SJeff Garzik  *
4001c6fd2807SJeff Garzik  *	LOCKING:
4002cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4003c6fd2807SJeff Garzik  */
400470e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4005c6fd2807SJeff Garzik {
4006c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4007c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4008c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4009c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4010c6fd2807SJeff Garzik 
4011c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4012c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4013c6fd2807SJeff Garzik 
4014c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4015c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4016c6fd2807SJeff Garzik 
4017c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4018c6fd2807SJeff Garzik 
4019c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
4020c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4021c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4022c6fd2807SJeff Garzik 	 */
4023c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4024c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4025c6fd2807SJeff Garzik 
4026c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4027c6fd2807SJeff Garzik 		if (qc->n_elem)
4028c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4029c6fd2807SJeff Garzik 		/* restore last sg */
4030c6fd2807SJeff Garzik 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
4031c6fd2807SJeff Garzik 		if (pad_buf) {
4032c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
4033c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4034c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4035c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4036c6fd2807SJeff Garzik 		}
4037c6fd2807SJeff Garzik 	} else {
4038c6fd2807SJeff Garzik 		if (qc->n_elem)
4039c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4040c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4041c6fd2807SJeff Garzik 				dir);
4042c6fd2807SJeff Garzik 		/* restore sg */
4043c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4044c6fd2807SJeff Garzik 		if (pad_buf)
4045c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4046c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4047c6fd2807SJeff Garzik 	}
4048c6fd2807SJeff Garzik 
4049c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4050c6fd2807SJeff Garzik 	qc->__sg = NULL;
4051c6fd2807SJeff Garzik }
4052c6fd2807SJeff Garzik 
4053c6fd2807SJeff Garzik /**
4054c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4055c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4056c6fd2807SJeff Garzik  *
4057c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4058c6fd2807SJeff Garzik  *	associated with the current disk command.
4059c6fd2807SJeff Garzik  *
4060c6fd2807SJeff Garzik  *	LOCKING:
4061cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4062c6fd2807SJeff Garzik  *
4063c6fd2807SJeff Garzik  */
4064c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4065c6fd2807SJeff Garzik {
4066c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4067c6fd2807SJeff Garzik 	struct scatterlist *sg;
4068c6fd2807SJeff Garzik 	unsigned int idx;
4069c6fd2807SJeff Garzik 
4070c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4071c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4072c6fd2807SJeff Garzik 
4073c6fd2807SJeff Garzik 	idx = 0;
4074c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4075c6fd2807SJeff Garzik 		u32 addr, offset;
4076c6fd2807SJeff Garzik 		u32 sg_len, len;
4077c6fd2807SJeff Garzik 
4078c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4079c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4080c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4081c6fd2807SJeff Garzik 		 */
4082c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4083c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4084c6fd2807SJeff Garzik 
4085c6fd2807SJeff Garzik 		while (sg_len) {
4086c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4087c6fd2807SJeff Garzik 			len = sg_len;
4088c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4089c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4090c6fd2807SJeff Garzik 
4091c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4092c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4093c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4094c6fd2807SJeff Garzik 
4095c6fd2807SJeff Garzik 			idx++;
4096c6fd2807SJeff Garzik 			sg_len -= len;
4097c6fd2807SJeff Garzik 			addr += len;
4098c6fd2807SJeff Garzik 		}
4099c6fd2807SJeff Garzik 	}
4100c6fd2807SJeff Garzik 
4101c6fd2807SJeff Garzik 	if (idx)
4102c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4103c6fd2807SJeff Garzik }
4104b9a4197eSTejun Heo 
4105c6fd2807SJeff Garzik /**
4106c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4107c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4108c6fd2807SJeff Garzik  *
4109c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4110c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4111c6fd2807SJeff Garzik  *	supplied PACKET command.
4112c6fd2807SJeff Garzik  *
4113c6fd2807SJeff Garzik  *	LOCKING:
4114cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4115c6fd2807SJeff Garzik  *
4116c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4117c6fd2807SJeff Garzik  *               nonzero otherwise
4118c6fd2807SJeff Garzik  */
4119c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4120c6fd2807SJeff Garzik {
4121c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4122c6fd2807SJeff Garzik 
4123b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4124b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4125b9a4197eSTejun Heo 	 */
4126b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
41276f23a31dSAlbert Lee 		return 1;
41286f23a31dSAlbert Lee 
4129c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4130b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4131c6fd2807SJeff Garzik 
4132b9a4197eSTejun Heo 	return 0;
4133c6fd2807SJeff Garzik }
4134b9a4197eSTejun Heo 
4135c6fd2807SJeff Garzik /**
4136c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4137c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4138c6fd2807SJeff Garzik  *
4139c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4140c6fd2807SJeff Garzik  *
4141c6fd2807SJeff Garzik  *	LOCKING:
4142cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4143c6fd2807SJeff Garzik  */
4144c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4145c6fd2807SJeff Garzik {
4146c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4147c6fd2807SJeff Garzik 		return;
4148c6fd2807SJeff Garzik 
4149c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4150c6fd2807SJeff Garzik }
4151c6fd2807SJeff Garzik 
4152c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4153c6fd2807SJeff Garzik 
4154c6fd2807SJeff Garzik /**
4155c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4156c6fd2807SJeff Garzik  *	@qc: Command to be associated
4157c6fd2807SJeff Garzik  *	@buf: Memory buffer
4158c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4159c6fd2807SJeff Garzik  *
4160c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4161c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4162c6fd2807SJeff Garzik  *
4163c6fd2807SJeff Garzik  *	LOCKING:
4164cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4165c6fd2807SJeff Garzik  */
4166c6fd2807SJeff Garzik 
4167c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4168c6fd2807SJeff Garzik {
4169c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4170c6fd2807SJeff Garzik 
4171c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4172c6fd2807SJeff Garzik 	qc->n_elem = 1;
4173c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4174c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4175c6fd2807SJeff Garzik 	qc->nbytes = buflen;
4176c6fd2807SJeff Garzik 
417761c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4178c6fd2807SJeff Garzik }
4179c6fd2807SJeff Garzik 
4180c6fd2807SJeff Garzik /**
4181c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4182c6fd2807SJeff Garzik  *	@qc: Command to be associated
4183c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4184c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4185c6fd2807SJeff Garzik  *
4186c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4187c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4188c6fd2807SJeff Garzik  *	elements.
4189c6fd2807SJeff Garzik  *
4190c6fd2807SJeff Garzik  *	LOCKING:
4191cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4192c6fd2807SJeff Garzik  */
4193c6fd2807SJeff Garzik 
4194c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4195c6fd2807SJeff Garzik 		 unsigned int n_elem)
4196c6fd2807SJeff Garzik {
4197c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4198c6fd2807SJeff Garzik 	qc->__sg = sg;
4199c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4200c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
4201c6fd2807SJeff Garzik }
4202c6fd2807SJeff Garzik 
4203c6fd2807SJeff Garzik /**
4204c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4205c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4206c6fd2807SJeff Garzik  *
4207c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4208c6fd2807SJeff Garzik  *
4209c6fd2807SJeff Garzik  *	LOCKING:
4210cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4211c6fd2807SJeff Garzik  *
4212c6fd2807SJeff Garzik  *	RETURNS:
4213c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4214c6fd2807SJeff Garzik  */
4215c6fd2807SJeff Garzik 
4216c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4217c6fd2807SJeff Garzik {
4218c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4219c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4220c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4221c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4222c6fd2807SJeff Garzik 	int trim_sg = 0;
4223c6fd2807SJeff Garzik 
4224c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4225c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4226c6fd2807SJeff Garzik 	if (qc->pad_len) {
4227c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4228c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4229c6fd2807SJeff Garzik 
4230c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4231c6fd2807SJeff Garzik 
4232c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4233c6fd2807SJeff Garzik 
4234c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4235c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4236c6fd2807SJeff Garzik 			       qc->pad_len);
4237c6fd2807SJeff Garzik 
4238c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4239c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4240c6fd2807SJeff Garzik 		/* trim sg */
4241c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4242c6fd2807SJeff Garzik 		if (sg->length == 0)
4243c6fd2807SJeff Garzik 			trim_sg = 1;
4244c6fd2807SJeff Garzik 
4245c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4246c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4247c6fd2807SJeff Garzik 	}
4248c6fd2807SJeff Garzik 
4249c6fd2807SJeff Garzik 	if (trim_sg) {
4250c6fd2807SJeff Garzik 		qc->n_elem--;
4251c6fd2807SJeff Garzik 		goto skip_map;
4252c6fd2807SJeff Garzik 	}
4253c6fd2807SJeff Garzik 
4254c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4255c6fd2807SJeff Garzik 				     sg->length, dir);
4256c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4257c6fd2807SJeff Garzik 		/* restore sg */
4258c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4259c6fd2807SJeff Garzik 		return -1;
4260c6fd2807SJeff Garzik 	}
4261c6fd2807SJeff Garzik 
4262c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4263c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4264c6fd2807SJeff Garzik 
4265c6fd2807SJeff Garzik skip_map:
4266c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4267c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4268c6fd2807SJeff Garzik 
4269c6fd2807SJeff Garzik 	return 0;
4270c6fd2807SJeff Garzik }
4271c6fd2807SJeff Garzik 
4272c6fd2807SJeff Garzik /**
4273c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4274c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4275c6fd2807SJeff Garzik  *
4276c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4277c6fd2807SJeff Garzik  *
4278c6fd2807SJeff Garzik  *	LOCKING:
4279cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4280c6fd2807SJeff Garzik  *
4281c6fd2807SJeff Garzik  *	RETURNS:
4282c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4283c6fd2807SJeff Garzik  *
4284c6fd2807SJeff Garzik  */
4285c6fd2807SJeff Garzik 
4286c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4287c6fd2807SJeff Garzik {
4288c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4289c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4290c6fd2807SJeff Garzik 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
4291c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4292c6fd2807SJeff Garzik 
429344877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4294c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4295c6fd2807SJeff Garzik 
4296c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4297c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4298c6fd2807SJeff Garzik 	if (qc->pad_len) {
4299c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4300c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4301c6fd2807SJeff Garzik 		unsigned int offset;
4302c6fd2807SJeff Garzik 
4303c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4304c6fd2807SJeff Garzik 
4305c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4306c6fd2807SJeff Garzik 
4307c6fd2807SJeff Garzik 		/*
4308c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4309c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4310c6fd2807SJeff Garzik 		 */
4311c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4312c6fd2807SJeff Garzik 		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4313c6fd2807SJeff Garzik 		psg->offset = offset_in_page(offset);
4314c6fd2807SJeff Garzik 
4315c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4316c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4317c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4318c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4319c6fd2807SJeff Garzik 		}
4320c6fd2807SJeff Garzik 
4321c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4322c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4323c6fd2807SJeff Garzik 		/* trim last sg */
4324c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4325c6fd2807SJeff Garzik 		if (lsg->length == 0)
4326c6fd2807SJeff Garzik 			trim_sg = 1;
4327c6fd2807SJeff Garzik 
4328c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4329c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4330c6fd2807SJeff Garzik 	}
4331c6fd2807SJeff Garzik 
4332c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4333c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4334c6fd2807SJeff Garzik 		pre_n_elem--;
4335c6fd2807SJeff Garzik 
4336c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4337c6fd2807SJeff Garzik 		n_elem = 0;
4338c6fd2807SJeff Garzik 		goto skip_map;
4339c6fd2807SJeff Garzik 	}
4340c6fd2807SJeff Garzik 
4341c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4342c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4343c6fd2807SJeff Garzik 	if (n_elem < 1) {
4344c6fd2807SJeff Garzik 		/* restore last sg */
4345c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4346c6fd2807SJeff Garzik 		return -1;
4347c6fd2807SJeff Garzik 	}
4348c6fd2807SJeff Garzik 
4349c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4350c6fd2807SJeff Garzik 
4351c6fd2807SJeff Garzik skip_map:
4352c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4353c6fd2807SJeff Garzik 
4354c6fd2807SJeff Garzik 	return 0;
4355c6fd2807SJeff Garzik }
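
/*
 * Worked example (illustrative): an ATAPI read whose last sg entry is
 * 510 bytes long.  qc->pad_len = 510 & 3 = 2, so that entry is trimmed to
 * 508 bytes and a 4-byte pad sg entry is appended; the last 2 real bytes
 * land at the start of the pad buffer and ata_sg_clean() copies them back,
 * keeping the DMA transfer aligned to a 32-bit boundary.
 */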
4356c6fd2807SJeff Garzik 
4357c6fd2807SJeff Garzik /**
4358c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4359c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4360c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4361c6fd2807SJeff Garzik  *
4362c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4363c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4364c6fd2807SJeff Garzik  *	vice-versa.
4365c6fd2807SJeff Garzik  *
4366c6fd2807SJeff Garzik  *	LOCKING:
4367c6fd2807SJeff Garzik  *	Inherited from caller.
4368c6fd2807SJeff Garzik  */
4369c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4370c6fd2807SJeff Garzik {
4371c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4372c6fd2807SJeff Garzik 	unsigned int i;
4373c6fd2807SJeff Garzik 
4374c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4375c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4376c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4377c6fd2807SJeff Garzik }
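
/*
 * Illustrative sketch only (made-up helper name): typical use right after
 * PIO'ing IDENTIFY data into a buffer, mirroring what ata_dev_read_id()
 * does elsewhere in this file.  ATA_ID_WORDS is from <linux/ata.h>.
 */
static void example_fixup_id(u16 *id)
{
	/* id[] was filled via PIO as little-endian 16-bit words */
	swap_buf_le16(id, ATA_ID_WORDS);
}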
4378c6fd2807SJeff Garzik 
4379c6fd2807SJeff Garzik /**
43800d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4381c6fd2807SJeff Garzik  *	@adev: device to target
4382c6fd2807SJeff Garzik  *	@buf: data buffer
4383c6fd2807SJeff Garzik  *	@buflen: buffer length
4384c6fd2807SJeff Garzik  *	@write_data: read/write
4385c6fd2807SJeff Garzik  *
4386c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4387c6fd2807SJeff Garzik  *
4388c6fd2807SJeff Garzik  *	LOCKING:
4389c6fd2807SJeff Garzik  *	Inherited from caller.
4390c6fd2807SJeff Garzik  */
43910d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4392c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4393c6fd2807SJeff Garzik {
4394c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
4395c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4396c6fd2807SJeff Garzik 
4397c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4398c6fd2807SJeff Garzik 	if (write_data)
43990d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4400c6fd2807SJeff Garzik 	else
44010d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4402c6fd2807SJeff Garzik 
4403c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4404c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4405c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4406c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4407c6fd2807SJeff Garzik 
4408c6fd2807SJeff Garzik 		if (write_data) {
4409c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
44100d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4411c6fd2807SJeff Garzik 		} else {
44120d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4413c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4414c6fd2807SJeff Garzik 		}
4415c6fd2807SJeff Garzik 	}
4416c6fd2807SJeff Garzik }
4417c6fd2807SJeff Garzik 
4418c6fd2807SJeff Garzik /**
44190d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4420c6fd2807SJeff Garzik  *	@adev: device to target
4421c6fd2807SJeff Garzik  *	@buf: data buffer
4422c6fd2807SJeff Garzik  *	@buflen: buffer length
4423c6fd2807SJeff Garzik  *	@write_data: read/write
4424c6fd2807SJeff Garzik  *
4425c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4426c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4427c6fd2807SJeff Garzik  *
4428c6fd2807SJeff Garzik  *	LOCKING:
4429c6fd2807SJeff Garzik  *	Inherited from caller.
4430c6fd2807SJeff Garzik  */
44310d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4432c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
4433c6fd2807SJeff Garzik {
4434c6fd2807SJeff Garzik 	unsigned long flags;
4435c6fd2807SJeff Garzik 	local_irq_save(flags);
44360d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
4437c6fd2807SJeff Garzik 	local_irq_restore(flags);
4438c6fd2807SJeff Garzik }
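
/*
 * Illustrative sketch only (made-up name, most fields omitted): an LLDD
 * selects one of the two helpers above through its ata_port_operations,
 * depending on whether its data register tolerates interrupts during PIO.
 */
static const struct ata_port_operations example_port_ops = {
	.data_xfer	= ata_data_xfer_noirq,	/* or ata_data_xfer */
};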
4439c6fd2807SJeff Garzik 
4440c6fd2807SJeff Garzik 
4441c6fd2807SJeff Garzik /**
44425a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
4443c6fd2807SJeff Garzik  *	@qc: Command on going
4444c6fd2807SJeff Garzik  *
44455a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4446c6fd2807SJeff Garzik  *
4447c6fd2807SJeff Garzik  *	LOCKING:
4448c6fd2807SJeff Garzik  *	Inherited from caller.
4449c6fd2807SJeff Garzik  */
4450c6fd2807SJeff Garzik 
4451c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
4452c6fd2807SJeff Garzik {
4453c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4454c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4455c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4456c6fd2807SJeff Garzik 	struct page *page;
4457c6fd2807SJeff Garzik 	unsigned int offset;
4458c6fd2807SJeff Garzik 	unsigned char *buf;
4459c6fd2807SJeff Garzik 
44605a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4461c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4462c6fd2807SJeff Garzik 
4463c6fd2807SJeff Garzik 	page = sg[qc->cursg].page;
4464726f0785STejun Heo 	offset = sg[qc->cursg].offset + qc->cursg_ofs;
4465c6fd2807SJeff Garzik 
4466c6fd2807SJeff Garzik 	/* get the current page and offset */
4467c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4468c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4469c6fd2807SJeff Garzik 
4470c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4471c6fd2807SJeff Garzik 
4472c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4473c6fd2807SJeff Garzik 		unsigned long flags;
4474c6fd2807SJeff Garzik 
4475c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
4476c6fd2807SJeff Garzik 		local_irq_save(flags);
4477c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4478c6fd2807SJeff Garzik 
4479c6fd2807SJeff Garzik 		/* do the actual data transfer */
44805a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4481c6fd2807SJeff Garzik 
4482c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4483c6fd2807SJeff Garzik 		local_irq_restore(flags);
4484c6fd2807SJeff Garzik 	} else {
4485c6fd2807SJeff Garzik 		buf = page_address(page);
44865a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4487c6fd2807SJeff Garzik 	}
4488c6fd2807SJeff Garzik 
44895a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
44905a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
4491c6fd2807SJeff Garzik 
4492726f0785STejun Heo 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4493c6fd2807SJeff Garzik 		qc->cursg++;
4494c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4495c6fd2807SJeff Garzik 	}
4496c6fd2807SJeff Garzik }
4497c6fd2807SJeff Garzik 
4498c6fd2807SJeff Garzik /**
44995a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
4500c6fd2807SJeff Garzik  *	@qc: Command on going
4501c6fd2807SJeff Garzik  *
45025a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
4503c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
4504c6fd2807SJeff Garzik  *
4505c6fd2807SJeff Garzik  *	LOCKING:
4506c6fd2807SJeff Garzik  *	Inherited from caller.
4507c6fd2807SJeff Garzik  */
4508c6fd2807SJeff Garzik 
4509c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
4510c6fd2807SJeff Garzik {
4511c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
4512c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
4513c6fd2807SJeff Garzik 		unsigned int nsect;
4514c6fd2807SJeff Garzik 
4515c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
4516c6fd2807SJeff Garzik 
45175a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4518726f0785STejun Heo 			    qc->dev->multi_count);
4519c6fd2807SJeff Garzik 		while (nsect--)
4520c6fd2807SJeff Garzik 			ata_pio_sector(qc);
4521c6fd2807SJeff Garzik 	} else
4522c6fd2807SJeff Garzik 		ata_pio_sector(qc);
4523c6fd2807SJeff Garzik }
4524c6fd2807SJeff Garzik 
4525c6fd2807SJeff Garzik /**
4526c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
4527c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
4528c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
4529c6fd2807SJeff Garzik  *
4530c6fd2807SJeff Garzik  *	When the device has indicated its readiness to accept
4531c6fd2807SJeff Garzik  *	a CDB, this function is called to send the CDB.
4532c6fd2807SJeff Garzik  *
4533c6fd2807SJeff Garzik  *	LOCKING:
4534c6fd2807SJeff Garzik  *	caller.
4535c6fd2807SJeff Garzik  *	Inherited from caller.
4536c6fd2807SJeff Garzik 
4537c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4538c6fd2807SJeff Garzik {
4539c6fd2807SJeff Garzik 	/* send SCSI cdb */
4540c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
4541c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
4542c6fd2807SJeff Garzik 
4543c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4544c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4545c6fd2807SJeff Garzik 
4546c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
4547c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
4548c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
4549c6fd2807SJeff Garzik 		break;
4550c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
4551c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4552c6fd2807SJeff Garzik 		break;
4553c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
4554c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4555c6fd2807SJeff Garzik 		/* initiate bmdma */
4556c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
4557c6fd2807SJeff Garzik 		break;
4558c6fd2807SJeff Garzik 	}
4559c6fd2807SJeff Garzik }
4560c6fd2807SJeff Garzik 
4561c6fd2807SJeff Garzik /**
4562c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4563c6fd2807SJeff Garzik  *	@qc: Command on going
4564c6fd2807SJeff Garzik  *	@bytes: number of bytes
4565c6fd2807SJeff Garzik  *
4566c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4567c6fd2807SJeff Garzik  *
4568c6fd2807SJeff Garzik  *	LOCKING:
4569c6fd2807SJeff Garzik  *	Inherited from caller.
4570c6fd2807SJeff Garzik  *
4571c6fd2807SJeff Garzik  */
4572c6fd2807SJeff Garzik 
4573c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4574c6fd2807SJeff Garzik {
4575c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4576c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4577c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4578c6fd2807SJeff Garzik 	struct page *page;
4579c6fd2807SJeff Garzik 	unsigned char *buf;
4580c6fd2807SJeff Garzik 	unsigned int offset, count;
4581c6fd2807SJeff Garzik 
4582c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
4583c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4584c6fd2807SJeff Garzik 
4585c6fd2807SJeff Garzik next_sg:
4586c6fd2807SJeff Garzik 	if (unlikely(qc->cursg >= qc->n_elem)) {
4587c6fd2807SJeff Garzik 		/*
4588c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
4589c6fd2807SJeff Garzik 		 * more data to transfer.  To avoid overrunning qc->sg while
4590c6fd2807SJeff Garzik 		 * still fulfilling the length specified in the byte count register:
4591c6fd2807SJeff Garzik 		 *    - for the read case, discard trailing data from the device
4592c6fd2807SJeff Garzik 		 *    - for the write case, pad the device with zero data
4593c6fd2807SJeff Garzik 		 */
4594c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
4595c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
4596c6fd2807SJeff Garzik 		unsigned int i;
4597c6fd2807SJeff Garzik 
4598c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
4599c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
4600c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
4601c6fd2807SJeff Garzik 
4602c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
4603c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4604c6fd2807SJeff Garzik 
4605c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4606c6fd2807SJeff Garzik 		return;
4607c6fd2807SJeff Garzik 	}
4608c6fd2807SJeff Garzik 
4609c6fd2807SJeff Garzik 	sg = &qc->__sg[qc->cursg];
4610c6fd2807SJeff Garzik 
4611c6fd2807SJeff Garzik 	page = sg->page;
4612c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
4613c6fd2807SJeff Garzik 
4614c6fd2807SJeff Garzik 	/* get the current page and offset */
4615c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4616c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4617c6fd2807SJeff Garzik 
4618c6fd2807SJeff Garzik 	/* don't overrun current sg */
4619c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
4620c6fd2807SJeff Garzik 
4621c6fd2807SJeff Garzik 	/* don't cross page boundaries */
4622c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
4623c6fd2807SJeff Garzik 
4624c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4625c6fd2807SJeff Garzik 
4626c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4627c6fd2807SJeff Garzik 		unsigned long flags;
4628c6fd2807SJeff Garzik 
4629c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
4630c6fd2807SJeff Garzik 		local_irq_save(flags);
4631c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4632c6fd2807SJeff Garzik 
4633c6fd2807SJeff Garzik 		/* do the actual data transfer */
4634c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4635c6fd2807SJeff Garzik 
4636c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4637c6fd2807SJeff Garzik 		local_irq_restore(flags);
4638c6fd2807SJeff Garzik 	} else {
4639c6fd2807SJeff Garzik 		buf = page_address(page);
4640c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4641c6fd2807SJeff Garzik 	}
4642c6fd2807SJeff Garzik 
4643c6fd2807SJeff Garzik 	bytes -= count;
4644c6fd2807SJeff Garzik 	qc->curbytes += count;
4645c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
4646c6fd2807SJeff Garzik 
4647c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
4648c6fd2807SJeff Garzik 		qc->cursg++;
4649c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4650c6fd2807SJeff Garzik 	}
4651c6fd2807SJeff Garzik 
4652c6fd2807SJeff Garzik 	if (bytes)
4653c6fd2807SJeff Garzik 		goto next_sg;
4654c6fd2807SJeff Garzik }
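/*
 * A short worked example of the clamping above (made-up numbers, assuming
 * 4 KiB pages; not taken from any real transfer).  With sg->offset = 0,
 * sg->length = 8192, qc->cursg_ofs = 1000 and bytes = 4096:
 *
 *	offset = 0 + 1000                -> page 0, offset 1000
 *	count  = min(8192 - 1000, 4096)  = 4096   -- sg clamp
 *	count  = min(4096, 4096 - 1000)  = 3096   -- page clamp
 *
 * so the first pass moves 3096 bytes, and the remaining 1000 bytes are
 * moved on a second pass from the next page of the same sg entry.
 */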
4655c6fd2807SJeff Garzik 
4656c6fd2807SJeff Garzik /**
4657c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
4658c6fd2807SJeff Garzik  *	@qc: Command in progress
4659c6fd2807SJeff Garzik  *
4660c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4661c6fd2807SJeff Garzik  *
4662c6fd2807SJeff Garzik  *	LOCKING:
4663c6fd2807SJeff Garzik  *	Inherited from caller.
4664c6fd2807SJeff Garzik  */
4665c6fd2807SJeff Garzik 
4666c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4667c6fd2807SJeff Garzik {
4668c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4669c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
4670c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
4671c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4672c6fd2807SJeff Garzik 
4673c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
4674c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
4675c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
4676c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
4677c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
4678c6fd2807SJeff Garzik 	 */
4679c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
4680c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
4681c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
4682c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
4683c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
4684c6fd2807SJeff Garzik 
4685c6fd2807SJeff Garzik 	/* CoD (bit 0) shall be cleared to zero, indicating data transfer */
4686c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
4687c6fd2807SJeff Garzik 		goto err_out;
4688c6fd2807SJeff Garzik 
4689c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
4690c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4691c6fd2807SJeff Garzik 	if (do_write != i_write)
4692c6fd2807SJeff Garzik 		goto err_out;
4693c6fd2807SJeff Garzik 
469444877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4695c6fd2807SJeff Garzik 
4696c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
4697c6fd2807SJeff Garzik 
4698c6fd2807SJeff Garzik 	return;
4699c6fd2807SJeff Garzik 
4700c6fd2807SJeff Garzik err_out:
4701c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4702c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
4703c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
4704c6fd2807SJeff Garzik }
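/*
 * A minimal sketch (illustration only) of how the ATAPI interrupt reason
 * and byte count come out of the shadow taskfile once ->tf_read() has
 * run; "tf" below stands for any freshly read taskfile:
 *
 *	unsigned int ireason = tf.nsect;
 *	unsigned int bytes   = (tf.lbah << 8) | tf.lbam;
 *	int is_cmd  = ireason & (1 << 0);	-- CoD: 1 = CDB, 0 = data
 *	int is_read = ireason & (1 << 1);	-- IO:  1 = to host, 0 = to device
 *
 * atapi_pio_bytes() above bails out to err_out when CoD is set or when
 * IO disagrees with the direction recorded in qc->tf.flags.
 */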
4705c6fd2807SJeff Garzik 
4706c6fd2807SJeff Garzik /**
4707c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4708c6fd2807SJeff Garzik  *	@ap: the target ata_port
4709c6fd2807SJeff Garzik  *	@qc: qc in progress
4710c6fd2807SJeff Garzik  *
4711c6fd2807SJeff Garzik  *	RETURNS:
4712c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
4713c6fd2807SJeff Garzik  */
4714c6fd2807SJeff Garzik 
4715c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4716c6fd2807SJeff Garzik {
4717c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
4718c6fd2807SJeff Garzik 		return 1;
4719c6fd2807SJeff Garzik 
4720c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
4721c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
4722c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
4723c6fd2807SJeff Garzik 		    return 1;
4724c6fd2807SJeff Garzik 
4725c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
4726c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4727c6fd2807SJeff Garzik 			return 1;
4728c6fd2807SJeff Garzik 	}
4729c6fd2807SJeff Garzik 
4730c6fd2807SJeff Garzik 	return 0;
4731c6fd2807SJeff Garzik }
4732c6fd2807SJeff Garzik 
4733c6fd2807SJeff Garzik /**
4734c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
4735c6fd2807SJeff Garzik  *	@qc: Command to complete
4736c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4737c6fd2807SJeff Garzik  *
4738c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
4739c6fd2807SJeff Garzik  *
4740c6fd2807SJeff Garzik  *	LOCKING:
4741cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
4742c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
4743c6fd2807SJeff Garzik  */
4744c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4745c6fd2807SJeff Garzik {
4746c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4747c6fd2807SJeff Garzik 	unsigned long flags;
4748c6fd2807SJeff Garzik 
4749c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
4750c6fd2807SJeff Garzik 		if (in_wq) {
4751c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4752c6fd2807SJeff Garzik 
4753cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
4754cca3974eSJeff Garzik 			 * released.
4755c6fd2807SJeff Garzik 			 */
4756c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
4757c6fd2807SJeff Garzik 			if (qc) {
4758c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
475983625006SAkira Iguchi 					ap->ops->irq_on(ap);
4760c6fd2807SJeff Garzik 					ata_qc_complete(qc);
4761c6fd2807SJeff Garzik 				} else
4762c6fd2807SJeff Garzik 					ata_port_freeze(ap);
4763c6fd2807SJeff Garzik 			}
4764c6fd2807SJeff Garzik 
4765c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4766c6fd2807SJeff Garzik 		} else {
4767c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
4768c6fd2807SJeff Garzik 				ata_qc_complete(qc);
4769c6fd2807SJeff Garzik 			else
4770c6fd2807SJeff Garzik 				ata_port_freeze(ap);
4771c6fd2807SJeff Garzik 		}
4772c6fd2807SJeff Garzik 	} else {
4773c6fd2807SJeff Garzik 		if (in_wq) {
4774c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
477583625006SAkira Iguchi 			ap->ops->irq_on(ap);
4776c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4777c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4778c6fd2807SJeff Garzik 		} else
4779c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4780c6fd2807SJeff Garzik 	}
4781c6fd2807SJeff Garzik }
4782c6fd2807SJeff Garzik 
4783c6fd2807SJeff Garzik /**
4784c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
4785c6fd2807SJeff Garzik  *	@ap: the target ata_port
4786c6fd2807SJeff Garzik  *	@qc: qc in progress
4787c6fd2807SJeff Garzik  *	@status: current device status
4788c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4789c6fd2807SJeff Garzik  *
4790c6fd2807SJeff Garzik  *	RETURNS:
4791c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
4792c6fd2807SJeff Garzik  */
4793c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4794c6fd2807SJeff Garzik 		 u8 status, int in_wq)
4795c6fd2807SJeff Garzik {
4796c6fd2807SJeff Garzik 	unsigned long flags = 0;
4797c6fd2807SJeff Garzik 	int poll_next;
4798c6fd2807SJeff Garzik 
4799c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4800c6fd2807SJeff Garzik 
4801c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
4802c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
4803c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4804c6fd2807SJeff Garzik 	 */
4805c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4806c6fd2807SJeff Garzik 
4807c6fd2807SJeff Garzik fsm_start:
4808c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
480944877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4810c6fd2807SJeff Garzik 
4811c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
4812c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
4813c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
4814c6fd2807SJeff Garzik 
4815c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
4816c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
4817c6fd2807SJeff Garzik 		 * takes over after sending the data.
4818c6fd2807SJeff Garzik 		 */
4819c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4820c6fd2807SJeff Garzik 
4821c6fd2807SJeff Garzik 		/* check device status */
4822c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
4823c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
4824c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
4825c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
4826c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
4827c6fd2807SJeff Garzik 			else
4828c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
4829c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4830c6fd2807SJeff Garzik 
4831c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4832c6fd2807SJeff Garzik 			goto fsm_start;
4833c6fd2807SJeff Garzik 		}
4834c6fd2807SJeff Garzik 
4835c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
4836c6fd2807SJeff Garzik 		 * when it finds something wrong.
4837c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
4838c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
4839c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
4840c6fd2807SJeff Garzik 		 */
4841c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
484244877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
484344877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
4844c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
4845c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4846c6fd2807SJeff Garzik 			goto fsm_start;
4847c6fd2807SJeff Garzik 		}
4848c6fd2807SJeff Garzik 
4849c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
4850c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
4851c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
4852c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
4853c6fd2807SJeff Garzik 		 */
4854c6fd2807SJeff Garzik 		if (in_wq)
4855c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4856c6fd2807SJeff Garzik 
4857c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
4858c6fd2807SJeff Garzik 			/* PIO data out protocol.
4859c6fd2807SJeff Garzik 			 * send first data block.
4860c6fd2807SJeff Garzik 			 */
4861c6fd2807SJeff Garzik 
4862c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
4863c6fd2807SJeff Garzik 			 * to HSM_ST_LAST, so the state is changed here
4864c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
4865c6fd2807SJeff Garzik 			 */
4866c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
4867c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
4868c6fd2807SJeff Garzik 			ata_altstatus(ap); /* flush */
4869c6fd2807SJeff Garzik 		} else
4870c6fd2807SJeff Garzik 			/* send CDB */
4871c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
4872c6fd2807SJeff Garzik 
4873c6fd2807SJeff Garzik 		if (in_wq)
4874c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4875c6fd2807SJeff Garzik 
4876c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
4877c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
4878c6fd2807SJeff Garzik 		 */
4879c6fd2807SJeff Garzik 		break;
4880c6fd2807SJeff Garzik 
4881c6fd2807SJeff Garzik 	case HSM_ST:
4882c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
4883c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
4884c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
4885c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
4886c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
4887c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
4888c6fd2807SJeff Garzik 				 */
4889c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
4890c6fd2807SJeff Garzik 				goto fsm_start;
4891c6fd2807SJeff Garzik 			}
4892c6fd2807SJeff Garzik 
4893c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
4894c6fd2807SJeff Garzik 			 * when it finds something wrong.
4895c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
4896c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
4897c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
4898c6fd2807SJeff Garzik 			 */
4899c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
490044877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
490144877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
490244877b4eSTejun Heo 						status);
4903c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4904c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4905c6fd2807SJeff Garzik 				goto fsm_start;
4906c6fd2807SJeff Garzik 			}
4907c6fd2807SJeff Garzik 
4908c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
4909c6fd2807SJeff Garzik 
4910c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4911c6fd2807SJeff Garzik 				/* bad ireason reported by device */
4912c6fd2807SJeff Garzik 				goto fsm_start;
4913c6fd2807SJeff Garzik 
4914c6fd2807SJeff Garzik 		} else {
4915c6fd2807SJeff Garzik 			/* ATA PIO protocol */
4916c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
4917c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
4918c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
4919c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
4920c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
4921c6fd2807SJeff Garzik 				else
492255a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
492355a8e2c8STejun Heo 					 * Phantom devices also trigger this
492455a8e2c8STejun Heo 					 * condition.  Mark hint.
492555a8e2c8STejun Heo 					 */
492655a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
492755a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
4928c6fd2807SJeff Garzik 
4929c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4930c6fd2807SJeff Garzik 				goto fsm_start;
4931c6fd2807SJeff Garzik 			}
4932c6fd2807SJeff Garzik 
4933c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
4934c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
4935c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
4936c6fd2807SJeff Garzik 			 * block of junk data before changing the
4937c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
4938c6fd2807SJeff Garzik 			 *
4939c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
4940c6fd2807SJeff Garzik 			 * sense since the data block has been
4941c6fd2807SJeff Garzik 			 * transferred to the device.
4942c6fd2807SJeff Garzik 			 */
4943c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
4944c6fd2807SJeff Garzik 				/* data might be corrupted */
4945c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
4946c6fd2807SJeff Garzik 
4947c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4948c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
4949c6fd2807SJeff Garzik 					ata_altstatus(ap);
4950c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
4951c6fd2807SJeff Garzik 				}
4952c6fd2807SJeff Garzik 
4953c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
4954c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
4955c6fd2807SJeff Garzik 
4956c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
4957c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST, so the state
4958c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
4959c6fd2807SJeff Garzik 				 */
4960c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4961c6fd2807SJeff Garzik 				goto fsm_start;
4962c6fd2807SJeff Garzik 			}
4963c6fd2807SJeff Garzik 
4964c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
4965c6fd2807SJeff Garzik 
4966c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
4967c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4968c6fd2807SJeff Garzik 				/* all data read */
4969c6fd2807SJeff Garzik 				ata_altstatus(ap);
4970c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
4971c6fd2807SJeff Garzik 				goto fsm_start;
4972c6fd2807SJeff Garzik 			}
4973c6fd2807SJeff Garzik 		}
4974c6fd2807SJeff Garzik 
4975c6fd2807SJeff Garzik 		ata_altstatus(ap); /* flush */
4976c6fd2807SJeff Garzik 		poll_next = 1;
4977c6fd2807SJeff Garzik 		break;
4978c6fd2807SJeff Garzik 
4979c6fd2807SJeff Garzik 	case HSM_ST_LAST:
4980c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
4981c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
4982c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4983c6fd2807SJeff Garzik 			goto fsm_start;
4984c6fd2807SJeff Garzik 		}
4985c6fd2807SJeff Garzik 
4986c6fd2807SJeff Garzik 		/* no more data to transfer */
4987c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
498844877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
4989c6fd2807SJeff Garzik 
4990c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
4991c6fd2807SJeff Garzik 
4992c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
4993c6fd2807SJeff Garzik 
4994c6fd2807SJeff Garzik 		/* complete taskfile transaction */
4995c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
4996c6fd2807SJeff Garzik 
4997c6fd2807SJeff Garzik 		poll_next = 0;
4998c6fd2807SJeff Garzik 		break;
4999c6fd2807SJeff Garzik 
5000c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5001c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5002c6fd2807SJeff Garzik 		 * know what's wrong and recover
5003c6fd2807SJeff Garzik 		 */
5004c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5005c6fd2807SJeff Garzik 
5006c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5007c6fd2807SJeff Garzik 
5008c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5009c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5010c6fd2807SJeff Garzik 
5011c6fd2807SJeff Garzik 		poll_next = 0;
5012c6fd2807SJeff Garzik 		break;
5013c6fd2807SJeff Garzik 	default:
5014c6fd2807SJeff Garzik 		poll_next = 0;
5015c6fd2807SJeff Garzik 		BUG();
5016c6fd2807SJeff Garzik 	}
5017c6fd2807SJeff Garzik 
5018c6fd2807SJeff Garzik 	return poll_next;
5019c6fd2807SJeff Garzik }
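/*
 * A minimal usage sketch (assumptions only, not driver code) of how a
 * polling caller is expected to drive ata_hsm_move(): keep feeding it
 * fresh status until it stops asking for another poll.
 *
 *	u8 status = ata_chk_status(ap);
 *
 *	while (ata_hsm_move(ap, qc, status, in_wq))
 *		status = ata_busy_wait(ap, ATA_BUSY, 1000);
 *
 * ata_pio_task() below is the real in-tree caller; instead of spinning,
 * it requeues itself with ata_port_queue_task() when the device stays
 * busy for too long.
 */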
5020c6fd2807SJeff Garzik 
502165f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5022c6fd2807SJeff Garzik {
502365f27f38SDavid Howells 	struct ata_port *ap =
502465f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
502565f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5026c6fd2807SJeff Garzik 	u8 status;
5027c6fd2807SJeff Garzik 	int poll_next;
5028c6fd2807SJeff Garzik 
5029c6fd2807SJeff Garzik fsm_start:
5030c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5031c6fd2807SJeff Garzik 
5032c6fd2807SJeff Garzik 	/*
5033c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5034c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5035c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5036c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5037c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5038c6fd2807SJeff Garzik 	 */
5039c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5040c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5041c6fd2807SJeff Garzik 		msleep(2);
5042c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5043c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5044c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5045c6fd2807SJeff Garzik 			return;
5046c6fd2807SJeff Garzik 		}
5047c6fd2807SJeff Garzik 	}
5048c6fd2807SJeff Garzik 
5049c6fd2807SJeff Garzik 	/* move the HSM */
5050c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5051c6fd2807SJeff Garzik 
5052c6fd2807SJeff Garzik 	/* another command or interrupt handler
5053c6fd2807SJeff Garzik 	 * may be running at this point.
5054c6fd2807SJeff Garzik 	 */
5055c6fd2807SJeff Garzik 	if (poll_next)
5056c6fd2807SJeff Garzik 		goto fsm_start;
5057c6fd2807SJeff Garzik }
5058c6fd2807SJeff Garzik 
5059c6fd2807SJeff Garzik /**
5060c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5061c6fd2807SJeff Garzik  *	@ap: Port from which to allocate an available command structure
5063c6fd2807SJeff Garzik  *
5064c6fd2807SJeff Garzik  *	LOCKING:
5065c6fd2807SJeff Garzik  *	None.
5066c6fd2807SJeff Garzik  */
5067c6fd2807SJeff Garzik 
5068c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5069c6fd2807SJeff Garzik {
5070c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5071c6fd2807SJeff Garzik 	unsigned int i;
5072c6fd2807SJeff Garzik 
5073c6fd2807SJeff Garzik 	/* no command while frozen */
5074c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5075c6fd2807SJeff Garzik 		return NULL;
5076c6fd2807SJeff Garzik 
5077c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5078c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5079c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5080c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5081c6fd2807SJeff Garzik 			break;
5082c6fd2807SJeff Garzik 		}
5083c6fd2807SJeff Garzik 
5084c6fd2807SJeff Garzik 	if (qc)
5085c6fd2807SJeff Garzik 		qc->tag = i;
5086c6fd2807SJeff Garzik 
5087c6fd2807SJeff Garzik 	return qc;
5088c6fd2807SJeff Garzik }
5089c6fd2807SJeff Garzik 
5090c6fd2807SJeff Garzik /**
5091c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5092c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5093c6fd2807SJeff Garzik  *
5094c6fd2807SJeff Garzik  *	LOCKING:
5095c6fd2807SJeff Garzik  *	None.
5096c6fd2807SJeff Garzik  */
5097c6fd2807SJeff Garzik 
5098c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5099c6fd2807SJeff Garzik {
5100c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5101c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5102c6fd2807SJeff Garzik 
5103c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5104c6fd2807SJeff Garzik 	if (qc) {
5105c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5106c6fd2807SJeff Garzik 		qc->ap = ap;
5107c6fd2807SJeff Garzik 		qc->dev = dev;
5108c6fd2807SJeff Garzik 
5109c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5110c6fd2807SJeff Garzik 	}
5111c6fd2807SJeff Garzik 
5112c6fd2807SJeff Garzik 	return qc;
5113c6fd2807SJeff Garzik }
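/*
 * A minimal sketch (assumed flow, loosely mirroring ata_exec_internal()
 * and the SCSI translation path) of how a freshly allocated qc is
 * normally filled in and issued; "my_complete_fn" is a hypothetical
 * completion callback and the command is just a placeholder:
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(dev);
 *
 *	if (!qc)
 *		goto defer;			-- no free tag or port frozen
 *
 *	ata_tf_init(dev, &qc->tf);
 *	qc->tf.command  = ATA_CMD_FLUSH;	-- placeholder command
 *	qc->tf.protocol = ATA_PROT_NODATA;
 *	qc->complete_fn = my_complete_fn;
 *
 *	ata_qc_issue(qc);			-- with ap->lock held
 */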
5114c6fd2807SJeff Garzik 
5115c6fd2807SJeff Garzik /**
5116c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5117c6fd2807SJeff Garzik  *	@qc: Command to free
5118c6fd2807SJeff Garzik  *
5119c6fd2807SJeff Garzik  *	Designed to free unused ata_queued_cmd object
5120c6fd2807SJeff Garzik  *	in case something prevents using it.
5121c6fd2807SJeff Garzik  *
5122c6fd2807SJeff Garzik  *	LOCKING:
5123cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5124c6fd2807SJeff Garzik  */
5125c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5126c6fd2807SJeff Garzik {
5127c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5128c6fd2807SJeff Garzik 	unsigned int tag;
5129c6fd2807SJeff Garzik 
5130c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5131c6fd2807SJeff Garzik 
5132c6fd2807SJeff Garzik 	qc->flags = 0;
5133c6fd2807SJeff Garzik 	tag = qc->tag;
5134c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5135c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5136c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5137c6fd2807SJeff Garzik 	}
5138c6fd2807SJeff Garzik }
5139c6fd2807SJeff Garzik 
5140c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5141c6fd2807SJeff Garzik {
5142c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5143c6fd2807SJeff Garzik 
5144c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5145c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5146c6fd2807SJeff Garzik 
5147c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5148c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5149c6fd2807SJeff Garzik 
5150c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5151c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ)
5152c6fd2807SJeff Garzik 		ap->sactive &= ~(1 << qc->tag);
5153c6fd2807SJeff Garzik 	else
5154c6fd2807SJeff Garzik 		ap->active_tag = ATA_TAG_POISON;
5155c6fd2807SJeff Garzik 
5156c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5157c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5158c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5159c6fd2807SJeff Garzik 	 */
5160c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5161c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5162c6fd2807SJeff Garzik 
5163c6fd2807SJeff Garzik 	/* call completion callback */
5164c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5165c6fd2807SJeff Garzik }
5166c6fd2807SJeff Garzik 
516739599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
516839599a53STejun Heo {
516939599a53STejun Heo 	struct ata_port *ap = qc->ap;
517039599a53STejun Heo 
517139599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
51724742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
517339599a53STejun Heo }
517439599a53STejun Heo 
5175c6fd2807SJeff Garzik /**
5176c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5177c6fd2807SJeff Garzik  *	@qc: Command to complete
5179c6fd2807SJeff Garzik  *
5180c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5181c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5182c6fd2807SJeff Garzik  *
5183c6fd2807SJeff Garzik  *	LOCKING:
5184cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5185c6fd2807SJeff Garzik  */
5186c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5187c6fd2807SJeff Garzik {
5188c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5189c6fd2807SJeff Garzik 
5190c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5191c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5192c6fd2807SJeff Garzik 	 *
5193c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5194c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5195c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5196c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5197c6fd2807SJeff Garzik 	 *
5198c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5199c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5200c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5201c6fd2807SJeff Garzik 	 * taken care of.
5202c6fd2807SJeff Garzik 	 */
5203c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5204c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5205c6fd2807SJeff Garzik 
5206c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5207c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5208c6fd2807SJeff Garzik 
5209c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5210c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5211c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
521239599a53STejun Heo 				fill_result_tf(qc);
5213c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5214c6fd2807SJeff Garzik 				return;
5215c6fd2807SJeff Garzik 			}
5216c6fd2807SJeff Garzik 		}
5217c6fd2807SJeff Garzik 
5218c6fd2807SJeff Garzik 		/* read result TF if requested */
5219c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
522039599a53STejun Heo 			fill_result_tf(qc);
5221c6fd2807SJeff Garzik 
5222c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5223c6fd2807SJeff Garzik 	} else {
5224c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5225c6fd2807SJeff Garzik 			return;
5226c6fd2807SJeff Garzik 
5227c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5228c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
522939599a53STejun Heo 			fill_result_tf(qc);
5230c6fd2807SJeff Garzik 
5231c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5232c6fd2807SJeff Garzik 	}
5233c6fd2807SJeff Garzik }
5234c6fd2807SJeff Garzik 
5235c6fd2807SJeff Garzik /**
5236c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5237c6fd2807SJeff Garzik  *	@ap: port in question
5238c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5239c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5240c6fd2807SJeff Garzik  *
5241c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5242c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to complete
5243c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5244c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5245c6fd2807SJeff Garzik  *
5246c6fd2807SJeff Garzik  *	LOCKING:
5247cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5248c6fd2807SJeff Garzik  *
5249c6fd2807SJeff Garzik  *	RETURNS:
5250c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5251c6fd2807SJeff Garzik  */
5252c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5253c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5254c6fd2807SJeff Garzik {
5255c6fd2807SJeff Garzik 	int nr_done = 0;
5256c6fd2807SJeff Garzik 	u32 done_mask;
5257c6fd2807SJeff Garzik 	int i;
5258c6fd2807SJeff Garzik 
5259c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5260c6fd2807SJeff Garzik 
5261c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5262c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5263c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5264c6fd2807SJeff Garzik 		return -EINVAL;
5265c6fd2807SJeff Garzik 	}
5266c6fd2807SJeff Garzik 
5267c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5268c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5269c6fd2807SJeff Garzik 
5270c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5271c6fd2807SJeff Garzik 			continue;
5272c6fd2807SJeff Garzik 
5273c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5274c6fd2807SJeff Garzik 			if (finish_qc)
5275c6fd2807SJeff Garzik 				finish_qc(qc);
5276c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5277c6fd2807SJeff Garzik 			nr_done++;
5278c6fd2807SJeff Garzik 		}
5279c6fd2807SJeff Garzik 	}
5280c6fd2807SJeff Garzik 
5281c6fd2807SJeff Garzik 	return nr_done;
5282c6fd2807SJeff Garzik }
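/*
 * A minimal sketch (hypothetical register name, assumptions only) of how
 * an NCQ-capable LLD's interrupt handler typically uses this helper: read
 * the controller's "commands still outstanding" register and pass it in
 * as the new qc_active mask, so every bit that just went clear gets its
 * qc completed.
 *
 *	u32 qc_active = readl(port_mmio + MY_SACTIVE_REG);
 *
 *	ata_qc_complete_multiple(ap, qc_active, NULL);
 */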
5283c6fd2807SJeff Garzik 
5284c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5285c6fd2807SJeff Garzik {
5286c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5287c6fd2807SJeff Garzik 
5288c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5289c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5290c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5291c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5292c6fd2807SJeff Garzik 		return 1;
5293c6fd2807SJeff Garzik 
5294c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5295c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5296c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5297c6fd2807SJeff Garzik 			return 1;
5298c6fd2807SJeff Garzik 
5299c6fd2807SJeff Garzik 		/* fall through */
5300c6fd2807SJeff Garzik 
5301c6fd2807SJeff Garzik 	default:
5302c6fd2807SJeff Garzik 		return 0;
5303c6fd2807SJeff Garzik 	}
5304c6fd2807SJeff Garzik 
5305c6fd2807SJeff Garzik 	/* never reached */
5306c6fd2807SJeff Garzik }
5307c6fd2807SJeff Garzik 
5308c6fd2807SJeff Garzik /**
5309c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5310c6fd2807SJeff Garzik  *	@qc: command to issue to device
5311c6fd2807SJeff Garzik  *
5312c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5313c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5314c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5315c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5316c6fd2807SJeff Garzik  *
5317c6fd2807SJeff Garzik  *	LOCKING:
5318cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5319c6fd2807SJeff Garzik  */
5320c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5321c6fd2807SJeff Garzik {
5322c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5323c6fd2807SJeff Garzik 
5324c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5325c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5326c6fd2807SJeff Garzik 	 * request ATAPI sense.
5327c6fd2807SJeff Garzik 	 */
5328c6fd2807SJeff Garzik 	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5329c6fd2807SJeff Garzik 
5330c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
5331c6fd2807SJeff Garzik 		WARN_ON(ap->sactive & (1 << qc->tag));
5332c6fd2807SJeff Garzik 		ap->sactive |= 1 << qc->tag;
5333c6fd2807SJeff Garzik 	} else {
5334c6fd2807SJeff Garzik 		WARN_ON(ap->sactive);
5335c6fd2807SJeff Garzik 		ap->active_tag = qc->tag;
5336c6fd2807SJeff Garzik 	}
5337c6fd2807SJeff Garzik 
5338c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5339c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5340c6fd2807SJeff Garzik 
5341c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5342c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5343c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5344c6fd2807SJeff Garzik 				goto sg_err;
5345c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5346c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5347c6fd2807SJeff Garzik 				goto sg_err;
5348c6fd2807SJeff Garzik 		}
5349c6fd2807SJeff Garzik 	} else {
5350c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5351c6fd2807SJeff Garzik 	}
5352c6fd2807SJeff Garzik 
5353c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5354c6fd2807SJeff Garzik 
5355c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5356c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5357c6fd2807SJeff Garzik 		goto err;
5358c6fd2807SJeff Garzik 	return;
5359c6fd2807SJeff Garzik 
5360c6fd2807SJeff Garzik sg_err:
5361c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5362c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5363c6fd2807SJeff Garzik err:
5364c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5365c6fd2807SJeff Garzik }
5366c6fd2807SJeff Garzik 
5367c6fd2807SJeff Garzik /**
5368c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5369c6fd2807SJeff Garzik  *	@qc: command to issue to device
5370c6fd2807SJeff Garzik  *
5371c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5372c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5373c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5374c6fd2807SJeff Garzik  *	is slightly different.
5375c6fd2807SJeff Garzik  *
5376c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5377c6fd2807SJeff Garzik  *
5378c6fd2807SJeff Garzik  *	LOCKING:
5379cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5380c6fd2807SJeff Garzik  *
5381c6fd2807SJeff Garzik  *	RETURNS:
5382c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
5383c6fd2807SJeff Garzik  */
5384c6fd2807SJeff Garzik 
5385c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5386c6fd2807SJeff Garzik {
5387c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5388c6fd2807SJeff Garzik 
5389c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
5390c6fd2807SJeff Garzik 	 * interrupt-driven PIO and the ATAPI CDB interrupt.
5391c6fd2807SJeff Garzik 	 */
5392c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5393c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
5394c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
5395e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
5396c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
5397c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
5398c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
5399c6fd2807SJeff Garzik 			break;
5400c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
5401c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5402c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
5403c6fd2807SJeff Garzik 				BUG();
5404c6fd2807SJeff Garzik 			break;
5405c6fd2807SJeff Garzik 		default:
5406c6fd2807SJeff Garzik 			break;
5407c6fd2807SJeff Garzik 		}
5408c6fd2807SJeff Garzik 	}
5409c6fd2807SJeff Garzik 
5410c6fd2807SJeff Garzik 	/* select the device */
5411c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5412c6fd2807SJeff Garzik 
5413c6fd2807SJeff Garzik 	/* start the command */
5414c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5415c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
5416c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5417c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5418c6fd2807SJeff Garzik 
5419c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5420c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5421c6fd2807SJeff Garzik 
5422c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5423c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5424c6fd2807SJeff Garzik 
5425c6fd2807SJeff Garzik 		break;
5426c6fd2807SJeff Garzik 
5427c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5428c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5429c6fd2807SJeff Garzik 
5430c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5431c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5432c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5433c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5434c6fd2807SJeff Garzik 		break;
5435c6fd2807SJeff Garzik 
5436c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5437c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5438c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5439c6fd2807SJeff Garzik 
5440c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5441c6fd2807SJeff Garzik 
5442c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5443c6fd2807SJeff Garzik 			/* PIO data out protocol */
5444c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
5445c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5446c6fd2807SJeff Garzik 
5447c6fd2807SJeff Garzik 			/* always send first data block using
5448c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
5449c6fd2807SJeff Garzik 			 */
5450c6fd2807SJeff Garzik 		} else {
5451c6fd2807SJeff Garzik 			/* PIO data in protocol */
5452c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5453c6fd2807SJeff Garzik 
5454c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5455c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5456c6fd2807SJeff Garzik 
5457c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
5458c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
5459c6fd2807SJeff Garzik 			 */
5460c6fd2807SJeff Garzik 		}
5461c6fd2807SJeff Garzik 
5462c6fd2807SJeff Garzik 		break;
5463c6fd2807SJeff Garzik 
5464c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5465c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5466c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5467c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5468c6fd2807SJeff Garzik 
5469c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5470c6fd2807SJeff Garzik 
5471c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5472c6fd2807SJeff Garzik 
5473c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5474c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5475c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5476c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5477c6fd2807SJeff Garzik 		break;
5478c6fd2807SJeff Garzik 
5479c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5480c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5481c6fd2807SJeff Garzik 
5482c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5483c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5484c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5485c6fd2807SJeff Garzik 
5486c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5487c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5488c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5489c6fd2807SJeff Garzik 		break;
5490c6fd2807SJeff Garzik 
5491c6fd2807SJeff Garzik 	default:
5492c6fd2807SJeff Garzik 		WARN_ON(1);
5493c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
5494c6fd2807SJeff Garzik 	}
5495c6fd2807SJeff Garzik 
5496c6fd2807SJeff Garzik 	return 0;
5497c6fd2807SJeff Garzik }
5498c6fd2807SJeff Garzik 
5499c6fd2807SJeff Garzik /**
5500c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
5501c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
5502c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
5503c6fd2807SJeff Garzik  *
5504c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
5505c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
5506c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
5507c6fd2807SJeff Garzik  *
5508c6fd2807SJeff Garzik  *	LOCKING:
5509cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5510c6fd2807SJeff Garzik  *
5511c6fd2807SJeff Garzik  *	RETURNS:
5512c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
5513c6fd2807SJeff Garzik  */
5514c6fd2807SJeff Garzik 
5515c6fd2807SJeff Garzik inline unsigned int ata_host_intr (struct ata_port *ap,
5516c6fd2807SJeff Garzik 				   struct ata_queued_cmd *qc)
5517c6fd2807SJeff Garzik {
5518ea54763fSTejun Heo 	struct ata_eh_info *ehi = &ap->eh_info;
5519c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
5520c6fd2807SJeff Garzik 
5521c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
552244877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5523c6fd2807SJeff Garzik 
5524c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
5525c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5526c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5527c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
5528c6fd2807SJeff Garzik 		 * in this state when ready to receive the CDB.
5529c6fd2807SJeff Garzik 		 */
5530c6fd2807SJeff Garzik 
5531c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here;
5532c6fd2807SJeff Garzik 		 * the flag is set only for ATAPI devices, so there is
5533c6fd2807SJeff Garzik 		 * no need to check is_atapi_taskfile(&qc->tf) again.
5534c6fd2807SJeff Garzik 		 */
5535c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5536c6fd2807SJeff Garzik 			goto idle_irq;
5537c6fd2807SJeff Garzik 		break;
5538c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5539c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
5540c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5541c6fd2807SJeff Garzik 			/* check status of DMA engine */
5542c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
554344877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
554444877b4eSTejun Heo 				ap->print_id, host_stat);
5545c6fd2807SJeff Garzik 
5546c6fd2807SJeff Garzik 			/* if it's not our irq... */
5547c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
5548c6fd2807SJeff Garzik 				goto idle_irq;
5549c6fd2807SJeff Garzik 
5550c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
5551c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
5552c6fd2807SJeff Garzik 
5553c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
5554c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
5555c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
5556c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5557c6fd2807SJeff Garzik 			}
5558c6fd2807SJeff Garzik 		}
5559c6fd2807SJeff Garzik 		break;
5560c6fd2807SJeff Garzik 	case HSM_ST:
5561c6fd2807SJeff Garzik 		break;
5562c6fd2807SJeff Garzik 	default:
5563c6fd2807SJeff Garzik 		goto idle_irq;
5564c6fd2807SJeff Garzik 	}
5565c6fd2807SJeff Garzik 
5566c6fd2807SJeff Garzik 	/* check altstatus */
5567c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
5568c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
5569c6fd2807SJeff Garzik 		goto idle_irq;
5570c6fd2807SJeff Garzik 
5571c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
5572c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
5573c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
5574c6fd2807SJeff Garzik 		goto idle_irq;
5575c6fd2807SJeff Garzik 
5576c6fd2807SJeff Garzik 	/* ack bmdma irq events */
5577c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
5578c6fd2807SJeff Garzik 
5579c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
5580ea54763fSTejun Heo 
5581ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5582ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5583ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5584ea54763fSTejun Heo 
5585c6fd2807SJeff Garzik 	return 1;	/* irq handled */
5586c6fd2807SJeff Garzik 
5587c6fd2807SJeff Garzik idle_irq:
5588c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
5589c6fd2807SJeff Garzik 
5590c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
5591c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
559283625006SAkira Iguchi 		ap->ops->irq_ack(ap, 0); /* debug trap */
5593c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5594c6fd2807SJeff Garzik 		return 1;
5595c6fd2807SJeff Garzik 	}
5596c6fd2807SJeff Garzik #endif
5597c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
5598c6fd2807SJeff Garzik }
5599c6fd2807SJeff Garzik 
5600c6fd2807SJeff Garzik /**
5601c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
5602c6fd2807SJeff Garzik  *	@irq: irq line (unused)
5603cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
5604c6fd2807SJeff Garzik  *
5605c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
5606c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
5607c6fd2807SJeff Garzik  *
5608c6fd2807SJeff Garzik  *	LOCKING:
5609cca3974eSJeff Garzik  *	Obtains host lock during operation.
5610c6fd2807SJeff Garzik  *
5611c6fd2807SJeff Garzik  *	RETURNS:
5612c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
5613c6fd2807SJeff Garzik  */
5614c6fd2807SJeff Garzik 
56157d12e780SDavid Howells irqreturn_t ata_interrupt (int irq, void *dev_instance)
5616c6fd2807SJeff Garzik {
5617cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
5618c6fd2807SJeff Garzik 	unsigned int i;
5619c6fd2807SJeff Garzik 	unsigned int handled = 0;
5620c6fd2807SJeff Garzik 	unsigned long flags;
5621c6fd2807SJeff Garzik 
5622c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5623cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
5624c6fd2807SJeff Garzik 
5625cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5626c6fd2807SJeff Garzik 		struct ata_port *ap;
5627c6fd2807SJeff Garzik 
5628cca3974eSJeff Garzik 		ap = host->ports[i];
5629c6fd2807SJeff Garzik 		if (ap &&
5630c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
5631c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
5632c6fd2807SJeff Garzik 
5633c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, ap->active_tag);
5634c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5635c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
5636c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
5637c6fd2807SJeff Garzik 		}
5638c6fd2807SJeff Garzik 	}
5639c6fd2807SJeff Garzik 
5640cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
5641c6fd2807SJeff Garzik 
5642c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
5643c6fd2807SJeff Garzik }
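/*
 * A minimal sketch (assumed typical wiring) of how an LLD registers this
 * handler; DRV_NAME stands in for the driver's own name string and pdev
 * for its PCI device:
 *
 *	rc = request_irq(pdev->irq, ata_interrupt, IRQF_SHARED,
 *			 DRV_NAME, host);
 *
 * The struct ata_host pointer passed as dev_id is what comes back as
 * @dev_instance on every interrupt, which is why the loop above walks
 * host->ports[].
 */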
5644c6fd2807SJeff Garzik 
5645c6fd2807SJeff Garzik /**
5646c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
5647c6fd2807SJeff Garzik  *	@ap: ATA port to test SCR accessibility for
5648c6fd2807SJeff Garzik  *
5649c6fd2807SJeff Garzik  *	Test whether SCRs are accessible for @ap.
5650c6fd2807SJeff Garzik  *
5651c6fd2807SJeff Garzik  *	LOCKING:
5652c6fd2807SJeff Garzik  *	None.
5653c6fd2807SJeff Garzik  *
5654c6fd2807SJeff Garzik  *	RETURNS:
5655c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
5656c6fd2807SJeff Garzik  */
5657c6fd2807SJeff Garzik int sata_scr_valid(struct ata_port *ap)
5658c6fd2807SJeff Garzik {
5659c6fd2807SJeff Garzik 	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5660c6fd2807SJeff Garzik }
5661c6fd2807SJeff Garzik 
5662c6fd2807SJeff Garzik /**
5663c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
5664c6fd2807SJeff Garzik  *	@ap: ATA port to read SCR for
5665c6fd2807SJeff Garzik  *	@reg: SCR to read
5666c6fd2807SJeff Garzik  *	@val: Place to store read value
5667c6fd2807SJeff Garzik  *
5668c6fd2807SJeff Garzik  *	Read SCR register @reg of @ap into *@val.  This function is
5669c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5670c6fd2807SJeff Garzik  *	and the port implements ->scr_read.
5671c6fd2807SJeff Garzik  *
5672c6fd2807SJeff Garzik  *	LOCKING:
5673c6fd2807SJeff Garzik  *	None.
5674c6fd2807SJeff Garzik  *
5675c6fd2807SJeff Garzik  *	RETURNS:
5676c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5677c6fd2807SJeff Garzik  */
5678c6fd2807SJeff Garzik int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5679c6fd2807SJeff Garzik {
5680c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5681c6fd2807SJeff Garzik 		*val = ap->ops->scr_read(ap, reg);
5682c6fd2807SJeff Garzik 		return 0;
5683c6fd2807SJeff Garzik 	}
5684c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5685c6fd2807SJeff Garzik }
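/*
 * A minimal usage sketch (assumptions only): read SStatus and test the
 * DET field, which is exactly the check ata_port_online() below builds on.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		;	-- PHY reports an established device connection
 */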
5686c6fd2807SJeff Garzik 
5687c6fd2807SJeff Garzik /**
5688c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
5689c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5690c6fd2807SJeff Garzik  *	@reg: SCR to write
5691c6fd2807SJeff Garzik  *	@val: value to write
5692c6fd2807SJeff Garzik  *
5693c6fd2807SJeff Garzik  *	Write @val to SCR register @reg of @ap.  This function is
5694c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5695c6fd2807SJeff Garzik  *	and the port implements ->scr_read.
5696c6fd2807SJeff Garzik  *
5697c6fd2807SJeff Garzik  *	LOCKING:
5698c6fd2807SJeff Garzik  *	None.
5699c6fd2807SJeff Garzik  *
5700c6fd2807SJeff Garzik  *	RETURNS:
5701c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5702c6fd2807SJeff Garzik  */
5703c6fd2807SJeff Garzik int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5704c6fd2807SJeff Garzik {
5705c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5706c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5707c6fd2807SJeff Garzik 		return 0;
5708c6fd2807SJeff Garzik 	}
5709c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5710c6fd2807SJeff Garzik }
5711c6fd2807SJeff Garzik 
5712c6fd2807SJeff Garzik /**
5713c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
5714c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5715c6fd2807SJeff Garzik  *	@reg: SCR to write
5716c6fd2807SJeff Garzik  *	@val: value to write
5717c6fd2807SJeff Garzik  *
5718c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
5719c6fd2807SJeff Garzik  *	function performs a flush after writing to the register.
5720c6fd2807SJeff Garzik  *
5721c6fd2807SJeff Garzik  *	LOCKING:
5722c6fd2807SJeff Garzik  *	None.
5723c6fd2807SJeff Garzik  *
5724c6fd2807SJeff Garzik  *	RETURNS:
5725c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5726c6fd2807SJeff Garzik  */
5727c6fd2807SJeff Garzik int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5728c6fd2807SJeff Garzik {
5729c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5730c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5731c6fd2807SJeff Garzik 		ap->ops->scr_read(ap, reg);
5732c6fd2807SJeff Garzik 		return 0;
5733c6fd2807SJeff Garzik 	}
5734c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5735c6fd2807SJeff Garzik }
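/*
 * A minimal sketch (values mirror what the in-tree hardreset path does,
 * but this is illustration, not the canonical sequence) of why the
 * flushing variant matters: when pulsing DET in SControl to start
 * COMRESET, the write must reach the PHY before the delay starts counting.
 *
 *	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);	-- DET = 1, start COMRESET
 *	msleep(1);
 *	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);	-- DET = 0, back to normal
 */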
5736c6fd2807SJeff Garzik 
5737c6fd2807SJeff Garzik /**
5738c6fd2807SJeff Garzik  *	ata_port_online - test whether the given port is online
5739c6fd2807SJeff Garzik  *	@ap: ATA port to test
5740c6fd2807SJeff Garzik  *
5741c6fd2807SJeff Garzik  *	Test whether @ap is online.  Note that this function returns 0
5742c6fd2807SJeff Garzik  *	if online status of @ap cannot be obtained, so
5743c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5744c6fd2807SJeff Garzik  *
5745c6fd2807SJeff Garzik  *	LOCKING:
5746c6fd2807SJeff Garzik  *	None.
5747c6fd2807SJeff Garzik  *
5748c6fd2807SJeff Garzik  *	RETURNS:
5749c6fd2807SJeff Garzik  *	1 if the port online status is available and online.
5750c6fd2807SJeff Garzik  */
5751c6fd2807SJeff Garzik int ata_port_online(struct ata_port *ap)
5752c6fd2807SJeff Garzik {
5753c6fd2807SJeff Garzik 	u32 sstatus;
5754c6fd2807SJeff Garzik 
5755c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5756c6fd2807SJeff Garzik 		return 1;
5757c6fd2807SJeff Garzik 	return 0;
5758c6fd2807SJeff Garzik }
5759c6fd2807SJeff Garzik 
5760c6fd2807SJeff Garzik /**
5761c6fd2807SJeff Garzik  *	ata_port_offline - test whether the given port is offline
5762c6fd2807SJeff Garzik  *	@ap: ATA port to test
5763c6fd2807SJeff Garzik  *
5764c6fd2807SJeff Garzik  *	Test whether @ap is offline.  Note that this function returns
5765c6fd2807SJeff Garzik  *	0 if offline status of @ap cannot be obtained, so
5766c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5767c6fd2807SJeff Garzik  *
5768c6fd2807SJeff Garzik  *	LOCKING:
5769c6fd2807SJeff Garzik  *	None.
5770c6fd2807SJeff Garzik  *
5771c6fd2807SJeff Garzik  *	RETURNS:
5772c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
5773c6fd2807SJeff Garzik  */
5774c6fd2807SJeff Garzik int ata_port_offline(struct ata_port *ap)
5775c6fd2807SJeff Garzik {
5776c6fd2807SJeff Garzik 	u32 sstatus;
5777c6fd2807SJeff Garzik 
5778c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5779c6fd2807SJeff Garzik 		return 1;
5780c6fd2807SJeff Garzik 	return 0;
5781c6fd2807SJeff Garzik }
5782c6fd2807SJeff Garzik 
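/*
 * Illustrative sketch (not part of the original file): because both
 * helpers return 0 when link status cannot be obtained, callers should
 * test for the condition they actually care about rather than negate
 * the other helper.
 *
 *	if (ata_port_offline(ap)) {
 *		link is known to be down, e.g. skip further probing
 *	} else if (ata_port_online(ap)) {
 *		link is known to be up
 *	} else {
 *		status unknown (no SCR access), assume nothing
 *	}
 */
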
5783c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
5784c6fd2807SJeff Garzik {
5785c6fd2807SJeff Garzik 	unsigned int err_mask;
5786c6fd2807SJeff Garzik 	u8 cmd;
5787c6fd2807SJeff Garzik 
5788c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
5789c6fd2807SJeff Garzik 		return 0;
5790c6fd2807SJeff Garzik 
57916fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5792c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
5793c6fd2807SJeff Garzik 	else
5794c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
5795c6fd2807SJeff Garzik 
5796c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
5797c6fd2807SJeff Garzik 	if (err_mask) {
5798c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5799c6fd2807SJeff Garzik 		return -EIO;
5800c6fd2807SJeff Garzik 	}
5801c6fd2807SJeff Garzik 
5802c6fd2807SJeff Garzik 	return 0;
5803c6fd2807SJeff Garzik }
5804c6fd2807SJeff Garzik 
58056ffa01d8STejun Heo #ifdef CONFIG_PM
5806cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5807cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
5808cca3974eSJeff Garzik 			       int wait)
5809c6fd2807SJeff Garzik {
5810c6fd2807SJeff Garzik 	unsigned long flags;
5811c6fd2807SJeff Garzik 	int i, rc;
5812c6fd2807SJeff Garzik 
5813cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5814cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
5815c6fd2807SJeff Garzik 
5816c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
5817c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
5818c6fd2807SJeff Garzik 		 */
5819c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5820c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5821c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5822c6fd2807SJeff Garzik 		}
5823c6fd2807SJeff Garzik 
5824c6fd2807SJeff Garzik 		/* request PM ops to EH */
5825c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
5826c6fd2807SJeff Garzik 
5827c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
5828c6fd2807SJeff Garzik 		if (wait) {
5829c6fd2807SJeff Garzik 			rc = 0;
5830c6fd2807SJeff Garzik 			ap->pm_result = &rc;
5831c6fd2807SJeff Garzik 		}
5832c6fd2807SJeff Garzik 
5833c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5834c6fd2807SJeff Garzik 		ap->eh_info.action |= action;
5835c6fd2807SJeff Garzik 		ap->eh_info.flags |= ehi_flags;
5836c6fd2807SJeff Garzik 
5837c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
5838c6fd2807SJeff Garzik 
5839c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
5840c6fd2807SJeff Garzik 
5841c6fd2807SJeff Garzik 		/* wait and check result */
5842c6fd2807SJeff Garzik 		if (wait) {
5843c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5844c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5845c6fd2807SJeff Garzik 			if (rc)
5846c6fd2807SJeff Garzik 				return rc;
5847c6fd2807SJeff Garzik 		}
5848c6fd2807SJeff Garzik 	}
5849c6fd2807SJeff Garzik 
5850c6fd2807SJeff Garzik 	return 0;
5851c6fd2807SJeff Garzik }
5852c6fd2807SJeff Garzik 
5853c6fd2807SJeff Garzik /**
5854cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
5855cca3974eSJeff Garzik  *	@host: host to suspend
5856c6fd2807SJeff Garzik  *	@mesg: PM message
5857c6fd2807SJeff Garzik  *
5858cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
5859c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
5860c6fd2807SJeff Garzik  *	to finish.
5861c6fd2807SJeff Garzik  *
5862c6fd2807SJeff Garzik  *	LOCKING:
5863c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5864c6fd2807SJeff Garzik  *
5865c6fd2807SJeff Garzik  *	RETURNS:
5866c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
5867c6fd2807SJeff Garzik  */
5868cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5869c6fd2807SJeff Garzik {
58709666f400STejun Heo 	int rc;
5871c6fd2807SJeff Garzik 
5872cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
58739666f400STejun Heo 	if (rc == 0)
5874cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
5875c6fd2807SJeff Garzik 	return rc;
5876c6fd2807SJeff Garzik }
5877c6fd2807SJeff Garzik 
5878c6fd2807SJeff Garzik /**
5879cca3974eSJeff Garzik  *	ata_host_resume - resume host
5880cca3974eSJeff Garzik  *	@host: host to resume
5881c6fd2807SJeff Garzik  *
5882cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
5883c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
5884c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
5885c6fd2807SJeff Garzik  *
5886c6fd2807SJeff Garzik  *	LOCKING:
5887c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5888c6fd2807SJeff Garzik  */
5889cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
5890c6fd2807SJeff Garzik {
5891cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5892c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5893cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
5894c6fd2807SJeff Garzik }
58956ffa01d8STejun Heo #endif
5896c6fd2807SJeff Garzik 
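/*
 * Illustrative sketch (hypothetical non-PCI LLD, foo_* names are made
 * up): bus suspend/resume hooks typically just forward to the host
 * helpers above.
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		return ata_host_suspend(host, mesg);
 *	}
 *
 *	static int foo_resume(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */
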
5897c6fd2807SJeff Garzik /**
5898c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
5899c6fd2807SJeff Garzik  *	@ap: Port to initialize
5900c6fd2807SJeff Garzik  *
5901c6fd2807SJeff Garzik  *	Called just after data structures for each port are
5902c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
5903c6fd2807SJeff Garzik  *
5904c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
5905c6fd2807SJeff Garzik  *
5906c6fd2807SJeff Garzik  *	LOCKING:
5907c6fd2807SJeff Garzik  *	Inherited from caller.
5908c6fd2807SJeff Garzik  */
5909c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
5910c6fd2807SJeff Garzik {
5911c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
5912c6fd2807SJeff Garzik 	int rc;
5913c6fd2807SJeff Garzik 
5914f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5915f0d36efdSTejun Heo 				      GFP_KERNEL);
5916c6fd2807SJeff Garzik 	if (!ap->prd)
5917c6fd2807SJeff Garzik 		return -ENOMEM;
5918c6fd2807SJeff Garzik 
5919c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
5920f0d36efdSTejun Heo 	if (rc)
5921c6fd2807SJeff Garzik 		return rc;
5922c6fd2807SJeff Garzik 
5923f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5924f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
5925c6fd2807SJeff Garzik 	return 0;
5926c6fd2807SJeff Garzik }
5927c6fd2807SJeff Garzik 
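/*
 * Illustrative sketch (hypothetical driver, foo_* names are made up):
 * an LLD that needs per-port private data can wrap ata_port_start() in
 * its own port_start() hook and rely on devres for cleanup.
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		struct foo_priv *pp;
 *		int rc;
 *
 *		rc = ata_port_start(ap);	// allocates the PRD table
 *		if (rc)
 *			return rc;
 *
 *		pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */
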
5928c6fd2807SJeff Garzik /**
5929c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
5930c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
5931c6fd2807SJeff Garzik  *
5932c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
5933c6fd2807SJeff Garzik  *
5934c6fd2807SJeff Garzik  *	LOCKING:
5935c6fd2807SJeff Garzik  *	Inherited from caller.
5936c6fd2807SJeff Garzik  */
5937c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
5938c6fd2807SJeff Garzik {
5939c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5940c6fd2807SJeff Garzik 	unsigned long flags;
5941c6fd2807SJeff Garzik 
5942c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
5943c6fd2807SJeff Garzik 	ap->sata_spd_limit = ap->hw_sata_spd_limit;
5944c6fd2807SJeff Garzik 
5945c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
5946c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
5947cca3974eSJeff Garzik 	 * host lock.
5948c6fd2807SJeff Garzik 	 */
5949c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
5950c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5951c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
5952c6fd2807SJeff Garzik 
5953c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5954c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5955c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
5956c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
5957c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
5958c6fd2807SJeff Garzik }
5959c6fd2807SJeff Garzik 
5960c6fd2807SJeff Garzik /**
5961f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
5962f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
5963c6fd2807SJeff Garzik  *
5964f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
5965f3187195STejun Heo  *
5966f3187195STejun Heo  *	RETURNS:
5967f3187195STejun Heo  *	Allocate ATA port on success, NULL on failure.
5968c6fd2807SJeff Garzik  *
5969c6fd2807SJeff Garzik  *	LOCKING:
5970f3187195STejun Heo  *	Inherited from calling layer (may sleep).
5971c6fd2807SJeff Garzik  */
5972f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
5973c6fd2807SJeff Garzik {
5974f3187195STejun Heo 	struct ata_port *ap;
5975c6fd2807SJeff Garzik 	unsigned int i;
5976c6fd2807SJeff Garzik 
5977f3187195STejun Heo 	DPRINTK("ENTER\n");
5978f3187195STejun Heo 
5979f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5980f3187195STejun Heo 	if (!ap)
5981f3187195STejun Heo 		return NULL;
5982f3187195STejun Heo 
5983f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5984cca3974eSJeff Garzik 	ap->lock = &host->lock;
5985c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
5986f3187195STejun Heo 	ap->print_id = -1;
5987c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
5988cca3974eSJeff Garzik 	ap->host = host;
5989f3187195STejun Heo 	ap->dev = host->dev;
5990f3187195STejun Heo 
5991c6fd2807SJeff Garzik 	ap->hw_sata_spd_limit = UINT_MAX;
5992c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
5993c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
5994c6fd2807SJeff Garzik 
5995c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
5996c6fd2807SJeff Garzik 	/* turn on all debugging levels */
5997c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
5998c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
5999c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6000c6fd2807SJeff Garzik #else
6001c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6002c6fd2807SJeff Garzik #endif
6003c6fd2807SJeff Garzik 
600465f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
600565f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
600665f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6007c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6008c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
6009c6fd2807SJeff Garzik 
6010c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6011c6fd2807SJeff Garzik 
6012c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
6013c6fd2807SJeff Garzik 		struct ata_device *dev = &ap->device[i];
6014c6fd2807SJeff Garzik 		dev->ap = ap;
6015c6fd2807SJeff Garzik 		dev->devno = i;
6016c6fd2807SJeff Garzik 		ata_dev_init(dev);
6017c6fd2807SJeff Garzik 	}
6018c6fd2807SJeff Garzik 
6019c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6020c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6021c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6022c6fd2807SJeff Garzik #endif
6023c6fd2807SJeff Garzik 	return ap;
6024c6fd2807SJeff Garzik }
6025c6fd2807SJeff Garzik 
6026f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6027f0d36efdSTejun Heo {
6028f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6029f0d36efdSTejun Heo 	int i;
6030f0d36efdSTejun Heo 
6031f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6032f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6033f0d36efdSTejun Heo 
6034ecef7253STejun Heo 		if (!ap)
6035ecef7253STejun Heo 			continue;
6036ecef7253STejun Heo 
6037ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6038f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6039f0d36efdSTejun Heo 	}
6040f0d36efdSTejun Heo 
6041ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6042f0d36efdSTejun Heo 		host->ops->host_stop(host);
60431aa56ccaSTejun Heo 
60441aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
60451aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
60461aa506e4STejun Heo 
60474911487aSTejun Heo 		if (!ap)
60484911487aSTejun Heo 			continue;
60494911487aSTejun Heo 
60504911487aSTejun Heo 		if (ap->scsi_host)
60511aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
60521aa506e4STejun Heo 
60534911487aSTejun Heo 		kfree(ap);
60541aa506e4STejun Heo 		host->ports[i] = NULL;
60551aa506e4STejun Heo 	}
60561aa506e4STejun Heo 
60571aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6058f0d36efdSTejun Heo }
6059f0d36efdSTejun Heo 
6060c6fd2807SJeff Garzik /**
6061f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6062f3187195STejun Heo  *	@dev: generic device this host is associated with
6063f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6064f3187195STejun Heo  *
6065f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6066f3187195STejun Heo  *	this function to allocate a host, then fully initializes it and
6067f3187195STejun Heo  *	attaches it using ata_host_register().
6068f3187195STejun Heo  *
6069f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6070f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6071f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6072f3187195STejun Heo  *	ports will be automatically freed on registration.
6073f3187195STejun Heo  *
6074f3187195STejun Heo  *	RETURNS:
6075f3187195STejun Heo  *	Allocate ATA host on success, NULL on failure.
6076f3187195STejun Heo  *
6077f3187195STejun Heo  *	LOCKING:
6078f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6079f3187195STejun Heo  */
6080f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6081f3187195STejun Heo {
6082f3187195STejun Heo 	struct ata_host *host;
6083f3187195STejun Heo 	size_t sz;
6084f3187195STejun Heo 	int i;
6085f3187195STejun Heo 
6086f3187195STejun Heo 	DPRINTK("ENTER\n");
6087f3187195STejun Heo 
6088f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6089f3187195STejun Heo 		return NULL;
6090f3187195STejun Heo 
6091f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6092f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6094f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6095f3187195STejun Heo 	if (!host)
6096f3187195STejun Heo 		goto err_out;
6097f3187195STejun Heo 
6098f3187195STejun Heo 	devres_add(dev, host);
6099f3187195STejun Heo 	dev_set_drvdata(dev, host);
6100f3187195STejun Heo 
6101f3187195STejun Heo 	spin_lock_init(&host->lock);
6102f3187195STejun Heo 	host->dev = dev;
6103f3187195STejun Heo 	host->n_ports = max_ports;
6104f3187195STejun Heo 
6105f3187195STejun Heo 	/* allocate ports bound to this host */
6106f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6107f3187195STejun Heo 		struct ata_port *ap;
6108f3187195STejun Heo 
6109f3187195STejun Heo 		ap = ata_port_alloc(host);
6110f3187195STejun Heo 		if (!ap)
6111f3187195STejun Heo 			goto err_out;
6112f3187195STejun Heo 
6113f3187195STejun Heo 		ap->port_no = i;
6114f3187195STejun Heo 		host->ports[i] = ap;
6115f3187195STejun Heo 	}
6116f3187195STejun Heo 
6117f3187195STejun Heo 	devres_remove_group(dev, NULL);
6118f3187195STejun Heo 	return host;
6119f3187195STejun Heo 
6120f3187195STejun Heo  err_out:
6121f3187195STejun Heo 	devres_release_group(dev, NULL);
6122f3187195STejun Heo 	return NULL;
6123f3187195STejun Heo }
6124f3187195STejun Heo 
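/*
 * Illustrative sketch (hypothetical driver, FOO_MAX_PORTS and the port
 * count are made up): an LLD that cannot know the port count up front
 * allocates the maximum and shrinks n_ports before registration; the
 * unused ports are freed by ata_host_register().
 *
 *	host = ata_host_alloc(&pdev->dev, FOO_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *	... probe the hardware ...
 *	host->n_ports = nr_ports_actually_found;
 */
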
6125f3187195STejun Heo /**
6126f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6127f5cda257STejun Heo  *	@dev: generic device this host is associated with
6128f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6129f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6130f5cda257STejun Heo  *
6131f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6132f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If it is
6133f5cda257STejun Heo  *	NULL-terminated, @ppi may contain fewer entries than @n_ports.  The
6134f5cda257STejun Heo  *
6135f5cda257STejun Heo  *	RETURNS:
6136f5cda257STejun Heo  *	Allocate ATA host on success, NULL on failure.
6137f5cda257STejun Heo  *
6138f5cda257STejun Heo  *	LOCKING:
6139f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6140f5cda257STejun Heo  */
6141f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6142f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6143f5cda257STejun Heo 				      int n_ports)
6144f5cda257STejun Heo {
6145f5cda257STejun Heo 	const struct ata_port_info *pi;
6146f5cda257STejun Heo 	struct ata_host *host;
6147f5cda257STejun Heo 	int i, j;
6148f5cda257STejun Heo 
6149f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6150f5cda257STejun Heo 	if (!host)
6151f5cda257STejun Heo 		return NULL;
6152f5cda257STejun Heo 
6153f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6154f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6155f5cda257STejun Heo 
6156f5cda257STejun Heo 		if (ppi[j])
6157f5cda257STejun Heo 			pi = ppi[j++];
6158f5cda257STejun Heo 
6159f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6160f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6161f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6162f5cda257STejun Heo 		ap->flags |= pi->flags;
6163f5cda257STejun Heo 		ap->ops = pi->port_ops;
6164f5cda257STejun Heo 
6165f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6166f5cda257STejun Heo 			host->ops = pi->port_ops;
6167f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6168f5cda257STejun Heo 			host->private_data = pi->private_data;
6169f5cda257STejun Heo 	}
6170f5cda257STejun Heo 
6171f5cda257STejun Heo 	return host;
6172f5cda257STejun Heo }
6173f5cda257STejun Heo 
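/*
 * Illustrative sketch (hypothetical driver, foo_* names and mask values
 * are made up): because @ppi may be NULL terminated, a single port_info
 * can describe every port of a multi-port controller.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,
 *		.udma_mask	= 0x7f,
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
 */
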
6174f5cda257STejun Heo /**
6175ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6176ecef7253STejun Heo  *	@host: ATA host to start ports for
6177ecef7253STejun Heo  *
6178ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6179ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6180ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6181f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6182f3187195STejun Heo  *	first non-dummy port ops.
6183ecef7253STejun Heo  *
6184ecef7253STejun Heo  *	LOCKING:
6185ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6186ecef7253STejun Heo  *
6187ecef7253STejun Heo  *	RETURNS:
6188ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6189ecef7253STejun Heo  */
6190ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6191ecef7253STejun Heo {
6192ecef7253STejun Heo 	int i, rc;
6193ecef7253STejun Heo 
6194ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6195ecef7253STejun Heo 		return 0;
6196ecef7253STejun Heo 
6197ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6198ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6199ecef7253STejun Heo 
6200f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6201f3187195STejun Heo 			host->ops = ap->ops;
6202f3187195STejun Heo 
6203ecef7253STejun Heo 		if (ap->ops->port_start) {
6204ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6205ecef7253STejun Heo 			if (rc) {
6206ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
6207ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
6208ecef7253STejun Heo 				goto err_out;
6209ecef7253STejun Heo 			}
6210ecef7253STejun Heo 		}
6211ecef7253STejun Heo 
6212ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6213ecef7253STejun Heo 	}
6214ecef7253STejun Heo 
6215ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6216ecef7253STejun Heo 	return 0;
6217ecef7253STejun Heo 
6218ecef7253STejun Heo  err_out:
6219ecef7253STejun Heo 	while (--i >= 0) {
6220ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6221ecef7253STejun Heo 
6222ecef7253STejun Heo 		if (ap->ops->port_stop)
6223ecef7253STejun Heo 			ap->ops->port_stop(ap);
6224ecef7253STejun Heo 	}
6225ecef7253STejun Heo 	return rc;
6226ecef7253STejun Heo }
6227ecef7253STejun Heo 
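/*
 * Illustrative sketch (hypothetical driver, foo_sht is made up): an LLD
 * that needs extra setup between starting and registering, for example
 * requesting more than one IRQ, calls the two steps itself instead of
 * using ata_host_activate().
 *
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	... driver-specific setup, e.g. devm_request_irq() per channel ...
 *	return ata_host_register(host, &foo_sht);
 */
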
6228ecef7253STejun Heo /**
6229cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
6230cca3974eSJeff Garzik  *	@host:	host to initialize
6231cca3974eSJeff Garzik  *	@dev:	device host is attached to
6232cca3974eSJeff Garzik  *	@flags:	host flags
6233c6fd2807SJeff Garzik  *	@ops:	port_ops
6234c6fd2807SJeff Garzik  *
6235c6fd2807SJeff Garzik  *	LOCKING:
6236c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6237c6fd2807SJeff Garzik  *
6238c6fd2807SJeff Garzik  */
6239f3187195STejun Heo /* KILLME - the only user left is ipr */
6240cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6241cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6242c6fd2807SJeff Garzik {
6243cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6244cca3974eSJeff Garzik 	host->dev = dev;
6245cca3974eSJeff Garzik 	host->flags = flags;
6246cca3974eSJeff Garzik 	host->ops = ops;
6247c6fd2807SJeff Garzik }
6248c6fd2807SJeff Garzik 
6249c6fd2807SJeff Garzik /**
6250f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6251f3187195STejun Heo  *	@host: ATA host to register
6252f3187195STejun Heo  *	@sht: template for SCSI host
6253c6fd2807SJeff Garzik  *
6254f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
6255f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
6256f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
6257f3187195STejun Heo  *	probes registered devices.
6258c6fd2807SJeff Garzik  *
6259c6fd2807SJeff Garzik  *	LOCKING:
6260f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6261c6fd2807SJeff Garzik  *
6262c6fd2807SJeff Garzik  *	RETURNS:
6263f3187195STejun Heo  *	0 on success, -errno otherwise.
6264c6fd2807SJeff Garzik  */
6265f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6266c6fd2807SJeff Garzik {
6267f3187195STejun Heo 	int i, rc;
6268c6fd2807SJeff Garzik 
6269f3187195STejun Heo 	/* host must have been started */
6270f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
6271f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
6272f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
6273f3187195STejun Heo 		WARN_ON(1);
6274f3187195STejun Heo 		return -EINVAL;
627502f076aaSAlan Cox 	}
6276f0d36efdSTejun Heo 
6277f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
6278f3187195STejun Heo 	 * determine the exact number of ports to allocate at
6279f3187195STejun Heo 	 * allocation time.
6280f3187195STejun Heo 	 */
6281f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
6282f3187195STejun Heo 		kfree(host->ports[i]);
6283f0d36efdSTejun Heo 
6284f3187195STejun Heo 	/* give ports names and add SCSI hosts */
6285f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
6286f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
6287c6fd2807SJeff Garzik 
6288f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
6289ecef7253STejun Heo 	if (rc)
6290f3187195STejun Heo 		return rc;
6291ecef7253STejun Heo 
6292fafbae87STejun Heo 	/* associate with ACPI nodes */
6293fafbae87STejun Heo 	ata_acpi_associate(host);
6294fafbae87STejun Heo 
6295f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
6296cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6297cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6298f3187195STejun Heo 		int irq_line;
6299c6fd2807SJeff Garzik 		u32 scontrol;
6300f3187195STejun Heo 		unsigned long xfer_mask;
6301f3187195STejun Heo 
6302f3187195STejun Heo 		/* set SATA cable type if still unset */
6303f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6304f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
6305c6fd2807SJeff Garzik 
6306c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
6307c6fd2807SJeff Garzik 		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6308c6fd2807SJeff Garzik 			int spd = (scontrol >> 4) & 0xf;
6309afe3cc51STejun Heo 			if (spd)
6310c6fd2807SJeff Garzik 				ap->hw_sata_spd_limit &= (1 << spd) - 1;
6311c6fd2807SJeff Garzik 		}
6312c6fd2807SJeff Garzik 		ap->sata_spd_limit = ap->hw_sata_spd_limit;
6313c6fd2807SJeff Garzik 
6314f3187195STejun Heo 		/* report the secondary IRQ for second channel legacy */
6315f3187195STejun Heo 		irq_line = host->irq;
6316f3187195STejun Heo 		if (i == 1 && host->irq2)
6317f3187195STejun Heo 			irq_line = host->irq2;
6318f3187195STejun Heo 
6319f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6320f3187195STejun Heo 					      ap->udma_mask);
6321f3187195STejun Heo 
6322f3187195STejun Heo 		/* print per-port info to dmesg */
6323f3187195STejun Heo 		if (!ata_port_is_dummy(ap))
6324f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6325f3187195STejun Heo 					"ctl 0x%p bmdma 0x%p irq %d\n",
6326f3187195STejun Heo 					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
6327f3187195STejun Heo 					ata_mode_string(xfer_mask),
6328f3187195STejun Heo 					ap->ioaddr.cmd_addr,
6329f3187195STejun Heo 					ap->ioaddr.ctl_addr,
6330f3187195STejun Heo 					ap->ioaddr.bmdma_addr,
6331f3187195STejun Heo 					irq_line);
6332f3187195STejun Heo 		else
6333f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6334c6fd2807SJeff Garzik 	}
6335c6fd2807SJeff Garzik 
6336f3187195STejun Heo 	/* perform each probe synchronously */
6337f3187195STejun Heo 	DPRINTK("probe begin\n");
6338f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6339f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
6340f3187195STejun Heo 		int rc;
6341f3187195STejun Heo 
6342f3187195STejun Heo 		/* probe */
6343c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
6344c6fd2807SJeff Garzik 			struct ata_eh_info *ehi = &ap->eh_info;
6345c6fd2807SJeff Garzik 			unsigned long flags;
6346c6fd2807SJeff Garzik 
6347c6fd2807SJeff Garzik 			ata_port_probe(ap);
6348c6fd2807SJeff Garzik 
6349c6fd2807SJeff Garzik 			/* kick EH for boot probing */
6350c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
6351c6fd2807SJeff Garzik 
6352c6fd2807SJeff Garzik 			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6353c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
6354c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6355c6fd2807SJeff Garzik 
6356f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6357c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
6358c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
6359c6fd2807SJeff Garzik 
6360c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
6361c6fd2807SJeff Garzik 
6362c6fd2807SJeff Garzik 			/* wait for EH to finish */
6363c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6364c6fd2807SJeff Garzik 		} else {
636544877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6366c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
636744877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6368c6fd2807SJeff Garzik 
6369c6fd2807SJeff Garzik 			if (rc) {
6370c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
6371c6fd2807SJeff Garzik 				 * Current libata behavior will
6372c6fd2807SJeff Garzik 				 * tear down everything when
6373c6fd2807SJeff Garzik 				 * the module is removed
6374c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
6375c6fd2807SJeff Garzik 				 */
6376c6fd2807SJeff Garzik 			}
6377c6fd2807SJeff Garzik 		}
6378c6fd2807SJeff Garzik 	}
6379c6fd2807SJeff Garzik 
6380c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
6381c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
6382cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6383cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6384c6fd2807SJeff Garzik 
6385c6fd2807SJeff Garzik 		ata_scsi_scan_host(ap);
6386c6fd2807SJeff Garzik 	}
6387c6fd2807SJeff Garzik 
6388f3187195STejun Heo 	return 0;
6389f3187195STejun Heo }
6390f3187195STejun Heo 
6391f3187195STejun Heo /**
6392f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
6393f5cda257STejun Heo  *	@host: target ATA host
6394f5cda257STejun Heo  *	@irq: IRQ to request
6395f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
6396f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
6397f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
6398f5cda257STejun Heo  *
6399f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
6400f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
6401f5cda257STejun Heo  *	request IRQ and register it.  This helper takes necessasry
6402f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
6403f5cda257STejun Heo  *
6404f5cda257STejun Heo  *	LOCKING:
6405f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6406f5cda257STejun Heo  *
6407f5cda257STejun Heo  *	RETURNS:
6408f5cda257STejun Heo  *	0 on success, -errno otherwise.
6409f5cda257STejun Heo  */
6410f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
6411f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
6412f5cda257STejun Heo 		      struct scsi_host_template *sht)
6413f5cda257STejun Heo {
6414f5cda257STejun Heo 	int rc;
6415f5cda257STejun Heo 
6416f5cda257STejun Heo 	rc = ata_host_start(host);
6417f5cda257STejun Heo 	if (rc)
6418f5cda257STejun Heo 		return rc;
6419f5cda257STejun Heo 
6420f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6421f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
6422f5cda257STejun Heo 	if (rc)
6423f5cda257STejun Heo 		return rc;
6424f5cda257STejun Heo 
64254031826bSTejun Heo 	/* Used to print device info at probe */
64264031826bSTejun Heo 	host->irq = irq;
64274031826bSTejun Heo 
6428f5cda257STejun Heo 	rc = ata_host_register(host, sht);
6429f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
6430f5cda257STejun Heo 	if (rc)
6431f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
6432f5cda257STejun Heo 
6433f5cda257STejun Heo 	return rc;
6434f5cda257STejun Heo }
6435f5cda257STejun Heo 
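/*
 * Illustrative sketch (hypothetical PCI driver, all foo_* identifiers
 * are made up): a typical probe pairs ata_host_alloc_pinfo() with this
 * helper.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		... iomap BARs and fill each port's ap->ioaddr ...
 *
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */
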
6436f5cda257STejun Heo /**
6437c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
6438c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
6439c6fd2807SJeff Garzik  *
6440c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
6441c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
6442c6fd2807SJeff Garzik  *	be quiescent on return from this function.
6443c6fd2807SJeff Garzik  *
6444c6fd2807SJeff Garzik  *	LOCKING:
6445c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6446c6fd2807SJeff Garzik  */
6447c6fd2807SJeff Garzik void ata_port_detach(struct ata_port *ap)
6448c6fd2807SJeff Garzik {
6449c6fd2807SJeff Garzik 	unsigned long flags;
6450c6fd2807SJeff Garzik 	int i;
6451c6fd2807SJeff Garzik 
6452c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
6453c6fd2807SJeff Garzik 		goto skip_eh;
6454c6fd2807SJeff Garzik 
6455c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
6456c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6457c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
6458c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6459c6fd2807SJeff Garzik 
6460c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6461c6fd2807SJeff Garzik 
6462c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
6463c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
6464c6fd2807SJeff Garzik 	 */
6465c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6466c6fd2807SJeff Garzik 
6467c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
6468c6fd2807SJeff Garzik 		ata_dev_disable(&ap->device[i]);
6469c6fd2807SJeff Garzik 
6470c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6471c6fd2807SJeff Garzik 
6472c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6473c6fd2807SJeff Garzik  *	will be skipped and retries will be terminated with bad
6474c6fd2807SJeff Garzik 	 * target.
6475c6fd2807SJeff Garzik 	 */
6476c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6477c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
6478c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6479c6fd2807SJeff Garzik 
6480c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6481c6fd2807SJeff Garzik 
6482c6fd2807SJeff Garzik 	/* Flush hotplug task.  The sequence is similar to
6483c6fd2807SJeff Garzik 	 * ata_port_flush_task().
6484c6fd2807SJeff Garzik 	 */
648528e53bddSOleg Nesterov 	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
6486c6fd2807SJeff Garzik 	cancel_delayed_work(&ap->hotplug_task);
648728e53bddSOleg Nesterov 	cancel_work_sync(&ap->hotplug_task.work);
6488c6fd2807SJeff Garzik 
6489c6fd2807SJeff Garzik  skip_eh:
6490c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
6491cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
6492c6fd2807SJeff Garzik }
6493c6fd2807SJeff Garzik 
6494c6fd2807SJeff Garzik /**
64950529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
64960529c159STejun Heo  *	@host: Host to detach
64970529c159STejun Heo  *
64980529c159STejun Heo  *	Detach all ports of @host.
64990529c159STejun Heo  *
65000529c159STejun Heo  *	LOCKING:
65010529c159STejun Heo  *	Kernel thread context (may sleep).
65020529c159STejun Heo  */
65030529c159STejun Heo void ata_host_detach(struct ata_host *host)
65040529c159STejun Heo {
65050529c159STejun Heo 	int i;
65060529c159STejun Heo 
65070529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
65080529c159STejun Heo 		ata_port_detach(host->ports[i]);
65090529c159STejun Heo }
65100529c159STejun Heo 
6511c6fd2807SJeff Garzik /**
6512c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
6513c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
6514c6fd2807SJeff Garzik  *
6515c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
6516c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6517c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
6518c6fd2807SJeff Garzik  *	relative to cmd_addr.
6519c6fd2807SJeff Garzik  *
6520c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6521c6fd2807SJeff Garzik  */
6522c6fd2807SJeff Garzik 
6523c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
6524c6fd2807SJeff Garzik {
6525c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6526c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6527c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6528c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6529c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6530c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6531c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6532c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6533c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6534c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6535c6fd2807SJeff Garzik }
6536c6fd2807SJeff Garzik 
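/*
 * Illustrative sketch (hypothetical driver, the 0x0e control offset is
 * only an example): after iomapping the taskfile block, an LLD sets
 * cmd_addr and the addresses this helper does not touch, then lets
 * ata_std_ports() fill in the rest.
 *
 *	void __iomem *base = ...;	// iomapped command block
 *
 *	ap->ioaddr.cmd_addr = base;
 *	ap->ioaddr.altstatus_addr =
 *	ap->ioaddr.ctl_addr = base + 0x0e;
 *	ata_std_ports(&ap->ioaddr);
 */
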
6537c6fd2807SJeff Garzik 
6538c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6539c6fd2807SJeff Garzik 
6540c6fd2807SJeff Garzik /**
6541c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
6542c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
6543c6fd2807SJeff Garzik  *
6544b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
6545b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
6546b878ca5dSTejun Heo  *	release is handled via devres.
6547c6fd2807SJeff Garzik  *
6548c6fd2807SJeff Garzik  *	LOCKING:
6549c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
6550c6fd2807SJeff Garzik  */
6551c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
6552c6fd2807SJeff Garzik {
6553c6fd2807SJeff Garzik 	struct device *dev = pci_dev_to_dev(pdev);
6554cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
6555c6fd2807SJeff Garzik 
6556f0d36efdSTejun Heo 	ata_host_detach(host);
6557c6fd2807SJeff Garzik }
6558c6fd2807SJeff Garzik 
6559c6fd2807SJeff Garzik /* move to PCI subsystem */
6560c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6561c6fd2807SJeff Garzik {
6562c6fd2807SJeff Garzik 	unsigned long tmp = 0;
6563c6fd2807SJeff Garzik 
6564c6fd2807SJeff Garzik 	switch (bits->width) {
6565c6fd2807SJeff Garzik 	case 1: {
6566c6fd2807SJeff Garzik 		u8 tmp8 = 0;
6567c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6568c6fd2807SJeff Garzik 		tmp = tmp8;
6569c6fd2807SJeff Garzik 		break;
6570c6fd2807SJeff Garzik 	}
6571c6fd2807SJeff Garzik 	case 2: {
6572c6fd2807SJeff Garzik 		u16 tmp16 = 0;
6573c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
6574c6fd2807SJeff Garzik 		tmp = tmp16;
6575c6fd2807SJeff Garzik 		break;
6576c6fd2807SJeff Garzik 	}
6577c6fd2807SJeff Garzik 	case 4: {
6578c6fd2807SJeff Garzik 		u32 tmp32 = 0;
6579c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6580c6fd2807SJeff Garzik 		tmp = tmp32;
6581c6fd2807SJeff Garzik 		break;
6582c6fd2807SJeff Garzik 	}
6583c6fd2807SJeff Garzik 
6584c6fd2807SJeff Garzik 	default:
6585c6fd2807SJeff Garzik 		return -EINVAL;
6586c6fd2807SJeff Garzik 	}
6587c6fd2807SJeff Garzik 
6588c6fd2807SJeff Garzik 	tmp &= bits->mask;
6589c6fd2807SJeff Garzik 
6590c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
6591c6fd2807SJeff Garzik }
6592c6fd2807SJeff Garzik 
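/*
 * Illustrative sketch (hypothetical device, register 0x41 and bit 0x80
 * are made up): the pci_bits descriptor names a config register, its
 * width in bytes, a mask, and the value that means "enabled".
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// reg, width, mask, val
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[0]))
 *		the port is disabled in config space, skip it
 */
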
65936ffa01d8STejun Heo #ifdef CONFIG_PM
6594c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6595c6fd2807SJeff Garzik {
6596c6fd2807SJeff Garzik 	pci_save_state(pdev);
6597c6fd2807SJeff Garzik 	pci_disable_device(pdev);
65984c90d971STejun Heo 
65994c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
6600c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
6601c6fd2807SJeff Garzik }
6602c6fd2807SJeff Garzik 
6603553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
6604c6fd2807SJeff Garzik {
6605553c4aa6STejun Heo 	int rc;
6606553c4aa6STejun Heo 
6607c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
6608c6fd2807SJeff Garzik 	pci_restore_state(pdev);
6609553c4aa6STejun Heo 
6610f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
6611553c4aa6STejun Heo 	if (rc) {
6612553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
6613553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
6614553c4aa6STejun Heo 		return rc;
6615553c4aa6STejun Heo 	}
6616553c4aa6STejun Heo 
6617c6fd2807SJeff Garzik 	pci_set_master(pdev);
6618553c4aa6STejun Heo 	return 0;
6619c6fd2807SJeff Garzik }
6620c6fd2807SJeff Garzik 
6621c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6622c6fd2807SJeff Garzik {
6623cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6624c6fd2807SJeff Garzik 	int rc = 0;
6625c6fd2807SJeff Garzik 
6626cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
6627c6fd2807SJeff Garzik 	if (rc)
6628c6fd2807SJeff Garzik 		return rc;
6629c6fd2807SJeff Garzik 
6630c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
6631c6fd2807SJeff Garzik 
6632c6fd2807SJeff Garzik 	return 0;
6633c6fd2807SJeff Garzik }
6634c6fd2807SJeff Garzik 
6635c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
6636c6fd2807SJeff Garzik {
6637cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6638553c4aa6STejun Heo 	int rc;
6639c6fd2807SJeff Garzik 
6640553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
6641553c4aa6STejun Heo 	if (rc == 0)
6642cca3974eSJeff Garzik 		ata_host_resume(host);
6643553c4aa6STejun Heo 	return rc;
6644c6fd2807SJeff Garzik }
66456ffa01d8STejun Heo #endif /* CONFIG_PM */
66466ffa01d8STejun Heo 
6647c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6648c6fd2807SJeff Garzik 
6649c6fd2807SJeff Garzik 
6650c6fd2807SJeff Garzik static int __init ata_init(void)
6651c6fd2807SJeff Garzik {
6652c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
6653c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
6654c6fd2807SJeff Garzik 	if (!ata_wq)
6655c6fd2807SJeff Garzik 		return -ENOMEM;
6656c6fd2807SJeff Garzik 
6657c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6658c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
6659c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
6660c6fd2807SJeff Garzik 		return -ENOMEM;
6661c6fd2807SJeff Garzik 	}
6662c6fd2807SJeff Garzik 
6663c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6664c6fd2807SJeff Garzik 	return 0;
6665c6fd2807SJeff Garzik }
6666c6fd2807SJeff Garzik 
6667c6fd2807SJeff Garzik static void __exit ata_exit(void)
6668c6fd2807SJeff Garzik {
6669c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
6670c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
6671c6fd2807SJeff Garzik }
6672c6fd2807SJeff Garzik 
6673a4625085SBrian King subsys_initcall(ata_init);
6674c6fd2807SJeff Garzik module_exit(ata_exit);
6675c6fd2807SJeff Garzik 
6676c6fd2807SJeff Garzik static unsigned long ratelimit_time;
6677c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
6678c6fd2807SJeff Garzik 
6679c6fd2807SJeff Garzik int ata_ratelimit(void)
6680c6fd2807SJeff Garzik {
6681c6fd2807SJeff Garzik 	int rc;
6682c6fd2807SJeff Garzik 	unsigned long flags;
6683c6fd2807SJeff Garzik 
6684c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6685c6fd2807SJeff Garzik 
6686c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
6687c6fd2807SJeff Garzik 		rc = 1;
6688c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
6689c6fd2807SJeff Garzik 	} else
6690c6fd2807SJeff Garzik 		rc = 0;
6691c6fd2807SJeff Garzik 
6692c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6693c6fd2807SJeff Garzik 
6694c6fd2807SJeff Garzik 	return rc;
6695c6fd2807SJeff Garzik }
6696c6fd2807SJeff Garzik 
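/*
 * Illustrative sketch (not part of the original file): interrupt or
 * error paths that might print in a tight loop can gate their messages
 * on ata_ratelimit(), which allows at most one message every HZ/5
 * jiffies (200 ms).
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */
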
6697c6fd2807SJeff Garzik /**
6698c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
6699c6fd2807SJeff Garzik  *	@reg: IO-mapped register
6700c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
6701c6fd2807SJeff Garzik  *	@val: Wait condition
6702c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
6703c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
6704c6fd2807SJeff Garzik  *
6705c6fd2807SJeff Garzik  *	Waiting for some bits of register to change is a common
6706c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
6707c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
6708c6fd2807SJeff Garzik  *
6709c6fd2807SJeff Garzik  *	(*@reg & mask) != val
6710c6fd2807SJeff Garzik  *
6711c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
6712c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
6713c6fd2807SJeff Garzik  *
6714c6fd2807SJeff Garzik  *	LOCKING:
6715c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
6716c6fd2807SJeff Garzik  *
6717c6fd2807SJeff Garzik  *	RETURNS:
6718c6fd2807SJeff Garzik  *	The final register value.
6719c6fd2807SJeff Garzik  */
6720c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6721c6fd2807SJeff Garzik 		      unsigned long interval_msec,
6722c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
6723c6fd2807SJeff Garzik {
6724c6fd2807SJeff Garzik 	unsigned long timeout;
6725c6fd2807SJeff Garzik 	u32 tmp;
6726c6fd2807SJeff Garzik 
6727c6fd2807SJeff Garzik 	tmp = ioread32(reg);
6728c6fd2807SJeff Garzik 
6729c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
6730c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
6731c6fd2807SJeff Garzik 	 * eat away the timeout.
6732c6fd2807SJeff Garzik 	 */
6733c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
6734c6fd2807SJeff Garzik 
6735c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6736c6fd2807SJeff Garzik 		msleep(interval_msec);
6737c6fd2807SJeff Garzik 		tmp = ioread32(reg);
6738c6fd2807SJeff Garzik 	}
6739c6fd2807SJeff Garzik 
6740c6fd2807SJeff Garzik 	return tmp;
6741c6fd2807SJeff Garzik }
6742c6fd2807SJeff Garzik 
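/*
 * Illustrative sketch (hypothetical controller, FOO_* names are made
 * up): to wait up to 500 ms for a busy bit to clear, pass the busy bit
 * as both @mask and @val, poll every 10 ms, and check the returned
 * value.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
 *				   10, 500);
 *	if (status & FOO_BUSY)
 *		timed out, the bit never cleared
 */
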
6743c6fd2807SJeff Garzik /*
6744c6fd2807SJeff Garzik  * Dummy port_ops
6745c6fd2807SJeff Garzik  */
6746c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
6747c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
6748c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6749c6fd2807SJeff Garzik 
6750c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
6751c6fd2807SJeff Garzik {
6752c6fd2807SJeff Garzik 	return ATA_DRDY;
6753c6fd2807SJeff Garzik }
6754c6fd2807SJeff Garzik 
6755c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6756c6fd2807SJeff Garzik {
6757c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
6758c6fd2807SJeff Garzik }
6759c6fd2807SJeff Garzik 
6760c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
6761c6fd2807SJeff Garzik 	.port_disable		= ata_port_disable,
6762c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
6763c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
6764c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
6765c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
6766c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
6767c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
6768c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
6769c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
6770c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
6771c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
6772c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
6773c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
6774c6fd2807SJeff Garzik };
6775c6fd2807SJeff Garzik 
677621b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
677721b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
677821b0ad4fSTejun Heo };
677921b0ad4fSTejun Heo 
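/*
 * Illustrative sketch (hypothetical driver, foo_port_info is made up):
 * a controller with a disabled or absent second channel can plug the
 * dummy port_info into its ppi array so that port is registered as a
 * no-op.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 */
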
6780c6fd2807SJeff Garzik /*
6781c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
6782c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
6783c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
6784c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
6785c6fd2807SJeff Garzik  */
6786c6fd2807SJeff Garzik 
6787c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6788c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6789c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6790c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
679121b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6792c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
6793c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
6794cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
6795f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
6796f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6797ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
6798f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
6799f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
68000529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
6801c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
6802c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
6803c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
6804c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
6805c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6806c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6807c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
6808c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
6809c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6810c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
681143727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
6812c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6813c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6814c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
6815c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
6816c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
6817c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
6818d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
6819c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
682004351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
68210d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
68220d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6823c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
6824c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6825c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6826c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
6827c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6828c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
6829c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6830c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6831c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6832c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6833c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6834c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6835c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
683610305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
6837c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
6838c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_debounce);
6839c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_resume);
6840c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
6841c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
6842c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
6843c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
6844c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
6845b6103f6dSTejun Heo EXPORT_SYMBOL_GPL(sata_port_hardreset);
6846c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
6847c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
6848c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
6849c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
6850c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
6851c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
6852c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
6853c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
6854d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
6855c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
6856c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6857c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6858c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6859c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6860c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6861c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
6862c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
6863c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
6864c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
6865c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6866c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_online);
6867c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_offline);
68686ffa01d8STejun Heo #ifdef CONFIG_PM
6869cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
6870cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
68716ffa01d8STejun Heo #endif /* CONFIG_PM */
6872c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
6873c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
687410305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
68756919a0a6SAlan Cox EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6876c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6877c6fd2807SJeff Garzik 
6878c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6879c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
6880c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
6881c6fd2807SJeff Garzik 
6882c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6883c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
6884d491b27bSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
68851626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
688621b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
6887c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
6888c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
68896ffa01d8STejun Heo #ifdef CONFIG_PM
6890c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6891c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6892c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6893c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
68946ffa01d8STejun Heo #endif /* CONFIG_PM */
6895c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6896c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6897c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6898c6fd2807SJeff Garzik 
6899c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
6900c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6901c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
6902c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
6903c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6904c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6905c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6906c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6907c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
690883625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
690983625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
691083625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_ack);
691183625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6912a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6913be0d18dfSAlan Cox 
6914be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
6915be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
6916be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
6917be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
6918