xref: /openbmc/linux/drivers/ata/libata-core.c (revision 31daabda)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36c6fd2807SJeff Garzik #include <linux/module.h>
37c6fd2807SJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <linux/init.h>
39c6fd2807SJeff Garzik #include <linux/list.h>
40c6fd2807SJeff Garzik #include <linux/mm.h>
41c6fd2807SJeff Garzik #include <linux/highmem.h>
42c6fd2807SJeff Garzik #include <linux/spinlock.h>
43c6fd2807SJeff Garzik #include <linux/blkdev.h>
44c6fd2807SJeff Garzik #include <linux/delay.h>
45c6fd2807SJeff Garzik #include <linux/timer.h>
46c6fd2807SJeff Garzik #include <linux/interrupt.h>
47c6fd2807SJeff Garzik #include <linux/completion.h>
48c6fd2807SJeff Garzik #include <linux/suspend.h>
49c6fd2807SJeff Garzik #include <linux/workqueue.h>
50c6fd2807SJeff Garzik #include <linux/jiffies.h>
51c6fd2807SJeff Garzik #include <linux/scatterlist.h>
52c6fd2807SJeff Garzik #include <scsi/scsi.h>
53c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
54c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
55c6fd2807SJeff Garzik #include <linux/libata.h>
56c6fd2807SJeff Garzik #include <asm/io.h>
57c6fd2807SJeff Garzik #include <asm/semaphore.h>
58c6fd2807SJeff Garzik #include <asm/byteorder.h>
59c6fd2807SJeff Garzik 
60c6fd2807SJeff Garzik #include "libata.h"
61c6fd2807SJeff Garzik 
62cb48cab7SJeff Garzik #define DRV_VERSION	"2.20"	/* must be exactly four chars */
63fda0efc5SJeff Garzik 
64fda0efc5SJeff Garzik 
65c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
67c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
68c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
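
/*
 * Note (illustrative, not from the original source): each triple is
 * consumed by sata_phy_debounce(), which samples SStatus every
 * "interval" ms, requires the value to stay stable for "duration" ms,
 * and gives up after "timeout" ms overall.
 */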
69c6fd2807SJeff Garzik 
70c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
71c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
72c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
74c6fd2807SJeff Garzik 
75f3187195STejun Heo unsigned int ata_print_id = 1;
76c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
77c6fd2807SJeff Garzik 
78c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
79c6fd2807SJeff Garzik 
80c6fd2807SJeff Garzik int atapi_enabled = 1;
81c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
82c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83c6fd2807SJeff Garzik 
84c6fd2807SJeff Garzik int atapi_dmadir = 0;
85c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
86c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87c6fd2807SJeff Garzik 
88c6fd2807SJeff Garzik int libata_fua = 0;
89c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
90c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91c6fd2807SJeff Garzik 
921e999736SAlan Cox static int ata_ignore_hpa = 0;
931e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
941e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
951e999736SAlan Cox 
96c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
97c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
98c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
99c6fd2807SJeff Garzik 
100d7d0dad6SJeff Garzik int libata_noacpi = 1;
101d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
10211ef697bSKristen Carlson Accardi MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
10311ef697bSKristen Carlson Accardi 
104c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
105c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
106c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
107c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
108c6fd2807SJeff Garzik 
109c6fd2807SJeff Garzik 
110c6fd2807SJeff Garzik /**
111c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
112c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
113c6fd2807SJeff Garzik  *	@fis: Buffer into which data will be output
114c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
115c6fd2807SJeff Garzik  *
116c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
117c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
118c6fd2807SJeff Garzik  *
119c6fd2807SJeff Garzik  *	LOCKING:
120c6fd2807SJeff Garzik  *	Inherited from caller.
121c6fd2807SJeff Garzik  */
122c6fd2807SJeff Garzik 
123c6fd2807SJeff Garzik void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
124c6fd2807SJeff Garzik {
125c6fd2807SJeff Garzik 	fis[0] = 0x27;	/* Register - Host to Device FIS */
126c6fd2807SJeff Garzik 	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
127c6fd2807SJeff Garzik 					    bit 7 indicates Command FIS */
128c6fd2807SJeff Garzik 	fis[2] = tf->command;
129c6fd2807SJeff Garzik 	fis[3] = tf->feature;
130c6fd2807SJeff Garzik 
131c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
132c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
133c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
134c6fd2807SJeff Garzik 	fis[7] = tf->device;
135c6fd2807SJeff Garzik 
136c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
137c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
138c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
139c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
140c6fd2807SJeff Garzik 
141c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
142c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
143c6fd2807SJeff Garzik 	fis[14] = 0;
144c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
145c6fd2807SJeff Garzik 
146c6fd2807SJeff Garzik 	fis[16] = 0;
147c6fd2807SJeff Garzik 	fis[17] = 0;
148c6fd2807SJeff Garzik 	fis[18] = 0;
149c6fd2807SJeff Garzik 	fis[19] = 0;
150c6fd2807SJeff Garzik }
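
/*
 * Usage sketch (illustrative only): a driver that needs a raw FIS
 * serializes an already-initialized taskfile into a 20-byte (five dword)
 * Register - Host to Device FIS buffer, e.g.
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, fis, 0);		(pmp 0: no port multiplier)
 */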
151c6fd2807SJeff Garzik 
152c6fd2807SJeff Garzik /**
153c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
154c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
155c6fd2807SJeff Garzik  *	@tf: Taskfile to output
156c6fd2807SJeff Garzik  *
157c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
158c6fd2807SJeff Garzik  *
159c6fd2807SJeff Garzik  *	LOCKING:
160c6fd2807SJeff Garzik  *	Inherited from caller.
161c6fd2807SJeff Garzik  */
162c6fd2807SJeff Garzik 
163c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
164c6fd2807SJeff Garzik {
165c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
166c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
167c6fd2807SJeff Garzik 
168c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
169c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
170c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
171c6fd2807SJeff Garzik 	tf->device	= fis[7];
172c6fd2807SJeff Garzik 
173c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
174c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
175c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
176c6fd2807SJeff Garzik 
177c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
178c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
179c6fd2807SJeff Garzik }
180c6fd2807SJeff Garzik 
181c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
182c6fd2807SJeff Garzik 	/* pio multi */
183c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
184c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
185c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
186c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
187c6fd2807SJeff Garzik 	0,
188c6fd2807SJeff Garzik 	0,
189c6fd2807SJeff Garzik 	0,
190c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
191c6fd2807SJeff Garzik 	/* pio */
192c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
193c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
194c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
195c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
196c6fd2807SJeff Garzik 	0,
197c6fd2807SJeff Garzik 	0,
198c6fd2807SJeff Garzik 	0,
199c6fd2807SJeff Garzik 	0,
200c6fd2807SJeff Garzik 	/* dma */
201c6fd2807SJeff Garzik 	ATA_CMD_READ,
202c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
203c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
204c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
205c6fd2807SJeff Garzik 	0,
206c6fd2807SJeff Garzik 	0,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
209c6fd2807SJeff Garzik };
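
/*
 * ata_rw_cmds[] is indexed as base + fua + lba48 + write, where base is
 * 0 (PIO multi), 8 (PIO) or 16 (DMA), FUA adds 4, LBA48 adds 2 and a
 * write adds 1 -- see ata_rwcmd_protocol() below.  Zero entries mark
 * combinations with no corresponding ATA command (FUA is only available
 * for LBA48 writes).
 */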
210c6fd2807SJeff Garzik 
211c6fd2807SJeff Garzik /**
212c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
213bd056d7eSTejun Heo  *	@tf: command to examine and configure
214bd056d7eSTejun Heo  *	@dev: device tf belongs to
215c6fd2807SJeff Garzik  *
216c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
217c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
218c6fd2807SJeff Garzik  *
219c6fd2807SJeff Garzik  *	LOCKING:
220c6fd2807SJeff Garzik  *	caller.
221c6fd2807SJeff Garzik  */
222bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
223c6fd2807SJeff Garzik {
224c6fd2807SJeff Garzik 	u8 cmd;
225c6fd2807SJeff Garzik 
226c6fd2807SJeff Garzik 	int index, fua, lba48, write;
227c6fd2807SJeff Garzik 
228c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
229c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
230c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
231c6fd2807SJeff Garzik 
232c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
233c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
234c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
235bd056d7eSTejun Heo 	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
236c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
237c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
238c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
239c6fd2807SJeff Garzik 	} else {
240c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
241c6fd2807SJeff Garzik 		index = 16;
242c6fd2807SJeff Garzik 	}
243c6fd2807SJeff Garzik 
244c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
245c6fd2807SJeff Garzik 	if (cmd) {
246c6fd2807SJeff Garzik 		tf->command = cmd;
247c6fd2807SJeff Garzik 		return 0;
248c6fd2807SJeff Garzik 	}
249c6fd2807SJeff Garzik 	return -1;
250c6fd2807SJeff Garzik }
251c6fd2807SJeff Garzik 
252c6fd2807SJeff Garzik /**
25335b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
25435b649feSTejun Heo  *	@tf: ATA taskfile of interest
25535b649feSTejun Heo  *	@dev: ATA device @tf belongs to
25635b649feSTejun Heo  *
25735b649feSTejun Heo  *	LOCKING:
25835b649feSTejun Heo  *	None.
25935b649feSTejun Heo  *
26035b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
26135b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
26235b649feSTejun Heo  *	flags select the address format to use.
26335b649feSTejun Heo  *
26435b649feSTejun Heo  *	RETURNS:
26535b649feSTejun Heo  *	Block address read from @tf.
26635b649feSTejun Heo  */
26735b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
26835b649feSTejun Heo {
26935b649feSTejun Heo 	u64 block = 0;
27035b649feSTejun Heo 
27135b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
27235b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
27335b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
27435b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
27535b649feSTejun Heo 			block |= tf->hob_lbal << 24;
27635b649feSTejun Heo 		} else
27735b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
27835b649feSTejun Heo 
27935b649feSTejun Heo 		block |= tf->lbah << 16;
28035b649feSTejun Heo 		block |= tf->lbam << 8;
28135b649feSTejun Heo 		block |= tf->lbal;
28235b649feSTejun Heo 	} else {
28335b649feSTejun Heo 		u32 cyl, head, sect;
28435b649feSTejun Heo 
28535b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
28635b649feSTejun Heo 		head = tf->device & 0xf;
28735b649feSTejun Heo 		sect = tf->lbal;
28835b649feSTejun Heo 
28935b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect - 1; /* sect is 1-based */
29035b649feSTejun Heo 	}
29135b649feSTejun Heo 
29235b649feSTejun Heo 	return block;
29335b649feSTejun Heo }
29435b649feSTejun Heo 
29535b649feSTejun Heo /**
296bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
297bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
298bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
299bd056d7eSTejun Heo  *	@block: Block address
300bd056d7eSTejun Heo  *	@n_block: Number of blocks
301bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
302bd056d7eSTejun Heo  *	@tag: tag
303bd056d7eSTejun Heo  *
304bd056d7eSTejun Heo  *	LOCKING:
305bd056d7eSTejun Heo  *	None.
306bd056d7eSTejun Heo  *
307bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
308bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
309bd056d7eSTejun Heo  *
310bd056d7eSTejun Heo  *	RETURNS:
311bd056d7eSTejun Heo  *
312bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
313bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
314bd056d7eSTejun Heo  */
315bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
316bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
317bd056d7eSTejun Heo 		    unsigned int tag)
318bd056d7eSTejun Heo {
319bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
320bd056d7eSTejun Heo 	tf->flags |= tf_flags;
321bd056d7eSTejun Heo 
3226d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
323bd056d7eSTejun Heo 		/* yay, NCQ */
324bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
325bd056d7eSTejun Heo 			return -ERANGE;
326bd056d7eSTejun Heo 
327bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
328bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
329bd056d7eSTejun Heo 
330bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
331bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
332bd056d7eSTejun Heo 		else
333bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
334bd056d7eSTejun Heo 
335bd056d7eSTejun Heo 		tf->nsect = tag << 3;
336bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
337bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
338bd056d7eSTejun Heo 
339bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
340bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
341bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
342bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
343bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
344bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
345bd056d7eSTejun Heo 
346bd056d7eSTejun Heo 		tf->device = 1 << 6;
347bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
348bd056d7eSTejun Heo 			tf->device |= 1 << 7;
349bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
350bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
351bd056d7eSTejun Heo 
352bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
353bd056d7eSTejun Heo 			/* use LBA28 */
354bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
355bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
356bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
357bd056d7eSTejun Heo 				return -ERANGE;
358bd056d7eSTejun Heo 
359bd056d7eSTejun Heo 			/* use LBA48 */
360bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
361bd056d7eSTejun Heo 
362bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
363bd056d7eSTejun Heo 
364bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
365bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
366bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
367bd056d7eSTejun Heo 		} else
368bd056d7eSTejun Heo 			/* request too large even for LBA48 */
369bd056d7eSTejun Heo 			return -ERANGE;
370bd056d7eSTejun Heo 
371bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
372bd056d7eSTejun Heo 			return -EINVAL;
373bd056d7eSTejun Heo 
374bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
375bd056d7eSTejun Heo 
376bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
377bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
378bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
379bd056d7eSTejun Heo 
380bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
381bd056d7eSTejun Heo 	} else {
382bd056d7eSTejun Heo 		/* CHS */
383bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
384bd056d7eSTejun Heo 
385bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
386bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
387bd056d7eSTejun Heo 			return -ERANGE;
388bd056d7eSTejun Heo 
389bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
390bd056d7eSTejun Heo 			return -EINVAL;
391bd056d7eSTejun Heo 
392bd056d7eSTejun Heo 		/* Convert LBA to CHS */
393bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
394bd056d7eSTejun Heo 		cyl   = track / dev->heads;
395bd056d7eSTejun Heo 		head  = track % dev->heads;
396bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
397bd056d7eSTejun Heo 
398bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
399bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
400bd056d7eSTejun Heo 
401bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
402bd056d7eSTejun Heo 		   Cylinder: 0-65535
403bd056d7eSTejun Heo 		   Head: 0-15
404bd056d7eSTejun Heo 		   Sector: 1-255 */
405bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
406bd056d7eSTejun Heo 			return -ERANGE;
407bd056d7eSTejun Heo 
408bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
409bd056d7eSTejun Heo 		tf->lbal = sect;
410bd056d7eSTejun Heo 		tf->lbam = cyl;
411bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
412bd056d7eSTejun Heo 		tf->device |= head;
413bd056d7eSTejun Heo 	}
414bd056d7eSTejun Heo 
415bd056d7eSTejun Heo 	return 0;
416bd056d7eSTejun Heo }
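
/*
 * Usage sketch (illustrative only): building a 16-sector FUA write at
 * LBA 0x12345678 for an NCQ-capable device might look like
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	ata_build_rw_tf(&tf, dev, 0x12345678ULL, 16,
 *			ATA_TFLAG_WRITE | ATA_TFLAG_FUA, qc->tag);
 *
 * which picks ATA_CMD_FPDMA_WRITE and encodes the tag in tf.nsect.
 */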
417bd056d7eSTejun Heo 
418bd056d7eSTejun Heo /**
419c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
420c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
421c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
422c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
423c6fd2807SJeff Garzik  *
424c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
425c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
426c6fd2807SJeff Garzik  *
427c6fd2807SJeff Garzik  *	LOCKING:
428c6fd2807SJeff Garzik  *	None.
429c6fd2807SJeff Garzik  *
430c6fd2807SJeff Garzik  *	RETURNS:
431c6fd2807SJeff Garzik  *	Packed xfer_mask.
432c6fd2807SJeff Garzik  */
433c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
434c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
435c6fd2807SJeff Garzik 				      unsigned int udma_mask)
436c6fd2807SJeff Garzik {
437c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
438c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
439c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
440c6fd2807SJeff Garzik }
441c6fd2807SJeff Garzik 
442c6fd2807SJeff Garzik /**
443c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
444c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
445c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
446c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
447c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
448c6fd2807SJeff Garzik  *
449c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
450c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
451c6fd2807SJeff Garzik  */
452c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
453c6fd2807SJeff Garzik 				unsigned int *pio_mask,
454c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
455c6fd2807SJeff Garzik 				unsigned int *udma_mask)
456c6fd2807SJeff Garzik {
457c6fd2807SJeff Garzik 	if (pio_mask)
458c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
459c6fd2807SJeff Garzik 	if (mwdma_mask)
460c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
461c6fd2807SJeff Garzik 	if (udma_mask)
462c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
463c6fd2807SJeff Garzik }
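
/*
 * Illustrative example: a device supporting PIO0-4, MWDMA0-2 and
 * UDMA0-5 can be described by
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 * and the individual masks recovered again with
 *
 *	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
 */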
464c6fd2807SJeff Garzik 
465c6fd2807SJeff Garzik static const struct ata_xfer_ent {
466c6fd2807SJeff Garzik 	int shift, bits;
467c6fd2807SJeff Garzik 	u8 base;
468c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
469c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
470c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
471c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
472c6fd2807SJeff Garzik 	{ -1, },
473c6fd2807SJeff Garzik };
474c6fd2807SJeff Garzik 
475c6fd2807SJeff Garzik /**
476c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
477c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
478c6fd2807SJeff Garzik  *
479c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
480c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
481c6fd2807SJeff Garzik  *
482c6fd2807SJeff Garzik  *	LOCKING:
483c6fd2807SJeff Garzik  *	None.
484c6fd2807SJeff Garzik  *
485c6fd2807SJeff Garzik  *	RETURNS:
486c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
487c6fd2807SJeff Garzik  */
488c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
489c6fd2807SJeff Garzik {
490c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
491c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
492c6fd2807SJeff Garzik 
493c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
494c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
495c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
496c6fd2807SJeff Garzik 	return 0;
497c6fd2807SJeff Garzik }
498c6fd2807SJeff Garzik 
499c6fd2807SJeff Garzik /**
500c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
501c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
502c6fd2807SJeff Garzik  *
503c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
504c6fd2807SJeff Garzik  *
505c6fd2807SJeff Garzik  *	LOCKING:
506c6fd2807SJeff Garzik  *	None.
507c6fd2807SJeff Garzik  *
508c6fd2807SJeff Garzik  *	RETURNS:
509c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
510c6fd2807SJeff Garzik  */
511c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
512c6fd2807SJeff Garzik {
513c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
514c6fd2807SJeff Garzik 
515c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
517c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
518c6fd2807SJeff Garzik 	return 0;
519c6fd2807SJeff Garzik }
520c6fd2807SJeff Garzik 
521c6fd2807SJeff Garzik /**
522c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
523c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
524c6fd2807SJeff Garzik  *
525c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
526c6fd2807SJeff Garzik  *
527c6fd2807SJeff Garzik  *	LOCKING:
528c6fd2807SJeff Garzik  *	None.
529c6fd2807SJeff Garzik  *
530c6fd2807SJeff Garzik  *	RETURNS:
531c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
532c6fd2807SJeff Garzik  */
533c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
534c6fd2807SJeff Garzik {
535c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
536c6fd2807SJeff Garzik 
537c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
539c6fd2807SJeff Garzik 			return ent->shift;
540c6fd2807SJeff Garzik 	return -1;
541c6fd2807SJeff Garzik }
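
/*
 * Illustrative example: for a mask whose highest set bit is the UDMA5
 * bit, ata_xfer_mask2mode() returns XFER_UDMA_5; ata_xfer_mode2mask()
 * turns XFER_UDMA_5 back into a single-bit mask, and
 * ata_xfer_mode2shift() returns ATA_SHIFT_UDMA for it.
 */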
542c6fd2807SJeff Garzik 
543c6fd2807SJeff Garzik /**
544c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
545c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
546c6fd2807SJeff Garzik  *
547c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
548c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
549c6fd2807SJeff Garzik  *
550c6fd2807SJeff Garzik  *	LOCKING:
551c6fd2807SJeff Garzik  *	None.
552c6fd2807SJeff Garzik  *
553c6fd2807SJeff Garzik  *	RETURNS:
554c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
555c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
556c6fd2807SJeff Garzik  */
557c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
558c6fd2807SJeff Garzik {
559c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
560c6fd2807SJeff Garzik 		"PIO0",
561c6fd2807SJeff Garzik 		"PIO1",
562c6fd2807SJeff Garzik 		"PIO2",
563c6fd2807SJeff Garzik 		"PIO3",
564c6fd2807SJeff Garzik 		"PIO4",
565b352e57dSAlan Cox 		"PIO5",
566b352e57dSAlan Cox 		"PIO6",
567c6fd2807SJeff Garzik 		"MWDMA0",
568c6fd2807SJeff Garzik 		"MWDMA1",
569c6fd2807SJeff Garzik 		"MWDMA2",
570b352e57dSAlan Cox 		"MWDMA3",
571b352e57dSAlan Cox 		"MWDMA4",
572c6fd2807SJeff Garzik 		"UDMA/16",
573c6fd2807SJeff Garzik 		"UDMA/25",
574c6fd2807SJeff Garzik 		"UDMA/33",
575c6fd2807SJeff Garzik 		"UDMA/44",
576c6fd2807SJeff Garzik 		"UDMA/66",
577c6fd2807SJeff Garzik 		"UDMA/100",
578c6fd2807SJeff Garzik 		"UDMA/133",
579c6fd2807SJeff Garzik 		"UDMA7",
580c6fd2807SJeff Garzik 	};
581c6fd2807SJeff Garzik 	int highbit;
582c6fd2807SJeff Garzik 
583c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
584c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
585c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
586c6fd2807SJeff Garzik 	return "<n/a>";
587c6fd2807SJeff Garzik }
588c6fd2807SJeff Garzik 
589c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
590c6fd2807SJeff Garzik {
591c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
592c6fd2807SJeff Garzik 		"1.5 Gbps",
593c6fd2807SJeff Garzik 		"3.0 Gbps",
594c6fd2807SJeff Garzik 	};
595c6fd2807SJeff Garzik 
596c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
597c6fd2807SJeff Garzik 		return "<unknown>";
598c6fd2807SJeff Garzik 	return spd_str[spd - 1];
599c6fd2807SJeff Garzik }
600c6fd2807SJeff Garzik 
601c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
602c6fd2807SJeff Garzik {
603c6fd2807SJeff Garzik 	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
604c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6054ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6064ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
607c6fd2807SJeff Garzik 		dev->class++;
608c6fd2807SJeff Garzik 	}
609c6fd2807SJeff Garzik }
610c6fd2807SJeff Garzik 
611c6fd2807SJeff Garzik /**
612c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
613c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
614c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
615c6fd2807SJeff Garzik  *
6160d5ff566STejun Heo  *	This technique was originally described in
6170d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
6180d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
6190d5ff566STejun Heo  *
6200d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
6210d5ff566STejun Heo  *	and if a device is present, it will respond by
6220d5ff566STejun Heo  *	correctly storing and echoing back the
6230d5ff566STejun Heo  *	ATA shadow register contents.
624c6fd2807SJeff Garzik  *
625c6fd2807SJeff Garzik  *	LOCKING:
626c6fd2807SJeff Garzik  *	caller.
627c6fd2807SJeff Garzik  */
628c6fd2807SJeff Garzik 
6290d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
630c6fd2807SJeff Garzik {
6310d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
6320d5ff566STejun Heo 	u8 nsect, lbal;
6330d5ff566STejun Heo 
6340d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
6350d5ff566STejun Heo 
6360d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6370d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6380d5ff566STejun Heo 
6390d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
6400d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
6410d5ff566STejun Heo 
6420d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6430d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6440d5ff566STejun Heo 
6450d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
6460d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
6470d5ff566STejun Heo 
6480d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
6490d5ff566STejun Heo 		return 1;	/* we found a device */
6500d5ff566STejun Heo 
6510d5ff566STejun Heo 	return 0;		/* nothing found */
652c6fd2807SJeff Garzik }
653c6fd2807SJeff Garzik 
654c6fd2807SJeff Garzik /**
655c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
656c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
657c6fd2807SJeff Garzik  *
658c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
659c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
660c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
661c6fd2807SJeff Garzik  *
662c6fd2807SJeff Garzik  *	LOCKING:
663c6fd2807SJeff Garzik  *	None.
664c6fd2807SJeff Garzik  *
665c6fd2807SJeff Garzik  *	RETURNS:
666c6fd2807SJeff Garzik  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
667c6fd2807SJeff Garzik  *	in the event of failure.
668c6fd2807SJeff Garzik  */
669c6fd2807SJeff Garzik 
670c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
671c6fd2807SJeff Garzik {
672c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
673c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
674c6fd2807SJeff Garzik 	 * so we check only those.  It's sufficient for uniqueness.
675c6fd2807SJeff Garzik 	 */
676c6fd2807SJeff Garzik 
677c6fd2807SJeff Garzik 	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
678c6fd2807SJeff Garzik 	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
679c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
680c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
681c6fd2807SJeff Garzik 	}
682c6fd2807SJeff Garzik 
683c6fd2807SJeff Garzik 	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
684c6fd2807SJeff Garzik 	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
685c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
686c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
687c6fd2807SJeff Garzik 	}
688c6fd2807SJeff Garzik 
689c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
690c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
691c6fd2807SJeff Garzik }
692c6fd2807SJeff Garzik 
693c6fd2807SJeff Garzik /**
694c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
695c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
696c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
697c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
698c6fd2807SJeff Garzik  *
699c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
700c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
701c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
702c6fd2807SJeff Garzik  *	and diagnostics.
703c6fd2807SJeff Garzik  *
704c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
705c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
706c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
707c6fd2807SJeff Garzik  *
708c6fd2807SJeff Garzik  *	LOCKING:
709c6fd2807SJeff Garzik  *	caller.
710c6fd2807SJeff Garzik  *
711c6fd2807SJeff Garzik  *	RETURNS:
712c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
713c6fd2807SJeff Garzik  */
714c6fd2807SJeff Garzik 
715a619f981SAkira Iguchi unsigned int
716c6fd2807SJeff Garzik ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
717c6fd2807SJeff Garzik {
718c6fd2807SJeff Garzik 	struct ata_taskfile tf;
719c6fd2807SJeff Garzik 	unsigned int class;
720c6fd2807SJeff Garzik 	u8 err;
721c6fd2807SJeff Garzik 
722c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
723c6fd2807SJeff Garzik 
724c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
725c6fd2807SJeff Garzik 
726c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
727c6fd2807SJeff Garzik 	err = tf.feature;
728c6fd2807SJeff Garzik 	if (r_err)
729c6fd2807SJeff Garzik 		*r_err = err;
730c6fd2807SJeff Garzik 
73193590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
73293590859SAlan Cox 	if (err == 0 && device == 0)
73393590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
73493590859SAlan Cox 		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
73593590859SAlan Cox 	else if (err == 1)
736c6fd2807SJeff Garzik 		/* do nothing */ ;
737c6fd2807SJeff Garzik 	else if ((device == 0) && (err == 0x81))
738c6fd2807SJeff Garzik 		/* do nothing */ ;
739c6fd2807SJeff Garzik 	else
740c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
741c6fd2807SJeff Garzik 
742c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
743c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
744c6fd2807SJeff Garzik 
745c6fd2807SJeff Garzik 	if (class == ATA_DEV_UNKNOWN)
746c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
747c6fd2807SJeff Garzik 	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
748c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
749c6fd2807SJeff Garzik 	return class;
750c6fd2807SJeff Garzik }
751c6fd2807SJeff Garzik 
752c6fd2807SJeff Garzik /**
753c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
754c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
755c6fd2807SJeff Garzik  *	@s: string into which data is output
756c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
757c6fd2807SJeff Garzik  *	@len: length of string to return. must be an even number.
758c6fd2807SJeff Garzik  *
759c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
760c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
761c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
762c6fd2807SJeff Garzik  *
763c6fd2807SJeff Garzik  *	LOCKING:
764c6fd2807SJeff Garzik  *	caller.
765c6fd2807SJeff Garzik  */
766c6fd2807SJeff Garzik 
767c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
768c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
769c6fd2807SJeff Garzik {
770c6fd2807SJeff Garzik 	unsigned int c;
771c6fd2807SJeff Garzik 
772c6fd2807SJeff Garzik 	while (len > 0) {
773c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
774c6fd2807SJeff Garzik 		*s = c;
775c6fd2807SJeff Garzik 		s++;
776c6fd2807SJeff Garzik 
777c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
778c6fd2807SJeff Garzik 		*s = c;
779c6fd2807SJeff Garzik 		s++;
780c6fd2807SJeff Garzik 
781c6fd2807SJeff Garzik 		ofs++;
782c6fd2807SJeff Garzik 		len -= 2;
783c6fd2807SJeff Garzik 	}
784c6fd2807SJeff Garzik }
785c6fd2807SJeff Garzik 
786c6fd2807SJeff Garzik /**
787c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
788c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
789c6fd2807SJeff Garzik  *	@s: string into which data is output
790c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
791c6fd2807SJeff Garzik  *	@len: length of string to return. must be an odd number.
792c6fd2807SJeff Garzik  *
793c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
794c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
795c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
796c6fd2807SJeff Garzik  *
797c6fd2807SJeff Garzik  *	LOCKING:
798c6fd2807SJeff Garzik  *	caller.
799c6fd2807SJeff Garzik  */
800c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
801c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
802c6fd2807SJeff Garzik {
803c6fd2807SJeff Garzik 	unsigned char *p;
804c6fd2807SJeff Garzik 
805c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
806c6fd2807SJeff Garzik 
807c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
808c6fd2807SJeff Garzik 
809c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
810c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
811c6fd2807SJeff Garzik 		p--;
812c6fd2807SJeff Garzik 	*p = '\0';
813c6fd2807SJeff Garzik }
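
/*
 * Usage sketch (illustrative only): the model string occupies IDENTIFY
 * words 27-46 (40 characters), so it can be extracted NUL-terminated
 * with
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(id, model, 27, sizeof(model));
 *
 * note the odd length: 40 data bytes plus the terminating NUL.
 */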
814c6fd2807SJeff Garzik 
8151e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
8161e999736SAlan Cox {
8171e999736SAlan Cox 	u64 sectors = 0;
8181e999736SAlan Cox 
8191e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
8201e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
8211e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
8221e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8231e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8241e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8251e999736SAlan Cox 
8261e999736SAlan Cox 	return ++sectors;
8271e999736SAlan Cox }
8281e999736SAlan Cox 
8291e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
8301e999736SAlan Cox {
8311e999736SAlan Cox 	u64 sectors = 0;
8321e999736SAlan Cox 
8331e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
8341e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8351e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8361e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8371e999736SAlan Cox 
8381e999736SAlan Cox 	return ++sectors;
8391e999736SAlan Cox }
8401e999736SAlan Cox 
8411e999736SAlan Cox /**
8421e999736SAlan Cox  *	ata_read_native_max_address_ext	-	LBA48 native max query
8431e999736SAlan Cox  *	@dev: Device to query
8441e999736SAlan Cox  *
8451e999736SAlan Cox  *	Perform an LBA48 size query upon the device in question. Return the
8461e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
8471e999736SAlan Cox  */
8481e999736SAlan Cox 
8491e999736SAlan Cox static u64 ata_read_native_max_address_ext(struct ata_device *dev)
8501e999736SAlan Cox {
8511e999736SAlan Cox 	unsigned int err;
8521e999736SAlan Cox 	struct ata_taskfile tf;
8531e999736SAlan Cox 
8541e999736SAlan Cox 	ata_tf_init(dev, &tf);
8551e999736SAlan Cox 
8561e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
8571e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
8581e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8591e999736SAlan Cox 	tf.device |= 0x40;
8601e999736SAlan Cox 
8611e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8621e999736SAlan Cox 	if (err)
8631e999736SAlan Cox 		return 0;
8641e999736SAlan Cox 
8651e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
8661e999736SAlan Cox }
8671e999736SAlan Cox 
8681e999736SAlan Cox /**
8691e999736SAlan Cox  *	ata_read_native_max_address	-	LBA28 native max query
8701e999736SAlan Cox  *	@dev: Device to query
8711e999736SAlan Cox  *
8721e999736SAlan Cox  *	Perform an LBA28 size query upon the device in question. Return the
8731e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
8741e999736SAlan Cox  */
8751e999736SAlan Cox 
8761e999736SAlan Cox static u64 ata_read_native_max_address(struct ata_device *dev)
8771e999736SAlan Cox {
8781e999736SAlan Cox 	unsigned int err;
8791e999736SAlan Cox 	struct ata_taskfile tf;
8801e999736SAlan Cox 
8811e999736SAlan Cox 	ata_tf_init(dev, &tf);
8821e999736SAlan Cox 
8831e999736SAlan Cox 	tf.command = ATA_CMD_READ_NATIVE_MAX;
8841e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
8851e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
8861e999736SAlan Cox 	tf.device |= 0x40;
8871e999736SAlan Cox 
8881e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8891e999736SAlan Cox 	if (err)
8901e999736SAlan Cox 		return 0;
8911e999736SAlan Cox 
8921e999736SAlan Cox 	return ata_tf_to_lba(&tf);
8931e999736SAlan Cox }
8941e999736SAlan Cox 
8951e999736SAlan Cox /**
8961e999736SAlan Cox  *	ata_set_native_max_address_ext	-	LBA48 native max set
8971e999736SAlan Cox  *	@dev: Device to set max LBA for
 *	@new_sectors: number of sectors to set as the new native max address
8981e999736SAlan Cox  *
8991e999736SAlan Cox  *	Perform an LBA48 size set max upon the device in question. Return the
9001e999736SAlan Cox  *	actual LBA48 size or zero if the command fails.
9011e999736SAlan Cox  */
9021e999736SAlan Cox 
9031e999736SAlan Cox static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
9041e999736SAlan Cox {
9051e999736SAlan Cox 	unsigned int err;
9061e999736SAlan Cox 	struct ata_taskfile tf;
9071e999736SAlan Cox 
9081e999736SAlan Cox 	new_sectors--;
9091e999736SAlan Cox 
9101e999736SAlan Cox 	ata_tf_init(dev, &tf);
9111e999736SAlan Cox 
9121e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX_EXT;
9131e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
9141e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9151e999736SAlan Cox 	tf.device |= 0x40;
9161e999736SAlan Cox 
9171e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9181e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9191e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9201e999736SAlan Cox 
9211e999736SAlan Cox 	tf.hob_lbal = (new_sectors >> 24) & 0xff;
9221e999736SAlan Cox 	tf.hob_lbam = (new_sectors >> 32) & 0xff;
9231e999736SAlan Cox 	tf.hob_lbah = (new_sectors >> 40) & 0xff;
9241e999736SAlan Cox 
9251e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9261e999736SAlan Cox 	if (err)
9271e999736SAlan Cox 		return 0;
9281e999736SAlan Cox 
9291e999736SAlan Cox 	return ata_tf_to_lba48(&tf);
9301e999736SAlan Cox }
9311e999736SAlan Cox 
9321e999736SAlan Cox /**
9331e999736SAlan Cox  *	ata_set_native_max_address	-	LBA28 native max set
9341e999736SAlan Cox  *	@dev: Device to set max LBA for
 *	@new_sectors: number of sectors to set as the new native max address
9351e999736SAlan Cox  *
9361e999736SAlan Cox  *	Perform an LBA28 size set max upon the device in question. Return the
9371e999736SAlan Cox  *	actual LBA28 size or zero if the command fails.
9381e999736SAlan Cox  */
9391e999736SAlan Cox 
9401e999736SAlan Cox static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
9411e999736SAlan Cox {
9421e999736SAlan Cox 	unsigned int err;
9431e999736SAlan Cox 	struct ata_taskfile tf;
9441e999736SAlan Cox 
9451e999736SAlan Cox 	new_sectors--;
9461e999736SAlan Cox 
9471e999736SAlan Cox 	ata_tf_init(dev, &tf);
9481e999736SAlan Cox 
9491e999736SAlan Cox 	tf.command = ATA_CMD_SET_MAX;
9501e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
9511e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
9521e999736SAlan Cox 
9531e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9541e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9551e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9561e999736SAlan Cox 	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
9571e999736SAlan Cox 
9581e999736SAlan Cox 	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9591e999736SAlan Cox 	if (err)
9601e999736SAlan Cox 		return 0;
9611e999736SAlan Cox 
9621e999736SAlan Cox 	return ata_tf_to_lba(&tf);
9631e999736SAlan Cox }
9641e999736SAlan Cox 
9651e999736SAlan Cox /**
9661e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
9671e999736SAlan Cox  *	@dev: Device to resize
9681e999736SAlan Cox  *
9691e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
9701e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
9711e999736SAlan Cox  *	the drive has the HPA feature set enabled.
9721e999736SAlan Cox  */
9731e999736SAlan Cox 
9741e999736SAlan Cox static u64 ata_hpa_resize(struct ata_device *dev)
9751e999736SAlan Cox {
9761e999736SAlan Cox 	u64 sectors = dev->n_sectors;
9771e999736SAlan Cox 	u64 hpa_sectors;
9781e999736SAlan Cox 
9791e999736SAlan Cox 	if (ata_id_has_lba48(dev->id))
9801e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address_ext(dev);
9811e999736SAlan Cox 	else
9821e999736SAlan Cox 		hpa_sectors = ata_read_native_max_address(dev);
9831e999736SAlan Cox 
9841e999736SAlan Cox 	/* if no hpa, both should be equal */
985bd1d5ec6SAndrew Morton 	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
986bd1d5ec6SAndrew Morton 				"hpa_sectors = %lld\n",
987bd1d5ec6SAndrew Morton 		__FUNCTION__, (long long)sectors, (long long)hpa_sectors);
9881e999736SAlan Cox 
9891e999736SAlan Cox 	if (hpa_sectors > sectors) {
9901e999736SAlan Cox 		ata_dev_printk(dev, KERN_INFO,
9911e999736SAlan Cox 			"Host Protected Area detected:\n"
9921e999736SAlan Cox 			"\tcurrent size: %lld sectors\n"
9931e999736SAlan Cox 			"\tnative size: %lld sectors\n",
994bd1d5ec6SAndrew Morton 			(long long)sectors, (long long)hpa_sectors);
9951e999736SAlan Cox 
9961e999736SAlan Cox 		if (ata_ignore_hpa) {
9971e999736SAlan Cox 			if (ata_id_has_lba48(dev->id))
9981e999736SAlan Cox 				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
9991e999736SAlan Cox 			else
1000bd1d5ec6SAndrew Morton 				hpa_sectors = ata_set_native_max_address(dev,
1001bd1d5ec6SAndrew Morton 								hpa_sectors);
10021e999736SAlan Cox 
10031e999736SAlan Cox 			if (hpa_sectors) {
1004bd1d5ec6SAndrew Morton 				ata_dev_printk(dev, KERN_INFO, "native size "
1005bd1d5ec6SAndrew Morton 					"increased to %lld sectors\n",
1006bd1d5ec6SAndrew Morton 					(long long)hpa_sectors);
10071e999736SAlan Cox 				return hpa_sectors;
10081e999736SAlan Cox 			}
10091e999736SAlan Cox 		}
10101e999736SAlan Cox 	}
10111e999736SAlan Cox 	return sectors;
10121e999736SAlan Cox }
10131e999736SAlan Cox 
1014c6fd2807SJeff Garzik static u64 ata_id_n_sectors(const u16 *id)
1015c6fd2807SJeff Garzik {
1016c6fd2807SJeff Garzik 	if (ata_id_has_lba(id)) {
1017c6fd2807SJeff Garzik 		if (ata_id_has_lba48(id))
1018c6fd2807SJeff Garzik 			return ata_id_u64(id, 100);	/* words 100-103: max LBA48 */
1019c6fd2807SJeff Garzik 		else
1020c6fd2807SJeff Garzik 			return ata_id_u32(id, 60);	/* words 60-61: LBA28 capacity */
1021c6fd2807SJeff Garzik 	} else {
1022c6fd2807SJeff Garzik 		if (ata_id_current_chs_valid(id))
1023c6fd2807SJeff Garzik 			return ata_id_u32(id, 57);	/* words 57-58: current CHS capacity */
1024c6fd2807SJeff Garzik 		else
1025c6fd2807SJeff Garzik 			return id[1] * id[3] * id[6];	/* default cyls * heads * sectors */
1026c6fd2807SJeff Garzik 	}
1027c6fd2807SJeff Garzik }
1028c6fd2807SJeff Garzik 
1029c6fd2807SJeff Garzik /**
103010305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
103110305f0fSAlan  *	@dev: device to identify
1032cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
103310305f0fSAlan  *
103410305f0fSAlan  *	Set up the timing values for the device based upon the identify
103510305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
103610305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
103710305f0fSAlan  *	mode correctly when possible.
103810305f0fSAlan  *
103910305f0fSAlan  *	In addition we emit messages formatted like those of the default
104010305f0fSAlan  *	ata_dev_set_mode handler, in order to keep the presentation
104110305f0fSAlan  *	consistent.
104210305f0fSAlan  */
104310305f0fSAlan 
104410305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
104510305f0fSAlan {
104610305f0fSAlan 	unsigned int mask;
104710305f0fSAlan 	u8 mode;
104810305f0fSAlan 
104910305f0fSAlan 	/* Pack the DMA modes */
105010305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;	/* word 63 bits 8-10: selected MWDMA mode */
105110305f0fSAlan 	if (dev->id[53] & 0x04)		/* word 53 bit 2: word 88 is valid */
105210305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;	/* word 88 bits 8-14: selected UDMA mode */
105310305f0fSAlan 
105410305f0fSAlan 	/* Select the mode in use */
105510305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
105610305f0fSAlan 
105710305f0fSAlan 	if (mode != 0) {
105810305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
105910305f0fSAlan 		       ata_mode_string(mask));
106010305f0fSAlan 	} else {
106110305f0fSAlan 		/* SWDMA perhaps ? */
106210305f0fSAlan 		mode = unknown;
106310305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
106410305f0fSAlan 	}
106510305f0fSAlan 
106610305f0fSAlan 	/* Configure the device reporting */
106710305f0fSAlan 	dev->xfer_mode = mode;
106810305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
106910305f0fSAlan }
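
/*
 * Usage sketch (illustrative only): a driver whose modes are fixed by
 * firmware might report them from its mode-setup path with
 *
 *	ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
 *
 * where XFER_MW_DMA_0 is the fallback reported if the IDENTIFY data
 * does not reveal the configured mode.
 */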
107010305f0fSAlan 
107110305f0fSAlan /**
1072c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1073c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1074c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1075c6fd2807SJeff Garzik  *
1076c6fd2807SJeff Garzik  *	This function performs no operation.
1077c6fd2807SJeff Garzik  *
1078c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1079c6fd2807SJeff Garzik  *
1080c6fd2807SJeff Garzik  *	LOCKING:
1081c6fd2807SJeff Garzik  *	caller.
1082c6fd2807SJeff Garzik  */
1083c6fd2807SJeff Garzik void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1084c6fd2807SJeff Garzik {
1085c6fd2807SJeff Garzik }
1086c6fd2807SJeff Garzik 
1087c6fd2807SJeff Garzik 
1088c6fd2807SJeff Garzik /**
1089c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1090c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1091c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1092c6fd2807SJeff Garzik  *
1093c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1094c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1095c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1096c6fd2807SJeff Garzik  *
1097c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1098c6fd2807SJeff Garzik  *
1099c6fd2807SJeff Garzik  *	LOCKING:
1100c6fd2807SJeff Garzik  *	caller.
1101c6fd2807SJeff Garzik  */
1102c6fd2807SJeff Garzik 
1103c6fd2807SJeff Garzik void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1104c6fd2807SJeff Garzik {
1105c6fd2807SJeff Garzik 	u8 tmp;
1106c6fd2807SJeff Garzik 
1107c6fd2807SJeff Garzik 	if (device == 0)
1108c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1109c6fd2807SJeff Garzik 	else
1110c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1111c6fd2807SJeff Garzik 
11120d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1113c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1114c6fd2807SJeff Garzik }
1115c6fd2807SJeff Garzik 
1116c6fd2807SJeff Garzik /**
1117c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1118c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1119c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1120c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1121c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1122c6fd2807SJeff Garzik  *
1123c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1124c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1125c6fd2807SJeff Garzik  *	ATA channel.
1126c6fd2807SJeff Garzik  *
1127c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1128c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1129c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1130c6fd2807SJeff Garzik  *
1131c6fd2807SJeff Garzik  *	LOCKING:
1132c6fd2807SJeff Garzik  *	caller.
1133c6fd2807SJeff Garzik  */
1134c6fd2807SJeff Garzik 
1135c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1136c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1137c6fd2807SJeff Garzik {
1138c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
113944877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
114044877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1141c6fd2807SJeff Garzik 
1142c6fd2807SJeff Garzik 	if (wait)
1143c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1144c6fd2807SJeff Garzik 
1145c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1146c6fd2807SJeff Garzik 
1147c6fd2807SJeff Garzik 	if (wait) {
1148c6fd2807SJeff Garzik 		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
1149c6fd2807SJeff Garzik 			msleep(150);
1150c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1151c6fd2807SJeff Garzik 	}
1152c6fd2807SJeff Garzik }
1153c6fd2807SJeff Garzik 
1154c6fd2807SJeff Garzik /**
1155c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1156c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1157c6fd2807SJeff Garzik  *
1158c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1159c6fd2807SJeff Garzik  *	page.
1160c6fd2807SJeff Garzik  *
1161c6fd2807SJeff Garzik  *	LOCKING:
1162c6fd2807SJeff Garzik  *	caller.
1163c6fd2807SJeff Garzik  */
1164c6fd2807SJeff Garzik 
1165c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1166c6fd2807SJeff Garzik {
1167c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1168c6fd2807SJeff Garzik 		"53==0x%04x  "
1169c6fd2807SJeff Garzik 		"63==0x%04x  "
1170c6fd2807SJeff Garzik 		"64==0x%04x  "
1171c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1172c6fd2807SJeff Garzik 		id[49],
1173c6fd2807SJeff Garzik 		id[53],
1174c6fd2807SJeff Garzik 		id[63],
1175c6fd2807SJeff Garzik 		id[64],
1176c6fd2807SJeff Garzik 		id[75]);
1177c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1178c6fd2807SJeff Garzik 		"81==0x%04x  "
1179c6fd2807SJeff Garzik 		"82==0x%04x  "
1180c6fd2807SJeff Garzik 		"83==0x%04x  "
1181c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1182c6fd2807SJeff Garzik 		id[80],
1183c6fd2807SJeff Garzik 		id[81],
1184c6fd2807SJeff Garzik 		id[82],
1185c6fd2807SJeff Garzik 		id[83],
1186c6fd2807SJeff Garzik 		id[84]);
1187c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1188c6fd2807SJeff Garzik 		"93==0x%04x\n",
1189c6fd2807SJeff Garzik 		id[88],
1190c6fd2807SJeff Garzik 		id[93]);
1191c6fd2807SJeff Garzik }
1192c6fd2807SJeff Garzik 
1193c6fd2807SJeff Garzik /**
1194c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1195c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1196c6fd2807SJeff Garzik  *
1197c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1198c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1199c6fd2807SJeff Garzik  *
1200c6fd2807SJeff Garzik  *	FIXME: pre-IDE drive timing (do we care?).
1201c6fd2807SJeff Garzik  *
1202c6fd2807SJeff Garzik  *	LOCKING:
1203c6fd2807SJeff Garzik  *	None.
1204c6fd2807SJeff Garzik  *
1205c6fd2807SJeff Garzik  *	RETURNS:
1206c6fd2807SJeff Garzik  *	Computed xfermask
1207c6fd2807SJeff Garzik  */
1208c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1209c6fd2807SJeff Garzik {
1210c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1211c6fd2807SJeff Garzik 
1212c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1213c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
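		/* Word 64 bits 0-1 advertise PIO3/PIO4; shift them above the
		 * always-supported PIO0-2 bits to form the mode mask.
		 */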
1214c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1215c6fd2807SJeff Garzik 		pio_mask <<= 3;
1216c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1217c6fd2807SJeff Garzik 	} else {
1218c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1219c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1220c6fd2807SJeff Garzik 		 * a mask.
1221c6fd2807SJeff Garzik 		 */
12227a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
122346767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
122546767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
122546767aebSAlan Cox 		else
122646767aebSAlan Cox 			pio_mask = 1;
1227c6fd2807SJeff Garzik 
1228c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1229c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1230c6fd2807SJeff Garzik 		 * process. However it's the speeds, not the modes, that
1231c6fd2807SJeff Garzik 		 * are supported... Note that drivers using the timing API
1232c6fd2807SJeff Garzik 		 * will get this right anyway.
1233c6fd2807SJeff Garzik 		 */
1234c6fd2807SJeff Garzik 	}
1235c6fd2807SJeff Garzik 
1236c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1237c6fd2807SJeff Garzik 
1238b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1239b352e57dSAlan Cox 		/*
1240b352e57dSAlan Cox 		 *	Process compact flash extended modes
1241b352e57dSAlan Cox 		 */
1242b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1243b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1244b352e57dSAlan Cox 
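		/* Word 163 reports the highest advanced mode supported:
		 * 1 = PIO5/MWDMA3, 2 = PIO6/MWDMA4.
		 */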
1245b352e57dSAlan Cox 		if (pio)
1246b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1247b352e57dSAlan Cox 		if (pio > 1)
1248b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1249b352e57dSAlan Cox 		if (dma)
1250b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1251b352e57dSAlan Cox 		if (dma > 1)
1252b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1253b352e57dSAlan Cox 	}
1254b352e57dSAlan Cox 
1255c6fd2807SJeff Garzik 	udma_mask = 0;
1256c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1257c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1258c6fd2807SJeff Garzik 
1259c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1260c6fd2807SJeff Garzik }
1261c6fd2807SJeff Garzik 
1262c6fd2807SJeff Garzik /**
1263c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1264c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1265c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
126665f27f38SDavid Howells  *	@data: data for @fn to use
1267c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1268c6fd2807SJeff Garzik  *
1269c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1270c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1271c6fd2807SJeff Garzik  *	user (low level driver)'s responsibility to make sure that only
1272c6fd2807SJeff Garzik  *	one task is active at any given time.
1273c6fd2807SJeff Garzik  *
1274c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1275c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1276c6fd2807SJeff Garzik  *	synchronization.
1277c6fd2807SJeff Garzik  *
1278c6fd2807SJeff Garzik  *	LOCKING:
1279c6fd2807SJeff Garzik  *	Inherited from caller.
1280c6fd2807SJeff Garzik  */
128165f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1282c6fd2807SJeff Garzik 			 unsigned long delay)
1283c6fd2807SJeff Garzik {
1284c6fd2807SJeff Garzik 	int rc;
1285c6fd2807SJeff Garzik 
1286c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
1287c6fd2807SJeff Garzik 		return;
1288c6fd2807SJeff Garzik 
128965f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
129065f27f38SDavid Howells 	ap->port_task_data = data;
1291c6fd2807SJeff Garzik 
1292c6fd2807SJeff Garzik 	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
1293c6fd2807SJeff Garzik 
1294c6fd2807SJeff Garzik 	/* rc == 0 means that another user is using port task */
1295c6fd2807SJeff Garzik 	WARN_ON(rc == 0);
1296c6fd2807SJeff Garzik }
1297c6fd2807SJeff Garzik 
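/*
 * Illustrative use only, not part of this file: a low-level driver that
 * runs its PIO state machine from process context might schedule it as
 *
 *	ata_port_queue_task(ap, foo_pio_task, qc, 0);
 *
 * where foo_pio_task() is a hypothetical work_func_t supplied by that
 * driver and qc is the queued command it should continue processing.
 */
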
1298c6fd2807SJeff Garzik /**
1299c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1300c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1301c6fd2807SJeff Garzik  *
1302c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1303c6fd2807SJeff Garzik  *	be running or scheduled.
1304c6fd2807SJeff Garzik  *
1305c6fd2807SJeff Garzik  *	LOCKING:
1306c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1307c6fd2807SJeff Garzik  */
1308c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1309c6fd2807SJeff Garzik {
1310c6fd2807SJeff Garzik 	unsigned long flags;
1311c6fd2807SJeff Garzik 
1312c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1313c6fd2807SJeff Garzik 
1314c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1315c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
1316c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1317c6fd2807SJeff Garzik 
1318c6fd2807SJeff Garzik 	DPRINTK("flush #1\n");
1319c6fd2807SJeff Garzik 	flush_workqueue(ata_wq);
1320c6fd2807SJeff Garzik 
1321c6fd2807SJeff Garzik 	/*
1322c6fd2807SJeff Garzik 	 * At this point, if a task is running, it's guaranteed to see
1323c6fd2807SJeff Garzik 	 * the FLUSH flag; thus, it will never queue pio tasks again.
1324c6fd2807SJeff Garzik 	 * Cancel and flush.
1325c6fd2807SJeff Garzik 	 */
1326c6fd2807SJeff Garzik 	if (!cancel_delayed_work(&ap->port_task)) {
1327c6fd2807SJeff Garzik 		if (ata_msg_ctl(ap))
1328c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1329c6fd2807SJeff Garzik 					__FUNCTION__);
1330c6fd2807SJeff Garzik 		flush_workqueue(ata_wq);
1331c6fd2807SJeff Garzik 	}
1332c6fd2807SJeff Garzik 
1333c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1334c6fd2807SJeff Garzik 	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
1335c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1336c6fd2807SJeff Garzik 
1337c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1338c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1339c6fd2807SJeff Garzik }
1340c6fd2807SJeff Garzik 
13417102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1342c6fd2807SJeff Garzik {
1343c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1344c6fd2807SJeff Garzik 
1345c6fd2807SJeff Garzik 	complete(waiting);
1346c6fd2807SJeff Garzik }
1347c6fd2807SJeff Garzik 
1348c6fd2807SJeff Garzik /**
13492432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1350c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1351c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1352c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1353c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
13542432697bSTejun Heo  *	@sg: sg list for the data buffer of the command
13552432697bSTejun Heo  *	@n_elem: Number of sg entries
1356c6fd2807SJeff Garzik  *
1357c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1358c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1359c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1360c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1361c6fd2807SJeff Garzik  *	clean up after timeout.
1362c6fd2807SJeff Garzik  *
1363c6fd2807SJeff Garzik  *	LOCKING:
1364c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1365c6fd2807SJeff Garzik  *
1366c6fd2807SJeff Garzik  *	RETURNS:
1367c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1368c6fd2807SJeff Garzik  */
13692432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1370c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
13712432697bSTejun Heo 			      int dma_dir, struct scatterlist *sg,
13722432697bSTejun Heo 			      unsigned int n_elem)
1373c6fd2807SJeff Garzik {
1374c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1375c6fd2807SJeff Garzik 	u8 command = tf->command;
1376c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1377c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1378c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1379c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1380c6fd2807SJeff Garzik 	unsigned long flags;
1381c6fd2807SJeff Garzik 	unsigned int err_mask;
1382c6fd2807SJeff Garzik 	int rc;
1383c6fd2807SJeff Garzik 
1384c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1385c6fd2807SJeff Garzik 
1386c6fd2807SJeff Garzik 	/* no internal command while frozen */
1387c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1388c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1389c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1390c6fd2807SJeff Garzik 	}
1391c6fd2807SJeff Garzik 
1392c6fd2807SJeff Garzik 	/* initialize internal qc */
1393c6fd2807SJeff Garzik 
1394c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1395c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1396c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1397c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1398c6fd2807SJeff Garzik 	 */
1399c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1400c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1401c6fd2807SJeff Garzik 	else
1402c6fd2807SJeff Garzik 		tag = 0;
1403c6fd2807SJeff Garzik 
1404c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1405c6fd2807SJeff Garzik 		BUG();
1406c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1407c6fd2807SJeff Garzik 
1408c6fd2807SJeff Garzik 	qc->tag = tag;
1409c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1410c6fd2807SJeff Garzik 	qc->ap = ap;
1411c6fd2807SJeff Garzik 	qc->dev = dev;
1412c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1413c6fd2807SJeff Garzik 
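	/* Save and clear the port's in-flight command bookkeeping so the
	 * internal command has the port to itself; the saved state is
	 * restored below once the command finishes.
	 */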
1414c6fd2807SJeff Garzik 	preempted_tag = ap->active_tag;
1415c6fd2807SJeff Garzik 	preempted_sactive = ap->sactive;
1416c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1417c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
1418c6fd2807SJeff Garzik 	ap->sactive = 0;
1419c6fd2807SJeff Garzik 	ap->qc_active = 0;
1420c6fd2807SJeff Garzik 
1421c6fd2807SJeff Garzik 	/* prepare & issue qc */
1422c6fd2807SJeff Garzik 	qc->tf = *tf;
1423c6fd2807SJeff Garzik 	if (cdb)
1424c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1425c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1426c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1427c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
14282432697bSTejun Heo 		unsigned int i, buflen = 0;
14292432697bSTejun Heo 
14302432697bSTejun Heo 		for (i = 0; i < n_elem; i++)
14312432697bSTejun Heo 			buflen += sg[i].length;
14322432697bSTejun Heo 
14332432697bSTejun Heo 		ata_sg_init(qc, sg, n_elem);
143449c80429SBrian King 		qc->nbytes = buflen;
1435c6fd2807SJeff Garzik 	}
1436c6fd2807SJeff Garzik 
1437c6fd2807SJeff Garzik 	qc->private_data = &wait;
1438c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1439c6fd2807SJeff Garzik 
1440c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1441c6fd2807SJeff Garzik 
1442c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1443c6fd2807SJeff Garzik 
1444c6fd2807SJeff Garzik 	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1445c6fd2807SJeff Garzik 
1446c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1447c6fd2807SJeff Garzik 
1448c6fd2807SJeff Garzik 	if (!rc) {
1449c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1450c6fd2807SJeff Garzik 
1451c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1452c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1453c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1454c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1455c6fd2807SJeff Garzik 		 */
1456c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1457c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1458c6fd2807SJeff Garzik 
1459c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1460c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1461c6fd2807SJeff Garzik 			else
1462c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1463c6fd2807SJeff Garzik 
1464c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1465c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1466c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1467c6fd2807SJeff Garzik 		}
1468c6fd2807SJeff Garzik 
1469c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1470c6fd2807SJeff Garzik 	}
1471c6fd2807SJeff Garzik 
1472c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1473c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1474c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1475c6fd2807SJeff Garzik 
1476a51d644aSTejun Heo 	/* perform minimal error analysis */
1477a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1478a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1479a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1480a51d644aSTejun Heo 
1481a51d644aSTejun Heo 		if (!qc->err_mask)
1482c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1483a51d644aSTejun Heo 
1484a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1485a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1486c6fd2807SJeff Garzik 	}
1487c6fd2807SJeff Garzik 
1488c6fd2807SJeff Garzik 	/* finish up */
1489c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1490c6fd2807SJeff Garzik 
1491c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1492c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1493c6fd2807SJeff Garzik 
1494c6fd2807SJeff Garzik 	ata_qc_free(qc);
1495c6fd2807SJeff Garzik 	ap->active_tag = preempted_tag;
1496c6fd2807SJeff Garzik 	ap->sactive = preempted_sactive;
1497c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1498c6fd2807SJeff Garzik 
1499c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1500c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1501c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1502c6fd2807SJeff Garzik 	 * port.
1503c6fd2807SJeff Garzik 	 *
1504c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1505c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1506c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1507c6fd2807SJeff Garzik 	 *
1508c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1509c6fd2807SJeff Garzik 	 */
1510c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1511c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1512c6fd2807SJeff Garzik 		ata_port_probe(ap);
1513c6fd2807SJeff Garzik 	}
1514c6fd2807SJeff Garzik 
1515c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1516c6fd2807SJeff Garzik 
1517c6fd2807SJeff Garzik 	return err_mask;
1518c6fd2807SJeff Garzik }
1519c6fd2807SJeff Garzik 
1520c6fd2807SJeff Garzik /**
152133480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
15222432697bSTejun Heo  *	@dev: Device to which the command is sent
15232432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
15242432697bSTejun Heo  *	@cdb: CDB for packet command
15252432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
15262432697bSTejun Heo  *	@buf: Data buffer of the command
15272432697bSTejun Heo  *	@buflen: Length of data buffer
15282432697bSTejun Heo  *
15292432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
15302432697bSTejun Heo  *	buffer instead of sg list.
15312432697bSTejun Heo  *
15322432697bSTejun Heo  *	LOCKING:
15332432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
15342432697bSTejun Heo  *
15352432697bSTejun Heo  *	RETURNS:
15362432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
15372432697bSTejun Heo  */
15382432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
15392432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
15402432697bSTejun Heo 			   int dma_dir, void *buf, unsigned int buflen)
15412432697bSTejun Heo {
154233480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
154333480a0eSTejun Heo 	unsigned int n_elem = 0;
15442432697bSTejun Heo 
154533480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
154633480a0eSTejun Heo 		WARN_ON(!buf);
15472432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
154833480a0eSTejun Heo 		psg = &sg;
154933480a0eSTejun Heo 		n_elem++;
155033480a0eSTejun Heo 	}
15512432697bSTejun Heo 
155233480a0eSTejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
15532432697bSTejun Heo }
15542432697bSTejun Heo 
15552432697bSTejun Heo /**
1556c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1557c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1558c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1559c6fd2807SJeff Garzik  *
1560c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1561c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1562c6fd2807SJeff Garzik  *
1563c6fd2807SJeff Garzik  *	LOCKING:
1564c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1565c6fd2807SJeff Garzik  *
1566c6fd2807SJeff Garzik  *	RETURNS:
1567c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1568c6fd2807SJeff Garzik  */
1569c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1570c6fd2807SJeff Garzik {
1571c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1572c6fd2807SJeff Garzik 
1573c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1574c6fd2807SJeff Garzik 
1575c6fd2807SJeff Garzik 	tf.command = cmd;
1576c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1577c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1578c6fd2807SJeff Garzik 
1579c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1580c6fd2807SJeff Garzik }
1581c6fd2807SJeff Garzik 
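/*
 * Illustrative use only: flushing a drive's write cache, for example, can
 * be built on ata_do_simple_cmd().  ATA_CMD_FLUSH is assumed to come from
 * <linux/ata.h>:
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_WARNING,
 *			       "flush failed (err_mask=0x%x)\n", err_mask);
 */
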
1582c6fd2807SJeff Garzik /**
1583c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1584c6fd2807SJeff Garzik  *	@adev: ATA device
1585c6fd2807SJeff Garzik  *
1586c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1587c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1588c6fd2807SJeff Garzik  */
1589c6fd2807SJeff Garzik 
1590c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1591c6fd2807SJeff Garzik {
1592432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check,
1593432729f0SAlan Cox 	   as the caller should know this. */
1594432729f0SAlan Cox 	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
1595c6fd2807SJeff Garzik 		return 0;
1596432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1597432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1598c6fd2807SJeff Garzik 		return 1;
1599432729f0SAlan Cox 	/* We turn it on when possible */
1600432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1601432729f0SAlan Cox 		return 1;
1602432729f0SAlan Cox 	return 0;
1603432729f0SAlan Cox }
1604c6fd2807SJeff Garzik 
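/*
 * Illustrative use only: a chip driver would typically consult this from
 * its ->set_piomode() hook when programming timing registers, e.g. with a
 * hypothetical IORDY-enable bit FOO_IORDY_EN:
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing |= FOO_IORDY_EN;
 */
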
1605432729f0SAlan Cox /**
1606432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1607432729f0SAlan Cox  *	@adev: ATA device
1608432729f0SAlan Cox  *
1609432729f0SAlan Cox  *	Compute the mask of PIO modes usable when IORDY is not in use and
1610432729f0SAlan Cox  *	return it; at least PIO0-1 are always included.
1611432729f0SAlan Cox  */
1612432729f0SAlan Cox 
1613432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1614432729f0SAlan Cox {
1615c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1616c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1617432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1618c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1619c6fd2807SJeff Garzik 		if (pio) {
1620c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1621c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1622432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1623432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1624c6fd2807SJeff Garzik 		}
1625c6fd2807SJeff Garzik 	}
1626432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1627c6fd2807SJeff Garzik }
1628c6fd2807SJeff Garzik 
1629c6fd2807SJeff Garzik /**
1630c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1631c6fd2807SJeff Garzik  *	@dev: target device
1632c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1633bff04647STejun Heo  *	@flags: ATA_READID_* flags
1634c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1635c6fd2807SJeff Garzik  *
1636c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1637c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1638c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1639c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1640c6fd2807SJeff Garzik  *
1641c6fd2807SJeff Garzik  *	LOCKING:
1642c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1643c6fd2807SJeff Garzik  *
1644c6fd2807SJeff Garzik  *	RETURNS:
1645c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1646c6fd2807SJeff Garzik  */
1647c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1648bff04647STejun Heo 		    unsigned int flags, u16 *id)
1649c6fd2807SJeff Garzik {
1650c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1651c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1652c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1653c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1654c6fd2807SJeff Garzik 	const char *reason;
1655169439c2SMark Lord 	int tried_spinup = 0;
1656c6fd2807SJeff Garzik 	int rc;
1657c6fd2807SJeff Garzik 
1658c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
165944877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1660c6fd2807SJeff Garzik 
1661c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1662c6fd2807SJeff Garzik  retry:
1663c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1664c6fd2807SJeff Garzik 
1665c6fd2807SJeff Garzik 	switch (class) {
1666c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1667c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1668c6fd2807SJeff Garzik 		break;
1669c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1670c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1671c6fd2807SJeff Garzik 		break;
1672c6fd2807SJeff Garzik 	default:
1673c6fd2807SJeff Garzik 		rc = -ENODEV;
1674c6fd2807SJeff Garzik 		reason = "unsupported class";
1675c6fd2807SJeff Garzik 		goto err_out;
1676c6fd2807SJeff Garzik 	}
1677c6fd2807SJeff Garzik 
1678c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
167981afe893STejun Heo 
168081afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
168181afe893STejun Heo 	 * sure those are properly initialized.
168281afe893STejun Heo 	 */
168381afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
168481afe893STejun Heo 
168581afe893STejun Heo 	/* Device presence detection is unreliable on some
168681afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
168781afe893STejun Heo 	 */
168881afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1689c6fd2807SJeff Garzik 
1690c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1691c6fd2807SJeff Garzik 				     id, sizeof(id[0]) * ATA_ID_WORDS);
1692c6fd2807SJeff Garzik 	if (err_mask) {
1693800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
169455a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
169544877b4eSTejun Heo 				ap->print_id, dev->devno);
169655a8e2c8STejun Heo 			return -ENOENT;
169755a8e2c8STejun Heo 		}
169855a8e2c8STejun Heo 
1699c6fd2807SJeff Garzik 		rc = -EIO;
1700c6fd2807SJeff Garzik 		reason = "I/O error";
1701c6fd2807SJeff Garzik 		goto err_out;
1702c6fd2807SJeff Garzik 	}
1703c6fd2807SJeff Garzik 
1704c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1705c6fd2807SJeff Garzik 
1706c6fd2807SJeff Garzik 	/* sanity check */
1707c6fd2807SJeff Garzik 	rc = -EINVAL;
1708c6fd2807SJeff Garzik 	reason = "device reports illegal type";
17094a3381feSJeff Garzik 
17104a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
17114a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
17124a3381feSJeff Garzik 			goto err_out;
17134a3381feSJeff Garzik 	} else {
17144a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1715c6fd2807SJeff Garzik 			goto err_out;
1716c6fd2807SJeff Garzik 	}
1717c6fd2807SJeff Garzik 
1718169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1719169439c2SMark Lord 		tried_spinup = 1;
1720169439c2SMark Lord 		/*
1721169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1722169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1723169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1724169439c2SMark Lord 		 */
1725169439c2SMark Lord 		ata_tf_init(dev, &tf);
1726169439c2SMark Lord 		tf.command = ATA_CMD_SET_FEATURES;
1727169439c2SMark Lord 		tf.feature = SETFEATURES_SPINUP;
1728169439c2SMark Lord 		tf.protocol = ATA_PROT_NODATA;
1729169439c2SMark Lord 		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1730169439c2SMark Lord 		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1731169439c2SMark Lord 		if (err_mask) {
1732169439c2SMark Lord 			rc = -EIO;
1733169439c2SMark Lord 			reason = "SPINUP failed";
1734169439c2SMark Lord 			goto err_out;
1735169439c2SMark Lord 		}
1736169439c2SMark Lord 		/*
1737169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1738169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1739169439c2SMark Lord 		 */
1740169439c2SMark Lord 		if (id[2] == 0x37c8)
1741169439c2SMark Lord 			goto retry;
1742169439c2SMark Lord 	}
1743169439c2SMark Lord 
1744bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1745c6fd2807SJeff Garzik 		/*
1746c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1747c6fd2807SJeff Garzik 		 * SRST RESET
1748c6fd2807SJeff Garzik 		 * IDENTIFY
1749c6fd2807SJeff Garzik 		 * INITIALIZE DEVICE PARAMETERS
1750c6fd2807SJeff Garzik 		 * anything else..
1751c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
1752c6fd2807SJeff Garzik 		 */
1753c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1754c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1755c6fd2807SJeff Garzik 			if (err_mask) {
1756c6fd2807SJeff Garzik 				rc = -EIO;
1757c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1758c6fd2807SJeff Garzik 				goto err_out;
1759c6fd2807SJeff Garzik 			}
1760c6fd2807SJeff Garzik 
1761c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1762c6fd2807SJeff Garzik 			 * changed.  Reread the IDENTIFY DEVICE info.
1763c6fd2807SJeff Garzik 			 */
1764bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1765c6fd2807SJeff Garzik 			goto retry;
1766c6fd2807SJeff Garzik 		}
1767c6fd2807SJeff Garzik 	}
1768c6fd2807SJeff Garzik 
1769c6fd2807SJeff Garzik 	*p_class = class;
1770c6fd2807SJeff Garzik 
1771c6fd2807SJeff Garzik 	return 0;
1772c6fd2807SJeff Garzik 
1773c6fd2807SJeff Garzik  err_out:
1774c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1775c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1776c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1777c6fd2807SJeff Garzik 	return rc;
1778c6fd2807SJeff Garzik }
1779c6fd2807SJeff Garzik 
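/* True when a PATA device sits behind a SATA-PATA bridge: the port is
 * SATA but the device's IDENTIFY data does not claim SATA, so bridge
 * limits are applied in ata_dev_configure().
 */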
1780c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
1781c6fd2807SJeff Garzik {
1782c6fd2807SJeff Garzik 	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1783c6fd2807SJeff Garzik }
1784c6fd2807SJeff Garzik 
1785c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
1786c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
1787c6fd2807SJeff Garzik {
1788c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
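	/* ddepth is the queue depth the device advertises; hdepth is what
	 * the host controller can actually queue (set below when the port
	 * supports NCQ).
	 */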
1789c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1790c6fd2807SJeff Garzik 
1791c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
1792c6fd2807SJeff Garzik 		desc[0] = '\0';
1793c6fd2807SJeff Garzik 		return;
1794c6fd2807SJeff Garzik 	}
17956919a0a6SAlan Cox 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
17966919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
17976919a0a6SAlan Cox 		return;
17986919a0a6SAlan Cox 	}
1799c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
1800cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1801c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
1802c6fd2807SJeff Garzik 	}
1803c6fd2807SJeff Garzik 
1804c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
1805c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1806c6fd2807SJeff Garzik 	else
1807c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1808c6fd2807SJeff Garzik }
1809c6fd2807SJeff Garzik 
1810c6fd2807SJeff Garzik /**
1811c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1812c6fd2807SJeff Garzik  *	@dev: Target device to configure
1813c6fd2807SJeff Garzik  *
1814c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
1815c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
1816c6fd2807SJeff Garzik  *
1817c6fd2807SJeff Garzik  *	LOCKING:
1818c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1819c6fd2807SJeff Garzik  *
1820c6fd2807SJeff Garzik  *	RETURNS:
1821c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1822c6fd2807SJeff Garzik  */
1823efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
1824c6fd2807SJeff Garzik {
1825c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
1826efdaedc4STejun Heo 	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1827c6fd2807SJeff Garzik 	const u16 *id = dev->id;
1828c6fd2807SJeff Garzik 	unsigned int xfer_mask;
1829b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
18303f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
18313f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
1832c6fd2807SJeff Garzik 	int rc;
1833c6fd2807SJeff Garzik 
1834c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
183544877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
183644877b4eSTejun Heo 			       __FUNCTION__);
1837c6fd2807SJeff Garzik 		return 0;
1838c6fd2807SJeff Garzik 	}
1839c6fd2807SJeff Garzik 
1840c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
184144877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1842c6fd2807SJeff Garzik 
184308573a86SKristen Carlson Accardi 	/* set _SDD */
184408573a86SKristen Carlson Accardi 	rc = ata_acpi_push_id(ap, dev->devno);
184508573a86SKristen Carlson Accardi 	if (rc) {
184608573a86SKristen Carlson Accardi 		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
184708573a86SKristen Carlson Accardi 			rc);
184808573a86SKristen Carlson Accardi 	}
184908573a86SKristen Carlson Accardi 
185008573a86SKristen Carlson Accardi 	/* retrieve and execute the ATA task file of _GTF */
185108573a86SKristen Carlson Accardi 	ata_acpi_exec_tfs(ap);
185208573a86SKristen Carlson Accardi 
1853c6fd2807SJeff Garzik 	/* print device capabilities */
1854c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1855c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
1856c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1857c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1858c6fd2807SJeff Garzik 			       __FUNCTION__,
1859c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
1860c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
1861c6fd2807SJeff Garzik 
1862c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
1863c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1864c6fd2807SJeff Garzik 	dev->max_sectors = 0;
1865c6fd2807SJeff Garzik 	dev->cdb_len = 0;
1866c6fd2807SJeff Garzik 	dev->n_sectors = 0;
1867c6fd2807SJeff Garzik 	dev->cylinders = 0;
1868c6fd2807SJeff Garzik 	dev->heads = 0;
1869c6fd2807SJeff Garzik 	dev->sectors = 0;
1870c6fd2807SJeff Garzik 
1871c6fd2807SJeff Garzik 	/*
1872c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
1873c6fd2807SJeff Garzik 	 */
1874c6fd2807SJeff Garzik 
1875c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
1876c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
1877c6fd2807SJeff Garzik 
1878c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1879c6fd2807SJeff Garzik 		ata_dump_id(id);
1880c6fd2807SJeff Garzik 
1881c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
1882c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
1883b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
1884b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
188544877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
188644877b4eSTejun Heo 					       "supports DRM functions and may "
188744877b4eSTejun Heo 					       "not be fully accessible.\n");
1888b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
1889b352e57dSAlan Cox 		}
1890b352e57dSAlan Cox 		else
1891b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
1892b352e57dSAlan Cox 
1893c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
18941e999736SAlan Cox 		dev->n_sectors_boot = dev->n_sectors;
1895c6fd2807SJeff Garzik 
18963f64f565SEric D. Mudama 		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1897591a6e8eSJeff Garzik 		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
18983f64f565SEric D. Mudama 				sizeof(fwrevbuf));
18993f64f565SEric D. Mudama 
1900591a6e8eSJeff Garzik 		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
19013f64f565SEric D. Mudama 				sizeof(modelbuf));
19023f64f565SEric D. Mudama 
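		/* Word 59: bit 8 means the current multi-sector count in
		 * the low byte is valid.
		 */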
19033f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
19043f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
19053f64f565SEric D. Mudama 
1906c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
1907c6fd2807SJeff Garzik 			const char *lba_desc;
1908c6fd2807SJeff Garzik 			char ncq_desc[20];
1909c6fd2807SJeff Garzik 
1910c6fd2807SJeff Garzik 			lba_desc = "LBA";
1911c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
1912c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
1913c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
1914c6fd2807SJeff Garzik 				lba_desc = "LBA48";
19156fc49adbSTejun Heo 
19166fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
19176fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
19186fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1919c6fd2807SJeff Garzik 			}
1920c6fd2807SJeff Garzik 
19211e999736SAlan Cox 			if (ata_id_hpa_enabled(dev->id))
19221e999736SAlan Cox 				dev->n_sectors = ata_hpa_resize(dev);
19231e999736SAlan Cox 
1924c6fd2807SJeff Garzik 			/* config NCQ */
1925c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1926c6fd2807SJeff Garzik 
1927c6fd2807SJeff Garzik 			/* print device info to dmesg */
19283f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
19293f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19303f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19313f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
19323f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19333f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19343f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
1935c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
19363f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
19373f64f565SEric D. Mudama 			}
1938c6fd2807SJeff Garzik 		} else {
1939c6fd2807SJeff Garzik 			/* CHS */
1940c6fd2807SJeff Garzik 
1941c6fd2807SJeff Garzik 			/* Default translation */
1942c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
1943c6fd2807SJeff Garzik 			dev->heads	= id[3];
1944c6fd2807SJeff Garzik 			dev->sectors	= id[6];
1945c6fd2807SJeff Garzik 
1946c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
1947c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
1948c6fd2807SJeff Garzik 				dev->cylinders = id[54];
1949c6fd2807SJeff Garzik 				dev->heads     = id[55];
1950c6fd2807SJeff Garzik 				dev->sectors   = id[56];
1951c6fd2807SJeff Garzik 			}
1952c6fd2807SJeff Garzik 
1953c6fd2807SJeff Garzik 			/* print device info to dmesg */
19543f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
1955c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
19563f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19573f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
19583f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19593f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19603f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
19613f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
19623f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
19633f64f565SEric D. Mudama 					dev->heads, dev->sectors);
19643f64f565SEric D. Mudama 			}
1965c6fd2807SJeff Garzik 		}
1966c6fd2807SJeff Garzik 
1967c6fd2807SJeff Garzik 		dev->cdb_len = 16;
1968c6fd2807SJeff Garzik 	}
1969c6fd2807SJeff Garzik 
1970c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
1971c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
1972c6fd2807SJeff Garzik 		char *cdb_intr_string = "";
1973c6fd2807SJeff Garzik 
1974c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
1975c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1976c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1977c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1978c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
1979c6fd2807SJeff Garzik 			rc = -EINVAL;
1980c6fd2807SJeff Garzik 			goto err_out_nosup;
1981c6fd2807SJeff Garzik 		}
1982c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
1983c6fd2807SJeff Garzik 
1984c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
1985c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
1986c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
1987c6fd2807SJeff Garzik 		}
1988c6fd2807SJeff Garzik 
1989c6fd2807SJeff Garzik 		/* print device info to dmesg */
1990c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
1991c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1992c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
1993c6fd2807SJeff Garzik 				       cdb_intr_string);
1994c6fd2807SJeff Garzik 	}
1995c6fd2807SJeff Garzik 
1996914ed354STejun Heo 	/* determine max_sectors */
1997914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
1998914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
1999914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2000914ed354STejun Heo 
200193590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
200293590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
200393590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
200493590859SAlan Cox 		   idiot */
200593590859SAlan Cox 		if (print_info) {
200693590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
200793590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
200893590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
200993590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
201093590859SAlan Cox 		}
201193590859SAlan Cox 	}
201293590859SAlan Cox 
2013c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2014c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2015c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2016c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2017c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2018c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2019c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2020c6fd2807SJeff Garzik 	}
2021c6fd2807SJeff Garzik 
202218d6e9d5SAlbert Lee 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
202303ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
202403ec52deSTejun Heo 					 dev->max_sectors);
202518d6e9d5SAlbert Lee 
20266f23a31dSAlbert Lee 	/* limit ATAPI DMA to R/W commands only */
20276f23a31dSAlbert Lee 	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
20286f23a31dSAlbert Lee 		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
20296f23a31dSAlbert Lee 
2030c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2031cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2032c6fd2807SJeff Garzik 
2033c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2034c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2035c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2036c6fd2807SJeff Garzik 	return 0;
2037c6fd2807SJeff Garzik 
2038c6fd2807SJeff Garzik err_out_nosup:
2039c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2040c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2041c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2042c6fd2807SJeff Garzik 	return rc;
2043c6fd2807SJeff Garzik }
2044c6fd2807SJeff Garzik 
2045c6fd2807SJeff Garzik /**
20462e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2047be0d18dfSAlan Cox  *	@ap: port
2048be0d18dfSAlan Cox  *
20492e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2050be0d18dfSAlan Cox  *	detection.
2051be0d18dfSAlan Cox  */
2052be0d18dfSAlan Cox 
2053be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2054be0d18dfSAlan Cox {
2055be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2056be0d18dfSAlan Cox }
2057be0d18dfSAlan Cox 
2058be0d18dfSAlan Cox /**
20592e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2060be0d18dfSAlan Cox  *	@ap: port
2061be0d18dfSAlan Cox  *
20622e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2063be0d18dfSAlan Cox  *	detection.
2064be0d18dfSAlan Cox  */
2065be0d18dfSAlan Cox 
2066be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2067be0d18dfSAlan Cox {
2068be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2069be0d18dfSAlan Cox }
2070be0d18dfSAlan Cox 
2071be0d18dfSAlan Cox /**
2072be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2073be0d18dfSAlan Cox  *	@ap: port
2074be0d18dfSAlan Cox  *
2075be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2076be0d18dfSAlan Cox  */
2077be0d18dfSAlan Cox 
2078be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2079be0d18dfSAlan Cox {
2080be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2081be0d18dfSAlan Cox }
2082be0d18dfSAlan Cox 
2083be0d18dfSAlan Cox /**
2084be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2085be0d18dfSAlan Cox  *	@ap: port
2086be0d18dfSAlan Cox  *
2087be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2088be0d18dfSAlan Cox  */
2089be0d18dfSAlan Cox 
2090be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2091be0d18dfSAlan Cox {
2092be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2093be0d18dfSAlan Cox }
2094be0d18dfSAlan Cox 
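/*
 * Illustrative use only: a driver with no cable detection logic of its own
 * would typically plug one of the helpers above into its operations table,
 * e.g. for a hypothetical driver "foo":
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		...
 *		.cable_detect	= ata_cable_unknown,
 *		...
 *	};
 */
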
2095be0d18dfSAlan Cox /**
2096c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2097c6fd2807SJeff Garzik  *	@ap: Bus to probe
2098c6fd2807SJeff Garzik  *
2099c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2100c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2101c6fd2807SJeff Garzik  *	the bus.
2102c6fd2807SJeff Garzik  *
2103c6fd2807SJeff Garzik  *	LOCKING:
2104c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2105c6fd2807SJeff Garzik  *
2106c6fd2807SJeff Garzik  *	RETURNS:
2107c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2108c6fd2807SJeff Garzik  */
2109c6fd2807SJeff Garzik 
2110c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2111c6fd2807SJeff Garzik {
2112c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2113c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
21144ae72a1eSTejun Heo 	int i, rc;
2115c6fd2807SJeff Garzik 	struct ata_device *dev;
2116c6fd2807SJeff Garzik 
2117c6fd2807SJeff Garzik 	ata_port_probe(ap);
2118c6fd2807SJeff Garzik 
2119c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2120c6fd2807SJeff Garzik 		tries[i] = ATA_PROBE_MAX_TRIES;
2121c6fd2807SJeff Garzik 
2122c6fd2807SJeff Garzik  retry:
2123c6fd2807SJeff Garzik 	/* reset and determine device classes */
2124c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2125c6fd2807SJeff Garzik 
2126c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2127c6fd2807SJeff Garzik 		dev = &ap->device[i];
2128c6fd2807SJeff Garzik 
2129c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2130c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2131c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2132c6fd2807SJeff Garzik 		else
2133c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2134c6fd2807SJeff Garzik 
2135c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2136c6fd2807SJeff Garzik 	}
2137c6fd2807SJeff Garzik 
2138c6fd2807SJeff Garzik 	ata_port_probe(ap);
2139c6fd2807SJeff Garzik 
2140c6fd2807SJeff Garzik 	/* after the reset the device state is PIO 0 and the controller
2141c6fd2807SJeff Garzik 	   state is undefined. Record the mode */
2142c6fd2807SJeff Garzik 
2143c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2144c6fd2807SJeff Garzik 		ap->device[i].pio_mode = XFER_PIO_0;
2145c6fd2807SJeff Garzik 
2146f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2147f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2148f31f0cc2SJeff Garzik 	   the slave device */
2149f31f0cc2SJeff Garzik 
2150f31f0cc2SJeff Garzik 	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
2151c6fd2807SJeff Garzik 		dev = &ap->device[i];
2152c6fd2807SJeff Garzik 
2153c6fd2807SJeff Garzik 		if (tries[i])
2154c6fd2807SJeff Garzik 			dev->class = classes[i];
2155c6fd2807SJeff Garzik 
2156c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2157c6fd2807SJeff Garzik 			continue;
2158c6fd2807SJeff Garzik 
2159bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2160bff04647STejun Heo 				     dev->id);
2161c6fd2807SJeff Garzik 		if (rc)
2162c6fd2807SJeff Garzik 			goto fail;
2163f31f0cc2SJeff Garzik 	}
2164f31f0cc2SJeff Garzik 
2165be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2166be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2167be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2168be0d18dfSAlan Cox 
2169f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2170f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2171f31f0cc2SJeff Garzik 
2172f31f0cc2SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2173f31f0cc2SJeff Garzik 		dev = &ap->device[i];
2174f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2175f31f0cc2SJeff Garzik 			continue;
2176c6fd2807SJeff Garzik 
2177efdaedc4STejun Heo 		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2178efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
2179efdaedc4STejun Heo 		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2180c6fd2807SJeff Garzik 		if (rc)
2181c6fd2807SJeff Garzik 			goto fail;
2182c6fd2807SJeff Garzik 	}
2183c6fd2807SJeff Garzik 
2184c6fd2807SJeff Garzik 	/* configure transfer mode */
2185c6fd2807SJeff Garzik 	rc = ata_set_mode(ap, &dev);
21864ae72a1eSTejun Heo 	if (rc)
2187c6fd2807SJeff Garzik 		goto fail;
2188c6fd2807SJeff Garzik 
2189c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
2190c6fd2807SJeff Garzik 		if (ata_dev_enabled(&ap->device[i]))
2191c6fd2807SJeff Garzik 			return 0;
2192c6fd2807SJeff Garzik 
2193c6fd2807SJeff Garzik 	/* no device present, disable port */
2194c6fd2807SJeff Garzik 	ata_port_disable(ap);
2195c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
2196c6fd2807SJeff Garzik 	return -ENODEV;
2197c6fd2807SJeff Garzik 
2198c6fd2807SJeff Garzik  fail:
21994ae72a1eSTejun Heo 	tries[dev->devno]--;
22004ae72a1eSTejun Heo 
2201c6fd2807SJeff Garzik 	switch (rc) {
2202c6fd2807SJeff Garzik 	case -EINVAL:
22034ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2204c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2205c6fd2807SJeff Garzik 		break;
22064ae72a1eSTejun Heo 
22074ae72a1eSTejun Heo 	case -ENODEV:
22084ae72a1eSTejun Heo 		/* give it just one more chance */
22094ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
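		/* fall through: the -EIO handling below also applies */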
2210c6fd2807SJeff Garzik 	case -EIO:
22114ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
22124ae72a1eSTejun Heo 			/* This is the last chance, better to slow
22134ae72a1eSTejun Heo 			 * down than lose it.
22144ae72a1eSTejun Heo 			 */
2215c6fd2807SJeff Garzik 			sata_down_spd_limit(ap);
22164ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
22174ae72a1eSTejun Heo 		}
2218c6fd2807SJeff Garzik 	}
2219c6fd2807SJeff Garzik 
22204ae72a1eSTejun Heo 	if (!tries[dev->devno])
2221c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2222c6fd2807SJeff Garzik 
2223c6fd2807SJeff Garzik 	goto retry;
2224c6fd2807SJeff Garzik }
2225c6fd2807SJeff Garzik 
2226c6fd2807SJeff Garzik /**
2227c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2228c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2229c6fd2807SJeff Garzik  *
2230c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2231c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2232c6fd2807SJeff Garzik  *
2233cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2234c6fd2807SJeff Garzik  *	serialization.
2235c6fd2807SJeff Garzik  */
2236c6fd2807SJeff Garzik 
2237c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2238c6fd2807SJeff Garzik {
2239c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2240c6fd2807SJeff Garzik }
2241c6fd2807SJeff Garzik 
2242c6fd2807SJeff Garzik /**
2243c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2244c6fd2807SJeff Garzik  *	@ap: SATA port to printk link status about
2245c6fd2807SJeff Garzik  *
2246c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2247c6fd2807SJeff Garzik  *
2248c6fd2807SJeff Garzik  *	LOCKING:
2249c6fd2807SJeff Garzik  *	None.
2250c6fd2807SJeff Garzik  */
225143727fbcSJeff Garzik void sata_print_link_status(struct ata_port *ap)
2252c6fd2807SJeff Garzik {
2253c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2254c6fd2807SJeff Garzik 
2255c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
2256c6fd2807SJeff Garzik 		return;
2257c6fd2807SJeff Garzik 	sata_scr_read(ap, SCR_CONTROL, &scontrol);
2258c6fd2807SJeff Garzik 
2259c6fd2807SJeff Garzik 	if (ata_port_online(ap)) {
2260c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2261c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2262c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2263c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2264c6fd2807SJeff Garzik 	} else {
2265c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_INFO,
2266c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2267c6fd2807SJeff Garzik 				sstatus, scontrol);
2268c6fd2807SJeff Garzik 	}
2269c6fd2807SJeff Garzik }
2270c6fd2807SJeff Garzik 
2271c6fd2807SJeff Garzik /**
2272c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2273c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2274c6fd2807SJeff Garzik  *
2275c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2276c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2277c6fd2807SJeff Garzik  *	clear any reset condition.
2278c6fd2807SJeff Garzik  *
2279c6fd2807SJeff Garzik  *	LOCKING:
2280c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2281c6fd2807SJeff Garzik  *
2282c6fd2807SJeff Garzik  */
2283c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2284c6fd2807SJeff Garzik {
2285c6fd2807SJeff Garzik 	u32 sstatus;
2286c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2287c6fd2807SJeff Garzik 
2288c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2289c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2290c6fd2807SJeff Garzik 		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2291c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2292c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2293c6fd2807SJeff Garzik 		mdelay(1);
2294c6fd2807SJeff Garzik 	}
2295c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2296c6fd2807SJeff Garzik 	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2297c6fd2807SJeff Garzik 
2298c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2299c6fd2807SJeff Garzik 	do {
2300c6fd2807SJeff Garzik 		msleep(200);
2301c6fd2807SJeff Garzik 		sata_scr_read(ap, SCR_STATUS, &sstatus);
2302c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2303c6fd2807SJeff Garzik 			break;
2304c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2305c6fd2807SJeff Garzik 
2306c6fd2807SJeff Garzik 	/* print link status */
2307c6fd2807SJeff Garzik 	sata_print_link_status(ap);
2308c6fd2807SJeff Garzik 
2309c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2310c6fd2807SJeff Garzik 	if (!ata_port_offline(ap))
2311c6fd2807SJeff Garzik 		ata_port_probe(ap);
2312c6fd2807SJeff Garzik 	else
2313c6fd2807SJeff Garzik 		ata_port_disable(ap);
2314c6fd2807SJeff Garzik 
2315c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2316c6fd2807SJeff Garzik 		return;
2317c6fd2807SJeff Garzik 
2318c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2319c6fd2807SJeff Garzik 		ata_port_disable(ap);
2320c6fd2807SJeff Garzik 		return;
2321c6fd2807SJeff Garzik 	}
2322c6fd2807SJeff Garzik 
2323c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2324c6fd2807SJeff Garzik }
2325c6fd2807SJeff Garzik 
2326c6fd2807SJeff Garzik /**
2327c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2328c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2329c6fd2807SJeff Garzik  *
2330c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2331c6fd2807SJeff Garzik  *	the bus for devices.
2332c6fd2807SJeff Garzik  *
2333c6fd2807SJeff Garzik  *	LOCKING:
2334c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2335c6fd2807SJeff Garzik  *
2336c6fd2807SJeff Garzik  */
2337c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2338c6fd2807SJeff Garzik {
2339c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2340c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2341c6fd2807SJeff Garzik 		return;
2342c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2343c6fd2807SJeff Garzik }
2344c6fd2807SJeff Garzik 
2345c6fd2807SJeff Garzik /**
2346c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2347c6fd2807SJeff Garzik  *	@adev: device
2348c6fd2807SJeff Garzik  *
2349c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or NULL if no
2350c6fd2807SJeff Garzik  *	such device is present.
2351c6fd2807SJeff Garzik  */
2352c6fd2807SJeff Garzik 
2353c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2354c6fd2807SJeff Garzik {
2355c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
2356c6fd2807SJeff Garzik 	struct ata_device *pair = &ap->device[1 - adev->devno];
2357c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2358c6fd2807SJeff Garzik 		return NULL;
2359c6fd2807SJeff Garzik 	return pair;
2360c6fd2807SJeff Garzik }
2361c6fd2807SJeff Garzik 
2362c6fd2807SJeff Garzik /**
2363c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2364c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2365c6fd2807SJeff Garzik  *
2366c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2367c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2368c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2369c6fd2807SJeff Garzik  *	on this port.
2370c6fd2807SJeff Garzik  *
2371cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2372c6fd2807SJeff Garzik  *	serialization.
2373c6fd2807SJeff Garzik  */
2374c6fd2807SJeff Garzik 
2375c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2376c6fd2807SJeff Garzik {
2377c6fd2807SJeff Garzik 	ap->device[0].class = ATA_DEV_NONE;
2378c6fd2807SJeff Garzik 	ap->device[1].class = ATA_DEV_NONE;
2379c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2380c6fd2807SJeff Garzik }
2381c6fd2807SJeff Garzik 
2382c6fd2807SJeff Garzik /**
2383c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2384c6fd2807SJeff Garzik  *	@ap: Port to adjust SATA spd limit for
2385c6fd2807SJeff Garzik  *
2386c6fd2807SJeff Garzik  *	Adjust SATA spd limit of @ap downward.  Note that this
2387c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2388c6fd2807SJeff Garzik  *	using sata_set_spd().
2389c6fd2807SJeff Garzik  *
2390c6fd2807SJeff Garzik  *	LOCKING:
2391c6fd2807SJeff Garzik  *	Inherited from caller.
2392c6fd2807SJeff Garzik  *
2393c6fd2807SJeff Garzik  *	RETURNS:
2394c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2395c6fd2807SJeff Garzik  */
2396c6fd2807SJeff Garzik int sata_down_spd_limit(struct ata_port *ap)
2397c6fd2807SJeff Garzik {
2398c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2399c6fd2807SJeff Garzik 	int rc, highbit;
2400c6fd2807SJeff Garzik 
2401c6fd2807SJeff Garzik 	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2402c6fd2807SJeff Garzik 	if (rc)
2403c6fd2807SJeff Garzik 		return rc;
2404c6fd2807SJeff Garzik 
2405c6fd2807SJeff Garzik 	mask = ap->sata_spd_limit;
2406c6fd2807SJeff Garzik 	if (mask <= 1)
2407c6fd2807SJeff Garzik 		return -EINVAL;
2408c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2409c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2410c6fd2807SJeff Garzik 
2411c6fd2807SJeff Garzik 	spd = (sstatus >> 4) & 0xf;
2412c6fd2807SJeff Garzik 	if (spd <= 1)
2413c6fd2807SJeff Garzik 		return -EINVAL;
2414c6fd2807SJeff Garzik 	spd--;
2415c6fd2807SJeff Garzik 	mask &= (1 << spd) - 1;
2416c6fd2807SJeff Garzik 	if (!mask)
2417c6fd2807SJeff Garzik 		return -EINVAL;
2418c6fd2807SJeff Garzik 
2419c6fd2807SJeff Garzik 	ap->sata_spd_limit = mask;
2420c6fd2807SJeff Garzik 
2421c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2422c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2423c6fd2807SJeff Garzik 
2424c6fd2807SJeff Garzik 	return 0;
2425c6fd2807SJeff Garzik }
2426c6fd2807SJeff Garzik 
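/*
 * Worked example of the mask arithmetic above (illustrative values,
 * an editorial sketch rather than code from this file): sata_spd_limit
 * is a mask of allowed link speeds, bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps.
 * Assume the limit is 0x3 and SStatus reports the link at 3.0 Gbps
 * (SPD field == 2):
 *
 *	mask = 0x3;  highbit = fls(mask) - 1 = 1;  mask &= ~(1 << 1);	mask == 0x1
 *	spd = 2;     spd--;                        mask &= (1 << 1) - 1;	mask == 0x1
 *
 * The new limit therefore allows only 1.5 Gbps, which sata_set_spd()
 * can then program into SControl.
 */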
2427c6fd2807SJeff Garzik static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2428c6fd2807SJeff Garzik {
2429c6fd2807SJeff Garzik 	u32 spd, limit;
2430c6fd2807SJeff Garzik 
2431c6fd2807SJeff Garzik 	if (ap->sata_spd_limit == UINT_MAX)
2432c6fd2807SJeff Garzik 		limit = 0;
2433c6fd2807SJeff Garzik 	else
2434c6fd2807SJeff Garzik 		limit = fls(ap->sata_spd_limit);
2435c6fd2807SJeff Garzik 
2436c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
2437c6fd2807SJeff Garzik 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2438c6fd2807SJeff Garzik 
2439c6fd2807SJeff Garzik 	return spd != limit;
2440c6fd2807SJeff Garzik }
2441c6fd2807SJeff Garzik 
2442c6fd2807SJeff Garzik /**
2443c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2444c6fd2807SJeff Garzik  *	@ap: Port in question
2445c6fd2807SJeff Garzik  *
2446c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2447c6fd2807SJeff Garzik  *	@ap->sata_spd_limit.  This function is used to determine
2448c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2449c6fd2807SJeff Garzik  *	configuration.
2450c6fd2807SJeff Garzik  *
2451c6fd2807SJeff Garzik  *	LOCKING:
2452c6fd2807SJeff Garzik  *	Inherited from caller.
2453c6fd2807SJeff Garzik  *
2454c6fd2807SJeff Garzik  *	RETURNS:
2455c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2456c6fd2807SJeff Garzik  */
2457c6fd2807SJeff Garzik int sata_set_spd_needed(struct ata_port *ap)
2458c6fd2807SJeff Garzik {
2459c6fd2807SJeff Garzik 	u32 scontrol;
2460c6fd2807SJeff Garzik 
2461c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2462c6fd2807SJeff Garzik 		return 0;
2463c6fd2807SJeff Garzik 
2464c6fd2807SJeff Garzik 	return __sata_set_spd_needed(ap, &scontrol);
2465c6fd2807SJeff Garzik }
2466c6fd2807SJeff Garzik 
2467c6fd2807SJeff Garzik /**
2468c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2469c6fd2807SJeff Garzik  *	@ap: Port to set SATA spd for
2470c6fd2807SJeff Garzik  *
2471c6fd2807SJeff Garzik  *	Set SATA spd of @ap according to sata_spd_limit.
2472c6fd2807SJeff Garzik  *
2473c6fd2807SJeff Garzik  *	LOCKING:
2474c6fd2807SJeff Garzik  *	Inherited from caller.
2475c6fd2807SJeff Garzik  *
2476c6fd2807SJeff Garzik  *	RETURNS:
2477c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2478c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2479c6fd2807SJeff Garzik  */
2480c6fd2807SJeff Garzik int sata_set_spd(struct ata_port *ap)
2481c6fd2807SJeff Garzik {
2482c6fd2807SJeff Garzik 	u32 scontrol;
2483c6fd2807SJeff Garzik 	int rc;
2484c6fd2807SJeff Garzik 
2485c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2486c6fd2807SJeff Garzik 		return rc;
2487c6fd2807SJeff Garzik 
2488c6fd2807SJeff Garzik 	if (!__sata_set_spd_needed(ap, &scontrol))
2489c6fd2807SJeff Garzik 		return 0;
2490c6fd2807SJeff Garzik 
2491c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2492c6fd2807SJeff Garzik 		return rc;
2493c6fd2807SJeff Garzik 
2494c6fd2807SJeff Garzik 	return 1;
2495c6fd2807SJeff Garzik }
2496c6fd2807SJeff Garzik 
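/*
 * Usage sketch (an assumption about a typical caller such as error
 * handling, not code from this file): lowering the limit only takes
 * effect once SControl is reprogrammed and the link renegotiates, e.g.
 *
 *	if (sata_down_spd_limit(ap) == 0 && sata_set_spd(ap) == 1)
 *		schedule a hardreset so the link comes up at the new speed
 *
 * sata_set_spd() returning 0 means SControl already matched the limit
 * and nothing was written.
 */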
2497c6fd2807SJeff Garzik /*
2498c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2499c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2500c6fd2807SJeff Garzik  */
2501c6fd2807SJeff Garzik /*
2502b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2503c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2504b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2505b352e57dSAlan Cox  *
2506b352e57dSAlan Cox  * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
2507c6fd2807SJeff Garzik  */
2508c6fd2807SJeff Garzik 
2509c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2510c6fd2807SJeff Garzik 
2511c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2512c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2513c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2514c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2515c6fd2807SJeff Garzik 
2516b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2517b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2518c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2519c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2520c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2521c6fd2807SJeff Garzik 
2522c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2523c6fd2807SJeff Garzik 
2524c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2525c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2526c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2527c6fd2807SJeff Garzik 
2528c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2529c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2530c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2531c6fd2807SJeff Garzik 
2532b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2533b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2534c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2535c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2536c6fd2807SJeff Garzik 
2537c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2538c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2539c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2540c6fd2807SJeff Garzik 
2541c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2542c6fd2807SJeff Garzik 
2543c6fd2807SJeff Garzik 	{ 0xFF }
2544c6fd2807SJeff Garzik };
2545c6fd2807SJeff Garzik 
2546c6fd2807SJeff Garzik #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
2547c6fd2807SJeff Garzik #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
2548c6fd2807SJeff Garzik 
2549c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2550c6fd2807SJeff Garzik {
2551c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2552c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2553c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2554c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2555c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2556c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2557c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2558c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2559c6fd2807SJeff Garzik }
2560c6fd2807SJeff Garzik 
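/*
 * Worked example of the quantisation above: the table entries are in
 * nanoseconds and are multiplied by 1000, so T and UT are effectively
 * clock periods in picoseconds.  Assuming a 33 MHz bus clock
 * (T = 30000 ps, an example value), a PIO4 cycle of 120 ns becomes
 *
 *	EZ(120 * 1000, 30000) = ENOUGH(120000, 30000)
 *			      = (120000 - 1) / 30000 + 1 = 4 clocks
 *
 * i.e. timings are always rounded up to whole clock periods, never
 * truncated down.
 */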
2561c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2562c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2563c6fd2807SJeff Garzik {
2564c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2565c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2566c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2567c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2568c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2569c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2570c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2571c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2572c6fd2807SJeff Garzik }
2573c6fd2807SJeff Garzik 
2574c6fd2807SJeff Garzik static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2575c6fd2807SJeff Garzik {
2576c6fd2807SJeff Garzik 	const struct ata_timing *t;
2577c6fd2807SJeff Garzik 
2578c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2579c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2580c6fd2807SJeff Garzik 			return NULL;
2581c6fd2807SJeff Garzik 	return t;
2582c6fd2807SJeff Garzik }
2583c6fd2807SJeff Garzik 
2584c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2585c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2586c6fd2807SJeff Garzik {
2587c6fd2807SJeff Garzik 	const struct ata_timing *s;
2588c6fd2807SJeff Garzik 	struct ata_timing p;
2589c6fd2807SJeff Garzik 
2590c6fd2807SJeff Garzik 	/*
2591c6fd2807SJeff Garzik 	 * Find the mode.
2592c6fd2807SJeff Garzik 	 */
2593c6fd2807SJeff Garzik 
2594c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2595c6fd2807SJeff Garzik 		return -EINVAL;
2596c6fd2807SJeff Garzik 
2597c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2598c6fd2807SJeff Garzik 
2599c6fd2807SJeff Garzik 	/*
2600c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2601c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2602c6fd2807SJeff Garzik 	 */
2603c6fd2807SJeff Garzik 
2604c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2605c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2606c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2607c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2608c6fd2807SJeff Garzik 			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2609c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2610c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2611c6fd2807SJeff Garzik 		}
2612c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2613c6fd2807SJeff Garzik 	}
2614c6fd2807SJeff Garzik 
2615c6fd2807SJeff Garzik 	/*
2616c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2617c6fd2807SJeff Garzik 	 */
2618c6fd2807SJeff Garzik 
2619c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2620c6fd2807SJeff Garzik 
2621c6fd2807SJeff Garzik 	/*
2622c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2623c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2624c6fd2807SJeff Garzik 	 * DMA cycle timing is no faster than the fastest PIO timing.
2625c6fd2807SJeff Garzik 	 */
2626c6fd2807SJeff Garzik 
2627fd3367afSAlan 	if (speed > XFER_PIO_6) {
2628c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2629c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2630c6fd2807SJeff Garzik 	}
2631c6fd2807SJeff Garzik 
2632c6fd2807SJeff Garzik 	/*
2633c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2634c6fd2807SJeff Garzik 	 */
2635c6fd2807SJeff Garzik 
2636c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2637c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2638c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2639c6fd2807SJeff Garzik 	}
2640c6fd2807SJeff Garzik 
2641c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2642c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2643c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2644c6fd2807SJeff Garzik 	}
26454f701d1eSAlan Cox 
26464f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
26474f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
26484f701d1eSAlan Cox 	   if so, we must correct this. */
26494f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
26504f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2651c6fd2807SJeff Garzik 
2652c6fd2807SJeff Garzik 	return 0;
2653c6fd2807SJeff Garzik }
2654c6fd2807SJeff Garzik 
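/*
 * Minimal usage sketch for a host driver's ->set_piomode() hook
 * (an editorial illustration, not code from this driver):
 *
 *	struct ata_timing t;
 *
 *	if (ata_timing_compute(adev, adev->pio_mode, &t, T, UT))
 *		return;
 *	my_chip_program_timing(ap, adev->devno, t.active, t.recover);
 *
 * where T/UT are the controller's clock periods and
 * my_chip_program_timing() is a made-up stand-in for whatever register
 * writes the hardware needs; t.active and t.recover are already in
 * clock counts after quantisation.
 */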
2655c6fd2807SJeff Garzik /**
2656c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2657c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2658458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2659c6fd2807SJeff Garzik  *
2660c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2661c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2662c6fd2807SJeff Garzik  *	will apply the limit.
2663c6fd2807SJeff Garzik  *
2664c6fd2807SJeff Garzik  *	LOCKING:
2665c6fd2807SJeff Garzik  *	Inherited from caller.
2666c6fd2807SJeff Garzik  *
2667c6fd2807SJeff Garzik  *	RETURNS:
2668c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2669c6fd2807SJeff Garzik  */
2670458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2671c6fd2807SJeff Garzik {
2672458337dbSTejun Heo 	char buf[32];
2673458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2674458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2675458337dbSTejun Heo 	int quiet, highbit;
2676c6fd2807SJeff Garzik 
2677458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2678458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2679458337dbSTejun Heo 
2680458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2681458337dbSTejun Heo 						  dev->mwdma_mask,
2682c6fd2807SJeff Garzik 						  dev->udma_mask);
2683458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2684c6fd2807SJeff Garzik 
2685458337dbSTejun Heo 	switch (sel) {
2686458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2687458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2688458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2689458337dbSTejun Heo 		break;
2690458337dbSTejun Heo 
2691458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2692458337dbSTejun Heo 		if (udma_mask) {
2693458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2694458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2695458337dbSTejun Heo 			if (!udma_mask)
2696458337dbSTejun Heo 				return -ENOENT;
2697458337dbSTejun Heo 		} else if (mwdma_mask) {
2698458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2699458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2700458337dbSTejun Heo 			if (!mwdma_mask)
2701458337dbSTejun Heo 				return -ENOENT;
2702458337dbSTejun Heo 		}
2703458337dbSTejun Heo 		break;
2704458337dbSTejun Heo 
2705458337dbSTejun Heo 	case ATA_DNXFER_40C:
2706458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2707458337dbSTejun Heo 		break;
2708458337dbSTejun Heo 
2709458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2710458337dbSTejun Heo 		pio_mask &= 1;
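		/* fall through */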
2711458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2712458337dbSTejun Heo 		mwdma_mask = 0;
2713458337dbSTejun Heo 		udma_mask = 0;
2714458337dbSTejun Heo 		break;
2715458337dbSTejun Heo 
2716458337dbSTejun Heo 	default:
2717458337dbSTejun Heo 		BUG();
2718458337dbSTejun Heo 	}
2719458337dbSTejun Heo 
2720458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2721458337dbSTejun Heo 
2722458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2723458337dbSTejun Heo 		return -ENOENT;
2724458337dbSTejun Heo 
2725458337dbSTejun Heo 	if (!quiet) {
2726458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2727458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
2728458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
2729458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2730458337dbSTejun Heo 		else
2731458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
2732458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
2733458337dbSTejun Heo 
2734458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
2735458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
2736458337dbSTejun Heo 	}
2737c6fd2807SJeff Garzik 
2738c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2739c6fd2807SJeff Garzik 			    &dev->udma_mask);
2740c6fd2807SJeff Garzik 
2741c6fd2807SJeff Garzik 	return 0;
2742c6fd2807SJeff Garzik }
2743c6fd2807SJeff Garzik 
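/*
 * Usage sketch (an assumption about how a caller such as error
 * handling would use the selectors above, not code from this file):
 *
 *	if (ata_down_xfermask_limit(dev, ATA_DNXFER_PIO) == 0)
 *		the new, lower mask still has to be applied with
 *		ata_set_mode() before it affects the device
 *
 * -ENOENT means there is no slower mode left to fall back to.
 */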
2744c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
2745c6fd2807SJeff Garzik {
2746baa1e78aSTejun Heo 	struct ata_eh_context *ehc = &dev->ap->eh_context;
2747c6fd2807SJeff Garzik 	unsigned int err_mask;
2748c6fd2807SJeff Garzik 	int rc;
2749c6fd2807SJeff Garzik 
2750c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
2751c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2752c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
2753c6fd2807SJeff Garzik 
2754c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
275511750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
275611750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
275711750a40SAlan 		err_mask &= ~AC_ERR_DEV;
275811750a40SAlan 
2759c6fd2807SJeff Garzik 	if (err_mask) {
2760c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2761c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
2762c6fd2807SJeff Garzik 		return -EIO;
2763c6fd2807SJeff Garzik 	}
2764c6fd2807SJeff Garzik 
2765baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2766c6fd2807SJeff Garzik 	rc = ata_dev_revalidate(dev, 0);
2767baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2768c6fd2807SJeff Garzik 	if (rc)
2769c6fd2807SJeff Garzik 		return rc;
2770c6fd2807SJeff Garzik 
2771c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2772c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
2773c6fd2807SJeff Garzik 
2774c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2775c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2776c6fd2807SJeff Garzik 	return 0;
2777c6fd2807SJeff Garzik }
2778c6fd2807SJeff Garzik 
2779c6fd2807SJeff Garzik /**
278004351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2781c6fd2807SJeff Garzik  *	@ap: port on which timings will be programmed
2782c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
2783c6fd2807SJeff Garzik  *
278404351821SAlan  *	Standard implementation of the function used to tune and set
278504351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
278604351821SAlan  *	ata_dev_set_mode() fails, a pointer to the failing device is
2787c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
2788c6fd2807SJeff Garzik  *
2789c6fd2807SJeff Garzik  *	LOCKING:
2790c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2791c6fd2807SJeff Garzik  *
2792c6fd2807SJeff Garzik  *	RETURNS:
2793c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
2794c6fd2807SJeff Garzik  */
279504351821SAlan 
279604351821SAlan int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2797c6fd2807SJeff Garzik {
2798c6fd2807SJeff Garzik 	struct ata_device *dev;
2799c6fd2807SJeff Garzik 	int i, rc = 0, used_dma = 0, found = 0;
2800c6fd2807SJeff Garzik 
2801c6fd2807SJeff Garzik 
2802c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
2803c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2804c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
2805c6fd2807SJeff Garzik 
2806c6fd2807SJeff Garzik 		dev = &ap->device[i];
2807c6fd2807SJeff Garzik 
2808c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2809c6fd2807SJeff Garzik 			continue;
2810c6fd2807SJeff Garzik 
2811c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
2812c6fd2807SJeff Garzik 
2813c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2814c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2815c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2816c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2817c6fd2807SJeff Garzik 
2818c6fd2807SJeff Garzik 		found = 1;
2819c6fd2807SJeff Garzik 		if (dev->dma_mode)
2820c6fd2807SJeff Garzik 			used_dma = 1;
2821c6fd2807SJeff Garzik 	}
2822c6fd2807SJeff Garzik 	if (!found)
2823c6fd2807SJeff Garzik 		goto out;
2824c6fd2807SJeff Garzik 
2825c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
2826c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2827c6fd2807SJeff Garzik 		dev = &ap->device[i];
2828c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2829c6fd2807SJeff Garzik 			continue;
2830c6fd2807SJeff Garzik 
2831c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
2832c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2833c6fd2807SJeff Garzik 			rc = -EINVAL;
2834c6fd2807SJeff Garzik 			goto out;
2835c6fd2807SJeff Garzik 		}
2836c6fd2807SJeff Garzik 
2837c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
2838c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
2839c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
2840c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
2841c6fd2807SJeff Garzik 	}
2842c6fd2807SJeff Garzik 
2843c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
2844c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2845c6fd2807SJeff Garzik 		dev = &ap->device[i];
2846c6fd2807SJeff Garzik 
2847c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2848c6fd2807SJeff Garzik 			continue;
2849c6fd2807SJeff Garzik 
2850c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
2851c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2852c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
2853c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
2854c6fd2807SJeff Garzik 	}
2855c6fd2807SJeff Garzik 
2856c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
2857c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2858c6fd2807SJeff Garzik 		dev = &ap->device[i];
2859c6fd2807SJeff Garzik 
286018d90debSAlan 		/* don't update suspended devices' xfer mode */
2861c6fd2807SJeff Garzik 		if (!ata_dev_ready(dev))
2862c6fd2807SJeff Garzik 			continue;
2863c6fd2807SJeff Garzik 
2864c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
2865c6fd2807SJeff Garzik 		if (rc)
2866c6fd2807SJeff Garzik 			goto out;
2867c6fd2807SJeff Garzik 	}
2868c6fd2807SJeff Garzik 
2869c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
2870c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
2871c6fd2807SJeff Garzik 	 */
2872cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2873032af1ceSAlan 		ap->host->simplex_claimed = ap;
2874c6fd2807SJeff Garzik 
2875c6fd2807SJeff Garzik 	/* step 5: chip-specific finalisation */
2876c6fd2807SJeff Garzik 	if (ap->ops->post_set_mode)
2877c6fd2807SJeff Garzik 		ap->ops->post_set_mode(ap);
2878c6fd2807SJeff Garzik  out:
2879c6fd2807SJeff Garzik 	if (rc)
2880c6fd2807SJeff Garzik 		*r_failed_dev = dev;
2881c6fd2807SJeff Garzik 	return rc;
2882c6fd2807SJeff Garzik }
2883c6fd2807SJeff Garzik 
2884c6fd2807SJeff Garzik /**
288504351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
288604351821SAlan  *	@ap: port on which timings will be programmed
288704351821SAlan  *	@r_failed_dev: out parameter for failed device
288804351821SAlan  *
288904351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
289004351821SAlan  *	ata_set_mode() fails, a pointer to the failing device is
289104351821SAlan  *	returned in @r_failed_dev.
289204351821SAlan  *
289304351821SAlan  *	LOCKING:
289404351821SAlan  *	PCI/etc. bus probe sem.
289504351821SAlan  *
289604351821SAlan  *	RETURNS:
289704351821SAlan  *	0 on success, negative errno otherwise
289804351821SAlan  */
289904351821SAlan int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
290004351821SAlan {
290104351821SAlan 	/* has private set_mode? */
290204351821SAlan 	if (ap->ops->set_mode)
290304351821SAlan 		return ap->ops->set_mode(ap, r_failed_dev);
290404351821SAlan 	return ata_do_set_mode(ap, r_failed_dev);
290504351821SAlan }
290604351821SAlan 
290704351821SAlan /**
2908c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
2909c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
2910c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
2911c6fd2807SJeff Garzik  *
2912c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
2913c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
2914c6fd2807SJeff Garzik  *	other threads.
2915c6fd2807SJeff Garzik  *
2916c6fd2807SJeff Garzik  *	LOCKING:
2917cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
2918c6fd2807SJeff Garzik  */
2919c6fd2807SJeff Garzik 
2920c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
2921c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
2922c6fd2807SJeff Garzik {
2923c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
2924c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
2925c6fd2807SJeff Garzik }
2926c6fd2807SJeff Garzik 
2927c6fd2807SJeff Garzik /**
2928c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
2929c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
2930c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
2931c6fd2807SJeff Garzik  *	@tmout: overall timeout
2932c6fd2807SJeff Garzik  *
2933c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
2934c6fd2807SJeff Garzik  *	or a timeout occurs.
2935c6fd2807SJeff Garzik  *
2936d1adc1bbSTejun Heo  *	LOCKING:
2937d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
2938d1adc1bbSTejun Heo  *
2939d1adc1bbSTejun Heo  *	RETURNS:
2940d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
2941c6fd2807SJeff Garzik  */
2942d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
2943c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
2944c6fd2807SJeff Garzik {
2945c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
2946c6fd2807SJeff Garzik 	u8 status;
2947c6fd2807SJeff Garzik 
2948c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
2949c6fd2807SJeff Garzik 	timer_start = jiffies;
2950c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
2951d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2952d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2953c6fd2807SJeff Garzik 		msleep(50);
2954c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
2955c6fd2807SJeff Garzik 	}
2956c6fd2807SJeff Garzik 
2957d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
2958c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
295935aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
296035aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
2961c6fd2807SJeff Garzik 
2962c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
2963d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
2964d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
2965c6fd2807SJeff Garzik 		msleep(50);
2966c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
2967c6fd2807SJeff Garzik 	}
2968c6fd2807SJeff Garzik 
2969d1adc1bbSTejun Heo 	if (status == 0xff)
2970d1adc1bbSTejun Heo 		return -ENODEV;
2971d1adc1bbSTejun Heo 
2972c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
2973c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
297435aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
297535aa7a43SJeff Garzik 				tmout / HZ, status);
2976d1adc1bbSTejun Heo 		return -EBUSY;
2977c6fd2807SJeff Garzik 	}
2978c6fd2807SJeff Garzik 
2979c6fd2807SJeff Garzik 	return 0;
2980c6fd2807SJeff Garzik }
2981c6fd2807SJeff Garzik 
2982d4b2bab4STejun Heo /**
2983d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
2984d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
2985d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
2986d4b2bab4STejun Heo  *
2987d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
2988d4b2bab4STejun Heo  *	occurs.
2989d4b2bab4STejun Heo  *
2990d4b2bab4STejun Heo  *	LOCKING:
2991d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
2992d4b2bab4STejun Heo  *
2993d4b2bab4STejun Heo  *	RETURNS:
2994d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
2995d4b2bab4STejun Heo  */
2996d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
2997d4b2bab4STejun Heo {
2998d4b2bab4STejun Heo 	unsigned long start = jiffies;
2999d4b2bab4STejun Heo 	int warned = 0;
3000d4b2bab4STejun Heo 
3001d4b2bab4STejun Heo 	while (1) {
3002d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3003d4b2bab4STejun Heo 		unsigned long now = jiffies;
3004d4b2bab4STejun Heo 
3005d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3006d4b2bab4STejun Heo 			return 0;
3007d4b2bab4STejun Heo 		if (status == 0xff)
3008d4b2bab4STejun Heo 			return -ENODEV;
3009d4b2bab4STejun Heo 		if (time_after(now, deadline))
3010d4b2bab4STejun Heo 			return -EBUSY;
3011d4b2bab4STejun Heo 
3012d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3013d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3014d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3015d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3016d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3017d4b2bab4STejun Heo 			warned = 1;
3018d4b2bab4STejun Heo 		}
3019d4b2bab4STejun Heo 
3020d4b2bab4STejun Heo 		msleep(50);
3021d4b2bab4STejun Heo 	}
3022d4b2bab4STejun Heo }
3023d4b2bab4STejun Heo 
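/*
 * Usage sketch (illustrative only): deadlines are absolute jiffies,
 * so a caller that wants to wait up to 10 seconds would do
 *
 *	rc = ata_wait_ready(ap, jiffies + 10 * HZ);
 *
 * and get back 0, -ENODEV (status reads 0xff) or -EBUSY on timeout.
 */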
3024d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3025d4b2bab4STejun Heo 			      unsigned long deadline)
3026c6fd2807SJeff Garzik {
3027c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3028c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3029c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
30309b89391cSTejun Heo 	int rc, ret = 0;
3031c6fd2807SJeff Garzik 
3032c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3033c6fd2807SJeff Garzik 	 * BSY bit to clear
3034c6fd2807SJeff Garzik 	 */
3035d4b2bab4STejun Heo 	if (dev0) {
3036d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30379b89391cSTejun Heo 		if (rc) {
30389b89391cSTejun Heo 			if (rc != -ENODEV)
3039d4b2bab4STejun Heo 				return rc;
30409b89391cSTejun Heo 			ret = rc;
30419b89391cSTejun Heo 		}
3042d4b2bab4STejun Heo 	}
3043c6fd2807SJeff Garzik 
3044c6fd2807SJeff Garzik 	/* if device 1 was found in ata_devchk, wait for
3045c6fd2807SJeff Garzik 	 * register access, then wait for BSY to clear
3046c6fd2807SJeff Garzik 	 */
3047c6fd2807SJeff Garzik 	while (dev1) {
3048c6fd2807SJeff Garzik 		u8 nsect, lbal;
3049c6fd2807SJeff Garzik 
3050c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
30510d5ff566STejun Heo 		nsect = ioread8(ioaddr->nsect_addr);
30520d5ff566STejun Heo 		lbal = ioread8(ioaddr->lbal_addr);
3053c6fd2807SJeff Garzik 		if ((nsect == 1) && (lbal == 1))
3054c6fd2807SJeff Garzik 			break;
3055d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3056d4b2bab4STejun Heo 			return -EBUSY;
3057c6fd2807SJeff Garzik 		msleep(50);	/* give drive a breather */
3058c6fd2807SJeff Garzik 	}
3059d4b2bab4STejun Heo 	if (dev1) {
3060d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
30619b89391cSTejun Heo 		if (rc) {
30629b89391cSTejun Heo 			if (rc != -ENODEV)
3063d4b2bab4STejun Heo 				return rc;
30649b89391cSTejun Heo 			ret = rc;
30659b89391cSTejun Heo 		}
3066d4b2bab4STejun Heo 	}
3067c6fd2807SJeff Garzik 
3068c6fd2807SJeff Garzik 	/* is all this really necessary? */
3069c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3070c6fd2807SJeff Garzik 	if (dev1)
3071c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3072c6fd2807SJeff Garzik 	if (dev0)
3073c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3074d4b2bab4STejun Heo 
30759b89391cSTejun Heo 	return ret;
3076c6fd2807SJeff Garzik }
3077c6fd2807SJeff Garzik 
3078d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3079d4b2bab4STejun Heo 			     unsigned long deadline)
3080c6fd2807SJeff Garzik {
3081c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3082c6fd2807SJeff Garzik 
308344877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3084c6fd2807SJeff Garzik 
3085c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
30860d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3087c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
30880d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3089c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
30900d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3091c6fd2807SJeff Garzik 
3092c6fd2807SJeff Garzik 	/* spec mandates ">= 2ms" before checking status.
3093c6fd2807SJeff Garzik 	 * We wait 150ms, because that was the magic delay used for
3094c6fd2807SJeff Garzik 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3095c6fd2807SJeff Garzik 	 * between when the ATA command register is written and when
3096c6fd2807SJeff Garzik 	 * status is checked.  Because waiting for "a while" before
3097c6fd2807SJeff Garzik 	 * checking status is fine, post SRST, we perform this magic
3098c6fd2807SJeff Garzik 	 * delay here as well.
3099c6fd2807SJeff Garzik 	 *
3100c6fd2807SJeff Garzik 	 * The old drivers/ide code uses the 2 ms rule and then waits for ready.
3101c6fd2807SJeff Garzik 	 */
3102c6fd2807SJeff Garzik 	msleep(150);
3103c6fd2807SJeff Garzik 
3104c6fd2807SJeff Garzik 	/* Before we perform post-reset processing we want to see if
3105c6fd2807SJeff Garzik 	 * the bus shows 0xFF, because the odd clown forgets the D7
3106c6fd2807SJeff Garzik 	 * pulldown resistor.
3107c6fd2807SJeff Garzik 	 */
3108d1adc1bbSTejun Heo 	if (ata_check_status(ap) == 0xFF)
31099b89391cSTejun Heo 		return -ENODEV;
3110c6fd2807SJeff Garzik 
3111d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3112c6fd2807SJeff Garzik }
3113c6fd2807SJeff Garzik 
3114c6fd2807SJeff Garzik /**
3115c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3116c6fd2807SJeff Garzik  *	@ap: port to reset
3117c6fd2807SJeff Garzik  *
3118c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3119c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3120c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3121c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3122c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3123c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3124c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3125c6fd2807SJeff Garzik  *
3126c6fd2807SJeff Garzik  *	LOCKING:
3127c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3128cca3974eSJeff Garzik  *	Obtains host lock.
3129c6fd2807SJeff Garzik  *
3130c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3131c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3132c6fd2807SJeff Garzik  */
3133c6fd2807SJeff Garzik 
3134c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3135c6fd2807SJeff Garzik {
3136c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3137c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3138c6fd2807SJeff Garzik 	u8 err;
3139c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
31409b89391cSTejun Heo 	int rc;
3141c6fd2807SJeff Garzik 
314244877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3143c6fd2807SJeff Garzik 
3144c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3145c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3146c6fd2807SJeff Garzik 		dev0 = 1;
3147c6fd2807SJeff Garzik 	else {
3148c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3149c6fd2807SJeff Garzik 		if (slave_possible)
3150c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3151c6fd2807SJeff Garzik 	}
3152c6fd2807SJeff Garzik 
3153c6fd2807SJeff Garzik 	if (dev0)
3154c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3155c6fd2807SJeff Garzik 	if (dev1)
3156c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3157c6fd2807SJeff Garzik 
3158c6fd2807SJeff Garzik 	/* select device 0 again */
3159c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3160c6fd2807SJeff Garzik 
3161c6fd2807SJeff Garzik 	/* issue bus reset */
31629b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
31639b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
31649b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3165c6fd2807SJeff Garzik 			goto err_out;
31669b89391cSTejun Heo 	}
3167c6fd2807SJeff Garzik 
3168c6fd2807SJeff Garzik 	/*
3169c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3170c6fd2807SJeff Garzik 	 */
3171c6fd2807SJeff Garzik 	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
3172c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
3173c6fd2807SJeff Garzik 		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
3174c6fd2807SJeff Garzik 
3175c6fd2807SJeff Garzik 	/* re-enable interrupts */
317683625006SAkira Iguchi 	ap->ops->irq_on(ap);
3177c6fd2807SJeff Garzik 
3178c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3179c6fd2807SJeff Garzik 	if (ap->device[1].class != ATA_DEV_NONE)
3180c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3181c6fd2807SJeff Garzik 	if (ap->device[0].class != ATA_DEV_NONE)
3182c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3183c6fd2807SJeff Garzik 
3184c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
3185c6fd2807SJeff Garzik 	if ((ap->device[0].class == ATA_DEV_NONE) &&
3186c6fd2807SJeff Garzik 	    (ap->device[1].class == ATA_DEV_NONE))
3187c6fd2807SJeff Garzik 		goto err_out;
3188c6fd2807SJeff Garzik 
3189c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3190c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
31910d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3192c6fd2807SJeff Garzik 	}
3193c6fd2807SJeff Garzik 
3194c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3195c6fd2807SJeff Garzik 	return;
3196c6fd2807SJeff Garzik 
3197c6fd2807SJeff Garzik err_out:
3198c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3199c6fd2807SJeff Garzik 	ap->ops->port_disable(ap);
3200c6fd2807SJeff Garzik 
3201c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3202c6fd2807SJeff Garzik }
3203c6fd2807SJeff Garzik 
3204c6fd2807SJeff Garzik /**
3205c6fd2807SJeff Garzik  *	sata_phy_debounce - debounce SATA phy status
3206c6fd2807SJeff Garzik  *	@ap: ATA port to debounce SATA phy status for
3207c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3208d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3209c6fd2807SJeff Garzik  *
3210c6fd2807SJeff Garzik  *	Make sure SStatus of @ap reaches stable state, determined by
3211c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3212c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3213d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3214d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3215c6fd2807SJeff Garzik  *	until timeout and then returns 0 if DET is stable at 1.
3216c6fd2807SJeff Garzik  *
3217d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3218d4b2bab4STejun Heo  *	two is used.
3219d4b2bab4STejun Heo  *
3220c6fd2807SJeff Garzik  *	LOCKING:
3221c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3222c6fd2807SJeff Garzik  *
3223c6fd2807SJeff Garzik  *	RETURNS:
3224c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3225c6fd2807SJeff Garzik  */
3226d4b2bab4STejun Heo int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3227d4b2bab4STejun Heo 		      unsigned long deadline)
3228c6fd2807SJeff Garzik {
3229c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3230d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3231d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3232c6fd2807SJeff Garzik 	u32 last, cur;
3233c6fd2807SJeff Garzik 	int rc;
3234c6fd2807SJeff Garzik 
3235d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3236d4b2bab4STejun Heo 	if (time_before(t, deadline))
3237d4b2bab4STejun Heo 		deadline = t;
3238d4b2bab4STejun Heo 
3239c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3240c6fd2807SJeff Garzik 		return rc;
3241c6fd2807SJeff Garzik 	cur &= 0xf;
3242c6fd2807SJeff Garzik 
3243c6fd2807SJeff Garzik 	last = cur;
3244c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3245c6fd2807SJeff Garzik 
3246c6fd2807SJeff Garzik 	while (1) {
3247c6fd2807SJeff Garzik 		msleep(interval_msec);
3248c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3249c6fd2807SJeff Garzik 			return rc;
3250c6fd2807SJeff Garzik 		cur &= 0xf;
3251c6fd2807SJeff Garzik 
3252c6fd2807SJeff Garzik 		/* DET stable? */
3253c6fd2807SJeff Garzik 		if (cur == last) {
3254d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3255c6fd2807SJeff Garzik 				continue;
3256c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3257c6fd2807SJeff Garzik 				return 0;
3258c6fd2807SJeff Garzik 			continue;
3259c6fd2807SJeff Garzik 		}
3260c6fd2807SJeff Garzik 
3261c6fd2807SJeff Garzik 		/* unstable, start over */
3262c6fd2807SJeff Garzik 		last = cur;
3263c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3264c6fd2807SJeff Garzik 
3265d4b2bab4STejun Heo 		/* check deadline */
3266d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3267c6fd2807SJeff Garzik 			return -EBUSY;
3268c6fd2807SJeff Garzik 	}
3269c6fd2807SJeff Garzik }
3270c6fd2807SJeff Garzik 
3271c6fd2807SJeff Garzik /**
3272c6fd2807SJeff Garzik  *	sata_phy_resume - resume SATA phy
3273c6fd2807SJeff Garzik  *	@ap: ATA port to resume SATA phy for
3274c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3275d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3276c6fd2807SJeff Garzik  *
3277c6fd2807SJeff Garzik  *	Resume SATA phy of @ap and debounce it.
3278c6fd2807SJeff Garzik  *
3279c6fd2807SJeff Garzik  *	LOCKING:
3280c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3281c6fd2807SJeff Garzik  *
3282c6fd2807SJeff Garzik  *	RETURNS:
3283c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3284c6fd2807SJeff Garzik  */
3285d4b2bab4STejun Heo int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3286d4b2bab4STejun Heo 		    unsigned long deadline)
3287c6fd2807SJeff Garzik {
3288c6fd2807SJeff Garzik 	u32 scontrol;
3289c6fd2807SJeff Garzik 	int rc;
3290c6fd2807SJeff Garzik 
3291c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3292c6fd2807SJeff Garzik 		return rc;
3293c6fd2807SJeff Garzik 
3294c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3295c6fd2807SJeff Garzik 
3296c6fd2807SJeff Garzik 	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3297c6fd2807SJeff Garzik 		return rc;
3298c6fd2807SJeff Garzik 
3299c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3300c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3301c6fd2807SJeff Garzik 	 */
3302c6fd2807SJeff Garzik 	msleep(200);
3303c6fd2807SJeff Garzik 
3304d4b2bab4STejun Heo 	return sata_phy_debounce(ap, params, deadline);
3305c6fd2807SJeff Garzik }
3306c6fd2807SJeff Garzik 
3307c6fd2807SJeff Garzik /**
3308c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3309c6fd2807SJeff Garzik  *	@ap: ATA port to be reset
3310d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3311c6fd2807SJeff Garzik  *
3312b8cffc6aSTejun Heo  *	@ap is about to be reset.  Initialize it.  Failure from
3313b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give up
3314b8cffc6aSTejun Heo  *	that port, so prereset should be best-effort.  It does its
3315b8cffc6aSTejun Heo  *	best to prepare for the reset sequence, but if things go wrong, it
3316b8cffc6aSTejun Heo  *	should just whine, not fail.
3317c6fd2807SJeff Garzik  *
3318c6fd2807SJeff Garzik  *	LOCKING:
3319c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3320c6fd2807SJeff Garzik  *
3321c6fd2807SJeff Garzik  *	RETURNS:
3322c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3323c6fd2807SJeff Garzik  */
3324d4b2bab4STejun Heo int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3325c6fd2807SJeff Garzik {
3326c6fd2807SJeff Garzik 	struct ata_eh_context *ehc = &ap->eh_context;
3327c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3328c6fd2807SJeff Garzik 	int rc;
3329c6fd2807SJeff Garzik 
333031daabdaSTejun Heo 	/* handle link resume */
3331c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3332c6fd2807SJeff Garzik 	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3333c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3334c6fd2807SJeff Garzik 
3335c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3336c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3337c6fd2807SJeff Garzik 		return 0;
3338c6fd2807SJeff Garzik 
3339c6fd2807SJeff Garzik 	/* if SATA, resume phy */
3340c6fd2807SJeff Garzik 	if (ap->cbl == ATA_CBL_SATA) {
3341d4b2bab4STejun Heo 		rc = sata_phy_resume(ap, timing, deadline);
3342b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3343b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3344c6fd2807SJeff Garzik 			ata_port_printk(ap, KERN_WARNING, "failed to resume "
3345c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3346c6fd2807SJeff Garzik 	}
3347c6fd2807SJeff Garzik 
3348c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3349c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3350c6fd2807SJeff Garzik 	 */
3351b8cffc6aSTejun Heo 	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3352b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
3353b8cffc6aSTejun Heo 		if (rc) {
3354b8cffc6aSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "device not ready "
3355b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3356b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3357b8cffc6aSTejun Heo 		}
3358b8cffc6aSTejun Heo 	}
3359c6fd2807SJeff Garzik 
3360c6fd2807SJeff Garzik 	return 0;
3361c6fd2807SJeff Garzik }
3362c6fd2807SJeff Garzik 
3363c6fd2807SJeff Garzik /**
3364c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3365c6fd2807SJeff Garzik  *	@ap: port to reset
3366c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3367d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3368c6fd2807SJeff Garzik  *
3369c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3370c6fd2807SJeff Garzik  *
3371c6fd2807SJeff Garzik  *	LOCKING:
3372c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3373c6fd2807SJeff Garzik  *
3374c6fd2807SJeff Garzik  *	RETURNS:
3375c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3376c6fd2807SJeff Garzik  */
3377d4b2bab4STejun Heo int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3378d4b2bab4STejun Heo 		      unsigned long deadline)
3379c6fd2807SJeff Garzik {
3380c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3381d4b2bab4STejun Heo 	unsigned int devmask = 0;
3382d4b2bab4STejun Heo 	int rc;
3383c6fd2807SJeff Garzik 	u8 err;
3384c6fd2807SJeff Garzik 
3385c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3386c6fd2807SJeff Garzik 
3387c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3388c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3389c6fd2807SJeff Garzik 		goto out;
3390c6fd2807SJeff Garzik 	}
3391c6fd2807SJeff Garzik 
3392c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3393c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3394c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3395c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3396c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3397c6fd2807SJeff Garzik 
3398c6fd2807SJeff Garzik 	/* select device 0 again */
3399c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3400c6fd2807SJeff Garzik 
3401c6fd2807SJeff Garzik 	/* issue bus reset */
3402c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3403d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
34049b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
34059b89391cSTejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
3406d4b2bab4STejun Heo 		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3407d4b2bab4STejun Heo 		return rc;
3408c6fd2807SJeff Garzik 	}
3409c6fd2807SJeff Garzik 
3410c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
3411c6fd2807SJeff Garzik 	classes[0] = ata_dev_try_classify(ap, 0, &err);
3412c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
3413c6fd2807SJeff Garzik 		classes[1] = ata_dev_try_classify(ap, 1, &err);
3414c6fd2807SJeff Garzik 
3415c6fd2807SJeff Garzik  out:
3416c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3417c6fd2807SJeff Garzik 	return 0;
3418c6fd2807SJeff Garzik }
3419c6fd2807SJeff Garzik 
3420c6fd2807SJeff Garzik /**
3421b6103f6dSTejun Heo  *	sata_port_hardreset - reset port via SATA phy reset
3422c6fd2807SJeff Garzik  *	@ap: port to reset
3423b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3424d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3425c6fd2807SJeff Garzik  *
3426c6fd2807SJeff Garzik  *	SATA phy-reset host port using DET bits of SControl register.
3427c6fd2807SJeff Garzik  *
3428c6fd2807SJeff Garzik  *	LOCKING:
3429c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3430c6fd2807SJeff Garzik  *
3431c6fd2807SJeff Garzik  *	RETURNS:
3432c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3433c6fd2807SJeff Garzik  */
3434d4b2bab4STejun Heo int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3435d4b2bab4STejun Heo 			unsigned long deadline)
3436c6fd2807SJeff Garzik {
3437c6fd2807SJeff Garzik 	u32 scontrol;
3438c6fd2807SJeff Garzik 	int rc;
3439c6fd2807SJeff Garzik 
3440c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3441c6fd2807SJeff Garzik 
3442c6fd2807SJeff Garzik 	if (sata_set_spd_needed(ap)) {
3443c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3444c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3445c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3446c6fd2807SJeff Garzik 		 * and Sil3124.
3447c6fd2807SJeff Garzik 		 */
3448c6fd2807SJeff Garzik 		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3449b6103f6dSTejun Heo 			goto out;
3450c6fd2807SJeff Garzik 
3451cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3452c6fd2807SJeff Garzik 
3453c6fd2807SJeff Garzik 		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3454b6103f6dSTejun Heo 			goto out;
3455c6fd2807SJeff Garzik 
3456c6fd2807SJeff Garzik 		sata_set_spd(ap);
3457c6fd2807SJeff Garzik 	}
3458c6fd2807SJeff Garzik 
3459c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3460c6fd2807SJeff Garzik 	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3461b6103f6dSTejun Heo 		goto out;
3462c6fd2807SJeff Garzik 
3463c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3464c6fd2807SJeff Garzik 
3465c6fd2807SJeff Garzik 	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3466b6103f6dSTejun Heo 		goto out;
3467c6fd2807SJeff Garzik 
3468c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3469c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3470c6fd2807SJeff Garzik 	 */
3471c6fd2807SJeff Garzik 	msleep(1);
3472c6fd2807SJeff Garzik 
3473c6fd2807SJeff Garzik 	/* bring phy back */
3474d4b2bab4STejun Heo 	rc = sata_phy_resume(ap, timing, deadline);
3475b6103f6dSTejun Heo  out:
3476b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3477b6103f6dSTejun Heo 	return rc;
3478b6103f6dSTejun Heo }
3479b6103f6dSTejun Heo 
3480b6103f6dSTejun Heo /**
3481b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3482b6103f6dSTejun Heo  *	@ap: port to reset
3483b6103f6dSTejun Heo  *	@class: resulting class of attached device
3484d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3485b6103f6dSTejun Heo  *
3486b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3487b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3488b6103f6dSTejun Heo  *
3489b6103f6dSTejun Heo  *	LOCKING:
3490b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3491b6103f6dSTejun Heo  *
3492b6103f6dSTejun Heo  *	RETURNS:
3493b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3494b6103f6dSTejun Heo  */
3495d4b2bab4STejun Heo int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3496d4b2bab4STejun Heo 		       unsigned long deadline)
3497b6103f6dSTejun Heo {
3498b6103f6dSTejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3499b6103f6dSTejun Heo 	int rc;
3500b6103f6dSTejun Heo 
3501b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3502b6103f6dSTejun Heo 
3503b6103f6dSTejun Heo 	/* do hardreset */
3504d4b2bab4STejun Heo 	rc = sata_port_hardreset(ap, timing, deadline);
3505b6103f6dSTejun Heo 	if (rc) {
3506b6103f6dSTejun Heo 		ata_port_printk(ap, KERN_ERR,
3507b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3508b6103f6dSTejun Heo 		return rc;
3509b6103f6dSTejun Heo 	}
3510c6fd2807SJeff Garzik 
3511c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3512c6fd2807SJeff Garzik 	if (ata_port_offline(ap)) {
3513c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3514c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3515c6fd2807SJeff Garzik 		return 0;
3516c6fd2807SJeff Garzik 	}
3517c6fd2807SJeff Garzik 
351834fee227STejun Heo 	/* wait a while before checking status, see SRST for more info */
351934fee227STejun Heo 	msleep(150);
352034fee227STejun Heo 
3521d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
35229b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
35239b89391cSTejun Heo 	if (rc) {
3524c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR,
3525d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3526d4b2bab4STejun Heo 		return rc;
3527c6fd2807SJeff Garzik 	}
3528c6fd2807SJeff Garzik 
3529c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3530c6fd2807SJeff Garzik 
3531c6fd2807SJeff Garzik 	*class = ata_dev_try_classify(ap, 0, NULL);
3532c6fd2807SJeff Garzik 
3533c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3534c6fd2807SJeff Garzik 	return 0;
3535c6fd2807SJeff Garzik }
3536c6fd2807SJeff Garzik 
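/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * sata_std_hardreset() is normally plugged into a low-level driver's error
 * handler as the hardreset method.  The driver prefix "foo" is hypothetical;
 * the ata_bmdma_drive_eh() combination below only shows the intended wiring
 * and may differ from what any particular driver actually registers.
 */
#if 0
static void foo_error_handler(struct ata_port *ap)
{
	/* prereset, softreset, hardreset, postreset */
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   sata_std_hardreset, ata_std_postreset);
}
#endif
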
3537c6fd2807SJeff Garzik /**
3538c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3539c6fd2807SJeff Garzik  *	@ap: the target ata_port
3540c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3541c6fd2807SJeff Garzik  *
3542c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3543c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3544c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3545c6fd2807SJeff Garzik  *
3546c6fd2807SJeff Garzik  *	LOCKING:
3547c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3548c6fd2807SJeff Garzik  */
3549c6fd2807SJeff Garzik void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3550c6fd2807SJeff Garzik {
3551c6fd2807SJeff Garzik 	u32 serror;
3552c6fd2807SJeff Garzik 
3553c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3554c6fd2807SJeff Garzik 
3555c6fd2807SJeff Garzik 	/* print link status */
3556c6fd2807SJeff Garzik 	sata_print_link_status(ap);
3557c6fd2807SJeff Garzik 
3558c6fd2807SJeff Garzik 	/* clear SError */
3559c6fd2807SJeff Garzik 	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3560c6fd2807SJeff Garzik 		sata_scr_write(ap, SCR_ERROR, serror);
3561c6fd2807SJeff Garzik 
3562c6fd2807SJeff Garzik 	/* re-enable interrupts */
356383625006SAkira Iguchi 	if (!ap->ops->error_handler)
356483625006SAkira Iguchi 		ap->ops->irq_on(ap);
3565c6fd2807SJeff Garzik 
3566c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3567c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3568c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3569c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3570c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3571c6fd2807SJeff Garzik 
3572c6fd2807SJeff Garzik 	/* bail out if no device is present */
3573c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3574c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3575c6fd2807SJeff Garzik 		return;
3576c6fd2807SJeff Garzik 	}
3577c6fd2807SJeff Garzik 
3578c6fd2807SJeff Garzik 	/* set up device control */
35790d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
35800d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3581c6fd2807SJeff Garzik 
3582c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3583c6fd2807SJeff Garzik }
3584c6fd2807SJeff Garzik 
3585c6fd2807SJeff Garzik /**
3586c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3587c6fd2807SJeff Garzik  *	@dev: device to compare against
3588c6fd2807SJeff Garzik  *	@new_class: class of the new device
3589c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3590c6fd2807SJeff Garzik  *
3591c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3592c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3593c6fd2807SJeff Garzik  *	@new_id.
3594c6fd2807SJeff Garzik  *
3595c6fd2807SJeff Garzik  *	LOCKING:
3596c6fd2807SJeff Garzik  *	None.
3597c6fd2807SJeff Garzik  *
3598c6fd2807SJeff Garzik  *	RETURNS:
3599c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3600c6fd2807SJeff Garzik  */
3601c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3602c6fd2807SJeff Garzik 			       const u16 *new_id)
3603c6fd2807SJeff Garzik {
3604c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3605a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3606a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3607c6fd2807SJeff Garzik 	u64 new_n_sectors;
3608c6fd2807SJeff Garzik 
3609c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3610c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3611c6fd2807SJeff Garzik 			       dev->class, new_class);
3612c6fd2807SJeff Garzik 		return 0;
3613c6fd2807SJeff Garzik 	}
3614c6fd2807SJeff Garzik 
3615a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3616a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3617a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3618a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3619c6fd2807SJeff Garzik 	new_n_sectors = ata_id_n_sectors(new_id);
3620c6fd2807SJeff Garzik 
3621c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3622c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3623c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3624c6fd2807SJeff Garzik 		return 0;
3625c6fd2807SJeff Garzik 	}
3626c6fd2807SJeff Garzik 
3627c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3628c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3629c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3630c6fd2807SJeff Garzik 		return 0;
3631c6fd2807SJeff Garzik 	}
3632c6fd2807SJeff Garzik 
3633c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
3634c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3635c6fd2807SJeff Garzik 			       "%llu != %llu\n",
3636c6fd2807SJeff Garzik 			       (unsigned long long)dev->n_sectors,
3637c6fd2807SJeff Garzik 			       (unsigned long long)new_n_sectors);
36381e999736SAlan Cox 		/* If this matches the boot-time size, we appear to be the
36391e999736SAlan Cox 		   same disk and our HPA limit simply got reapplied */
36401e999736SAlan Cox 		if (ata_ignore_hpa && dev->n_sectors_boot == new_n_sectors
36411e999736SAlan Cox 		    && ata_id_hpa_enabled(new_id))
36421e999736SAlan Cox 			return 1;
3643c6fd2807SJeff Garzik 		return 0;
3644c6fd2807SJeff Garzik 	}
3645c6fd2807SJeff Garzik 
3646c6fd2807SJeff Garzik 	return 1;
3647c6fd2807SJeff Garzik }
3648c6fd2807SJeff Garzik 
3649c6fd2807SJeff Garzik /**
3650c6fd2807SJeff Garzik  *	ata_dev_revalidate - Revalidate ATA device
3651c6fd2807SJeff Garzik  *	@dev: device to revalidate
3652bff04647STejun Heo  *	@readid_flags: read ID flags
3653c6fd2807SJeff Garzik  *
3654c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
3655c6fd2807SJeff Garzik  *	the port.
3656c6fd2807SJeff Garzik  *
3657c6fd2807SJeff Garzik  *	LOCKING:
3658c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3659c6fd2807SJeff Garzik  *
3660c6fd2807SJeff Garzik  *	RETURNS:
3661c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3662c6fd2807SJeff Garzik  */
3663bff04647STejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3664c6fd2807SJeff Garzik {
3665c6fd2807SJeff Garzik 	unsigned int class = dev->class;
3666c6fd2807SJeff Garzik 	u16 *id = (void *)dev->ap->sector_buf;
3667c6fd2807SJeff Garzik 	int rc;
3668c6fd2807SJeff Garzik 
3669c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev)) {
3670c6fd2807SJeff Garzik 		rc = -ENODEV;
3671c6fd2807SJeff Garzik 		goto fail;
3672c6fd2807SJeff Garzik 	}
3673c6fd2807SJeff Garzik 
3674c6fd2807SJeff Garzik 	/* read ID data */
3675bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3676c6fd2807SJeff Garzik 	if (rc)
3677c6fd2807SJeff Garzik 		goto fail;
3678c6fd2807SJeff Garzik 
3679c6fd2807SJeff Garzik 	/* is the device still there? */
3680c6fd2807SJeff Garzik 	if (!ata_dev_same_device(dev, class, id)) {
3681c6fd2807SJeff Garzik 		rc = -ENODEV;
3682c6fd2807SJeff Garzik 		goto fail;
3683c6fd2807SJeff Garzik 	}
3684c6fd2807SJeff Garzik 
3685c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3686c6fd2807SJeff Garzik 
3687c6fd2807SJeff Garzik 	/* configure device according to the new ID */
3688efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
3689c6fd2807SJeff Garzik 	if (rc == 0)
3690c6fd2807SJeff Garzik 		return 0;
3691c6fd2807SJeff Garzik 
3692c6fd2807SJeff Garzik  fail:
3693c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3694c6fd2807SJeff Garzik 	return rc;
3695c6fd2807SJeff Garzik }
3696c6fd2807SJeff Garzik 
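/*
 * Illustrative sketch (added, not part of the original file): EH-style code
 * revalidates every enabled device after a successful reset and disables
 * devices whose identity no longer matches.  The helper name below is
 * hypothetical and only demonstrates the calling convention; real callers
 * pick readid_flags such as ATA_READID_POSTRESET based on context.
 */
#if 0
static void example_revalidate_devices(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		/* re-read IDENTIFY data and reconfigure the device */
		if (ata_dev_revalidate(dev, ATA_READID_POSTRESET))
			ata_dev_disable(dev);
	}
}
#endif
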
36976919a0a6SAlan Cox struct ata_blacklist_entry {
36986919a0a6SAlan Cox 	const char *model_num;
36996919a0a6SAlan Cox 	const char *model_rev;
37006919a0a6SAlan Cox 	unsigned long horkage;
37016919a0a6SAlan Cox };
37026919a0a6SAlan Cox 
37036919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
37046919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
37056919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
37066919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
37076919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
37086919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
37096919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
37106919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
37116919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
37126919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
37136919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
37146919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
37156919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
37166919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
37176919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
37186919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
37196919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
37206919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
37216919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
37226919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
37236919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
37246919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
37256919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
37266919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
37276919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
37286919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
37296919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
37306919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
37316919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
37326919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
37336919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
37346919a0a6SAlan Cox 
373518d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
37366f23a31dSAlbert Lee 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 |
37376f23a31dSAlbert Lee 						ATA_HORKAGE_DMA_RW_ONLY },
373818d6e9d5SAlbert Lee 
37396919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
37406919a0a6SAlan Cox 
37416919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
37426919a0a6SAlan Cox 	/* NCQ is slow */
37436919a0a6SAlan Cox         { "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },
374409125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
374509125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
37467acfaf30SPaul Rolland 	/* NCQ is broken */
37477acfaf30SPaul Rolland 	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
374896442925SJens Axboe 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
374996442925SJens Axboe 	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
375036e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
375136e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
375236e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
375336e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
375436e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
37556919a0a6SAlan Cox 
37566919a0a6SAlan Cox 	/* Devices with NCQ limits */
37576919a0a6SAlan Cox 
37586919a0a6SAlan Cox 	/* End Marker */
37596919a0a6SAlan Cox 	{ }
3760c6fd2807SJeff Garzik };
3761c6fd2807SJeff Garzik 
37626919a0a6SAlan Cox unsigned long ata_device_blacklisted(const struct ata_device *dev)
3763c6fd2807SJeff Garzik {
37648bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
37658bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
37666919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3767c6fd2807SJeff Garzik 
37688bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
37698bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3770c6fd2807SJeff Garzik 
37716919a0a6SAlan Cox 	while (ad->model_num) {
37728bfa79fcSTejun Heo 		if (!strcmp(ad->model_num, model_num)) {
37736919a0a6SAlan Cox 			if (ad->model_rev == NULL)
37746919a0a6SAlan Cox 				return ad->horkage;
37758bfa79fcSTejun Heo 			if (!strcmp(ad->model_rev, model_rev))
37766919a0a6SAlan Cox 				return ad->horkage;
3777c6fd2807SJeff Garzik 		}
37786919a0a6SAlan Cox 		ad++;
3779c6fd2807SJeff Garzik 	}
3780c6fd2807SJeff Garzik 	return 0;
3781c6fd2807SJeff Garzik }
3782c6fd2807SJeff Garzik 
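/*
 * Illustrative sketch (added, not part of the original file): callers test
 * individual horkage bits in the mask returned above.  The helper name is
 * hypothetical; NCQ setup code, for instance, checks ATA_HORKAGE_NONCQ in
 * roughly this way before enabling queueing.
 */
#if 0
static int example_ncq_allowed(const struct ata_device *dev)
{
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		/* drive is known to misbehave with NCQ, keep it off */
		return 0;
	}
	return 1;
}
#endif
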
37836919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
37846919a0a6SAlan Cox {
37856919a0a6SAlan Cox 	/* We don't support polling DMA.
37866919a0a6SAlan Cox 	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
37876919a0a6SAlan Cox 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
37886919a0a6SAlan Cox 	 */
37896919a0a6SAlan Cox 	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
37906919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
37916919a0a6SAlan Cox 		return 1;
37926919a0a6SAlan Cox 	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
37936919a0a6SAlan Cox }
37946919a0a6SAlan Cox 
3795c6fd2807SJeff Garzik /**
3796c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
3797c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
3798c6fd2807SJeff Garzik  *
3799c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
3800c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
3801c6fd2807SJeff Garzik  *	known limits including host controller limits, device
3802c6fd2807SJeff Garzik  *	blacklist, etc...
3803c6fd2807SJeff Garzik  *
3804c6fd2807SJeff Garzik  *	LOCKING:
3805c6fd2807SJeff Garzik  *	None.
3806c6fd2807SJeff Garzik  */
3807c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
3808c6fd2807SJeff Garzik {
3809c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
3810cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
3811c6fd2807SJeff Garzik 	unsigned long xfer_mask;
3812c6fd2807SJeff Garzik 
3813c6fd2807SJeff Garzik 	/* controller modes available */
3814c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
3815c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
3816c6fd2807SJeff Garzik 
38178343f889SRobert Hancock 	/* drive modes available */
3818c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3819c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
3820c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
3821c6fd2807SJeff Garzik 
3822b352e57dSAlan Cox 	/*
3823b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
3824b352e57dSAlan Cox 	 *	cable
3825b352e57dSAlan Cox 	 */
3826b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
3827b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
3828b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3829b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
3830b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3831b352e57dSAlan Cox 	}
3832b352e57dSAlan Cox 
3833c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
3834c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3835c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
3836c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
3837c6fd2807SJeff Garzik 	}
3838c6fd2807SJeff Garzik 
383914d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
384014d66ab7SPetr Vandrovec             host->simplex_claimed && host->simplex_claimed != ap) {
3841c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3842c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3843c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
3844c6fd2807SJeff Garzik 	}
3845c6fd2807SJeff Garzik 
3846e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
3847e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
3848e424675fSJeff Garzik 
3849c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
3850a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3851c6fd2807SJeff Garzik 
38528343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
38538343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
38548343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
38558343f889SRobert Hancock 	 * solely limited by the cable.
38568343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
38578343f889SRobert Hancock 	 * drive side as well. Cases where we know a 40wire cable
38588343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
38598343f889SRobert Hancock 	 */
38608343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
38618343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
38628343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
38638343f889SRobert Hancock 		    (ata_drive_40wire(dev->id) &&
38648343f889SRobert Hancock 		     (ap->cbl == ATA_CBL_PATA_UNK ||
38658343f889SRobert Hancock 		      ap->cbl == ATA_CBL_PATA80))) {
38668343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
38678343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
38688343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
38698343f889SRobert Hancock 		}
38708343f889SRobert Hancock 
3871c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3872c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
3873c6fd2807SJeff Garzik }
3874c6fd2807SJeff Garzik 
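/*
 * Illustrative sketch (added, not part of the original file): the
 * ->mode_filter hook invoked above lets a low-level driver strip transfer
 * modes its controller cannot handle for a given device.  The prefix "foo"
 * is hypothetical; the hook just returns a possibly reduced copy of the
 * xfer_mask it was handed.
 */
#if 0
static unsigned long foo_mode_filter(struct ata_device *adev,
				     unsigned long xfer_mask)
{
	/* e.g. this (hypothetical) controller cannot do UDMA at all */
	return xfer_mask & ~ATA_MASK_UDMA;
}
#endif
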
3875c6fd2807SJeff Garzik /**
3876c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3877c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3878c6fd2807SJeff Garzik  *
3879c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
3880c6fd2807SJeff Garzik  *	on its port.
3881c6fd2807SJeff Garzik  *
3882c6fd2807SJeff Garzik  *	LOCKING:
3883c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3884c6fd2807SJeff Garzik  *
3885c6fd2807SJeff Garzik  *	RETURNS:
3886c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3887c6fd2807SJeff Garzik  */
3888c6fd2807SJeff Garzik 
3889c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3890c6fd2807SJeff Garzik {
3891c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3892c6fd2807SJeff Garzik 	unsigned int err_mask;
3893c6fd2807SJeff Garzik 
3894c6fd2807SJeff Garzik 	/* set up set-features taskfile */
3895c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
3896c6fd2807SJeff Garzik 
3897c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3898c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
3899c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
3900c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3901c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3902c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
3903c6fd2807SJeff Garzik 
3904c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3905c6fd2807SJeff Garzik 
3906c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3907c6fd2807SJeff Garzik 	return err_mask;
3908c6fd2807SJeff Garzik }
3909c6fd2807SJeff Garzik 
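/*
 * Note added for illustration (not part of the original file): the value
 * written to tf.nsect above is the standard SET FEATURES transfer mode
 * encoding from <linux/ata.h>, e.g. XFER_PIO_4 (0x0c), XFER_MW_DMA_2 (0x22)
 * or XFER_UDMA_5 (0x45); dev->xfer_mode is expected to already hold one of
 * these values by the time this helper runs.
 */
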
3910c6fd2807SJeff Garzik /**
3911c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
3912c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
3913c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
3914c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
3915c6fd2807SJeff Garzik  *
3916c6fd2807SJeff Garzik  *	LOCKING:
3917c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3918c6fd2807SJeff Garzik  *
3919c6fd2807SJeff Garzik  *	RETURNS:
3920c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
3921c6fd2807SJeff Garzik  */
3922c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
3923c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
3924c6fd2807SJeff Garzik {
3925c6fd2807SJeff Garzik 	struct ata_taskfile tf;
3926c6fd2807SJeff Garzik 	unsigned int err_mask;
3927c6fd2807SJeff Garzik 
3928c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
3929c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3930c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
3931c6fd2807SJeff Garzik 
3932c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
3933c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
3934c6fd2807SJeff Garzik 
3935c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
3936c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
3937c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3938c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
3939c6fd2807SJeff Garzik 	tf.nsect = sectors;
3940c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3941c6fd2807SJeff Garzik 
3942c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3943c6fd2807SJeff Garzik 
3944c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3945c6fd2807SJeff Garzik 	return err_mask;
3946c6fd2807SJeff Garzik }
3947c6fd2807SJeff Garzik 
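/*
 * Note added for illustration (not part of the original file): the expected
 * caller passes the default CHS geometry from the IDENTIFY data, i.e. word 3
 * (number of heads) and word 6 (sectors per track), roughly as:
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);
 *
 * so a drive reporting 16 heads and 63 sectors per track ends up with
 * tf.device |= 0x0f and tf.nsect = 63.
 */
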
3948c6fd2807SJeff Garzik /**
3949c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
3950c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
3951c6fd2807SJeff Garzik  *
3952c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
3953c6fd2807SJeff Garzik  *
3954c6fd2807SJeff Garzik  *	LOCKING:
3955cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3956c6fd2807SJeff Garzik  */
395770e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
3958c6fd2807SJeff Garzik {
3959c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
3960c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
3961c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
3962c6fd2807SJeff Garzik 	void *pad_buf = NULL;
3963c6fd2807SJeff Garzik 
3964c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3965c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
3966c6fd2807SJeff Garzik 
3967c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
3968c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
3969c6fd2807SJeff Garzik 
3970c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3971c6fd2807SJeff Garzik 
3972c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
3973c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
3974c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
3975c6fd2807SJeff Garzik 	 */
3976c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3977c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3978c6fd2807SJeff Garzik 
3979c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
3980c6fd2807SJeff Garzik 		if (qc->n_elem)
3981c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3982c6fd2807SJeff Garzik 		/* restore last sg */
3983c6fd2807SJeff Garzik 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
3984c6fd2807SJeff Garzik 		if (pad_buf) {
3985c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
3986c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
3987c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3988c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
3989c6fd2807SJeff Garzik 		}
3990c6fd2807SJeff Garzik 	} else {
3991c6fd2807SJeff Garzik 		if (qc->n_elem)
3992c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
3993c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3994c6fd2807SJeff Garzik 				dir);
3995c6fd2807SJeff Garzik 		/* restore sg */
3996c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
3997c6fd2807SJeff Garzik 		if (pad_buf)
3998c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
3999c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4000c6fd2807SJeff Garzik 	}
4001c6fd2807SJeff Garzik 
4002c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4003c6fd2807SJeff Garzik 	qc->__sg = NULL;
4004c6fd2807SJeff Garzik }
4005c6fd2807SJeff Garzik 
4006c6fd2807SJeff Garzik /**
4007c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4008c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4009c6fd2807SJeff Garzik  *
4010c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4011c6fd2807SJeff Garzik  *	associated with the current disk command.
4012c6fd2807SJeff Garzik  *
4013c6fd2807SJeff Garzik  *	LOCKING:
4014cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4015c6fd2807SJeff Garzik  *
4016c6fd2807SJeff Garzik  */
4017c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4018c6fd2807SJeff Garzik {
4019c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4020c6fd2807SJeff Garzik 	struct scatterlist *sg;
4021c6fd2807SJeff Garzik 	unsigned int idx;
4022c6fd2807SJeff Garzik 
4023c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4024c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4025c6fd2807SJeff Garzik 
4026c6fd2807SJeff Garzik 	idx = 0;
4027c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4028c6fd2807SJeff Garzik 		u32 addr, offset;
4029c6fd2807SJeff Garzik 		u32 sg_len, len;
4030c6fd2807SJeff Garzik 
4031c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4032c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4033c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4034c6fd2807SJeff Garzik 		 */
4035c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4036c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4037c6fd2807SJeff Garzik 
4038c6fd2807SJeff Garzik 		while (sg_len) {
4039c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4040c6fd2807SJeff Garzik 			len = sg_len;
4041c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4042c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4043c6fd2807SJeff Garzik 
4044c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4045c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4046c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4047c6fd2807SJeff Garzik 
4048c6fd2807SJeff Garzik 			idx++;
4049c6fd2807SJeff Garzik 			sg_len -= len;
4050c6fd2807SJeff Garzik 			addr += len;
4051c6fd2807SJeff Garzik 		}
4052c6fd2807SJeff Garzik 	}
4053c6fd2807SJeff Garzik 
4054c6fd2807SJeff Garzik 	if (idx)
4055c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4056c6fd2807SJeff Garzik }
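
/*
 * Worked example (added for illustration, not part of the original file):
 * a single 0x300-byte s/g element with bus address 0x0000ff00 crosses the
 * 64K boundary at 0x00010000, so the loop above emits two PRD entries:
 *
 *	PRD[0]: addr = 0x0000ff00, len = 0x0100	(up to the boundary)
 *	PRD[1]: addr = 0x00010000, len = 0x0200	(the remainder)
 *
 * and ATA_PRD_EOT is then set in the flags_len word of the last entry.
 */
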
4057c6fd2807SJeff Garzik /**
4058c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4059c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4060c6fd2807SJeff Garzik  *
4061c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4062c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4063c6fd2807SJeff Garzik  *	supplied PACKET command.
4064c6fd2807SJeff Garzik  *
4065c6fd2807SJeff Garzik  *	LOCKING:
4066cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4067c6fd2807SJeff Garzik  *
4068c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4069c6fd2807SJeff Garzik  *               nonzero otherwise
4070c6fd2807SJeff Garzik  */
4071c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4072c6fd2807SJeff Garzik {
4073c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4074c6fd2807SJeff Garzik 	int rc = 0; /* Assume ATAPI DMA is OK by default */
4075c6fd2807SJeff Garzik 
40766f23a31dSAlbert Lee 	/* some drives can only do ATAPI DMA on read/write */
40776f23a31dSAlbert Lee 	if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
40786f23a31dSAlbert Lee 		struct scsi_cmnd *cmd = qc->scsicmd;
40796f23a31dSAlbert Lee 		u8 *scsicmd = cmd->cmnd;
40806f23a31dSAlbert Lee 
40816f23a31dSAlbert Lee 		switch (scsicmd[0]) {
40826f23a31dSAlbert Lee 		case READ_10:
40836f23a31dSAlbert Lee 		case WRITE_10:
40846f23a31dSAlbert Lee 		case READ_12:
40856f23a31dSAlbert Lee 		case WRITE_12:
40866f23a31dSAlbert Lee 		case READ_6:
40876f23a31dSAlbert Lee 		case WRITE_6:
40886f23a31dSAlbert Lee 			/* atapi dma may be ok */
40896f23a31dSAlbert Lee 			break;
40906f23a31dSAlbert Lee 		default:
40916f23a31dSAlbert Lee 			/* turn off atapi dma */
40926f23a31dSAlbert Lee 			return 1;
40936f23a31dSAlbert Lee 		}
40946f23a31dSAlbert Lee 	}
40956f23a31dSAlbert Lee 
4096c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4097c6fd2807SJeff Garzik 		rc = ap->ops->check_atapi_dma(qc);
4098c6fd2807SJeff Garzik 
4099c6fd2807SJeff Garzik 	return rc;
4100c6fd2807SJeff Garzik }
4101c6fd2807SJeff Garzik /**
4102c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4103c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4104c6fd2807SJeff Garzik  *
4105c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4106c6fd2807SJeff Garzik  *
4107c6fd2807SJeff Garzik  *	LOCKING:
4108cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4109c6fd2807SJeff Garzik  */
4110c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4111c6fd2807SJeff Garzik {
4112c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4113c6fd2807SJeff Garzik 		return;
4114c6fd2807SJeff Garzik 
4115c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4116c6fd2807SJeff Garzik }
4117c6fd2807SJeff Garzik 
4118c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4119c6fd2807SJeff Garzik 
4120c6fd2807SJeff Garzik /**
4121c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4122c6fd2807SJeff Garzik  *	@qc: Command to be associated
4123c6fd2807SJeff Garzik  *	@buf: Memory buffer
4124c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4125c6fd2807SJeff Garzik  *
4126c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4127c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4128c6fd2807SJeff Garzik  *
4129c6fd2807SJeff Garzik  *	LOCKING:
4130cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4131c6fd2807SJeff Garzik  */
4132c6fd2807SJeff Garzik 
4133c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4134c6fd2807SJeff Garzik {
4135c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4136c6fd2807SJeff Garzik 
4137c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4138c6fd2807SJeff Garzik 	qc->n_elem = 1;
4139c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4140c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4141c6fd2807SJeff Garzik 	qc->nbytes = buflen;
4142c6fd2807SJeff Garzik 
414361c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4144c6fd2807SJeff Garzik }
4145c6fd2807SJeff Garzik 
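/*
 * Illustrative sketch (added, not part of the original file): a caller that
 * issues a command with one contiguous data buffer pairs ata_sg_init_one()
 * with the matching DMA direction before the qc is issued.  The helper name
 * below is hypothetical and only shows the calling convention.
 */
#if 0
static void example_setup_single_buffer(struct ata_queued_cmd *qc,
					void *buf, unsigned int buflen)
{
	ata_sg_init_one(qc, buf, buflen);	/* one sg entry, qc->nbytes = buflen */
	qc->dma_dir = DMA_FROM_DEVICE;		/* device-to-host transfer */
}
#endif
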
4146c6fd2807SJeff Garzik /**
4147c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4148c6fd2807SJeff Garzik  *	@qc: Command to be associated
4149c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4150c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4151c6fd2807SJeff Garzik  *
4152c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4153c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4154c6fd2807SJeff Garzik  *	elements.
4155c6fd2807SJeff Garzik  *
4156c6fd2807SJeff Garzik  *	LOCKING:
4157cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4158c6fd2807SJeff Garzik  */
4159c6fd2807SJeff Garzik 
4160c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4161c6fd2807SJeff Garzik 		 unsigned int n_elem)
4162c6fd2807SJeff Garzik {
4163c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4164c6fd2807SJeff Garzik 	qc->__sg = sg;
4165c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4166c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
4167c6fd2807SJeff Garzik }
4168c6fd2807SJeff Garzik 
4169c6fd2807SJeff Garzik /**
4170c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4171c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4172c6fd2807SJeff Garzik  *
4173c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4174c6fd2807SJeff Garzik  *
4175c6fd2807SJeff Garzik  *	LOCKING:
4176cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4177c6fd2807SJeff Garzik  *
4178c6fd2807SJeff Garzik  *	RETURNS:
4179c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4180c6fd2807SJeff Garzik  */
4181c6fd2807SJeff Garzik 
4182c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4183c6fd2807SJeff Garzik {
4184c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4185c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4186c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4187c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4188c6fd2807SJeff Garzik 	int trim_sg = 0;
4189c6fd2807SJeff Garzik 
4190c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4191c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4192c6fd2807SJeff Garzik 	if (qc->pad_len) {
4193c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4194c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4195c6fd2807SJeff Garzik 
4196c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4197c6fd2807SJeff Garzik 
4198c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4199c6fd2807SJeff Garzik 
4200c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4201c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4202c6fd2807SJeff Garzik 			       qc->pad_len);
4203c6fd2807SJeff Garzik 
4204c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4205c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4206c6fd2807SJeff Garzik 		/* trim sg */
4207c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4208c6fd2807SJeff Garzik 		if (sg->length == 0)
4209c6fd2807SJeff Garzik 			trim_sg = 1;
4210c6fd2807SJeff Garzik 
4211c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4212c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4213c6fd2807SJeff Garzik 	}
4214c6fd2807SJeff Garzik 
4215c6fd2807SJeff Garzik 	if (trim_sg) {
4216c6fd2807SJeff Garzik 		qc->n_elem--;
4217c6fd2807SJeff Garzik 		goto skip_map;
4218c6fd2807SJeff Garzik 	}
4219c6fd2807SJeff Garzik 
4220c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4221c6fd2807SJeff Garzik 				     sg->length, dir);
4222c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4223c6fd2807SJeff Garzik 		/* restore sg */
4224c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4225c6fd2807SJeff Garzik 		return -1;
4226c6fd2807SJeff Garzik 	}
4227c6fd2807SJeff Garzik 
4228c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4229c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4230c6fd2807SJeff Garzik 
4231c6fd2807SJeff Garzik skip_map:
4232c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4233c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4234c6fd2807SJeff Garzik 
4235c6fd2807SJeff Garzik 	return 0;
4236c6fd2807SJeff Garzik }
4237c6fd2807SJeff Garzik 
4238c6fd2807SJeff Garzik /**
4239c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4240c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4241c6fd2807SJeff Garzik  *
4242c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4243c6fd2807SJeff Garzik  *
4244c6fd2807SJeff Garzik  *	LOCKING:
4245cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4246c6fd2807SJeff Garzik  *
4247c6fd2807SJeff Garzik  *	RETURNS:
4248c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4249c6fd2807SJeff Garzik  *
4250c6fd2807SJeff Garzik  */
4251c6fd2807SJeff Garzik 
4252c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4253c6fd2807SJeff Garzik {
4254c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4255c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4256c6fd2807SJeff Garzik 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
4257c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4258c6fd2807SJeff Garzik 
425944877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4260c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4261c6fd2807SJeff Garzik 
4262c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4263c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4264c6fd2807SJeff Garzik 	if (qc->pad_len) {
4265c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4266c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4267c6fd2807SJeff Garzik 		unsigned int offset;
4268c6fd2807SJeff Garzik 
4269c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4270c6fd2807SJeff Garzik 
4271c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4272c6fd2807SJeff Garzik 
4273c6fd2807SJeff Garzik 		/*
4274c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4275c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4276c6fd2807SJeff Garzik 		 */
4277c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4278c6fd2807SJeff Garzik 		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4279c6fd2807SJeff Garzik 		psg->offset = offset_in_page(offset);
4280c6fd2807SJeff Garzik 
4281c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4282c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4283c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4284c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4285c6fd2807SJeff Garzik 		}
4286c6fd2807SJeff Garzik 
4287c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4288c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4289c6fd2807SJeff Garzik 		/* trim last sg */
4290c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4291c6fd2807SJeff Garzik 		if (lsg->length == 0)
4292c6fd2807SJeff Garzik 			trim_sg = 1;
4293c6fd2807SJeff Garzik 
4294c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4295c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4296c6fd2807SJeff Garzik 	}
4297c6fd2807SJeff Garzik 
4298c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4299c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4300c6fd2807SJeff Garzik 		pre_n_elem--;
4301c6fd2807SJeff Garzik 
4302c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4303c6fd2807SJeff Garzik 		n_elem = 0;
4304c6fd2807SJeff Garzik 		goto skip_map;
4305c6fd2807SJeff Garzik 	}
4306c6fd2807SJeff Garzik 
4307c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4308c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4309c6fd2807SJeff Garzik 	if (n_elem < 1) {
4310c6fd2807SJeff Garzik 		/* restore last sg */
4311c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4312c6fd2807SJeff Garzik 		return -1;
4313c6fd2807SJeff Garzik 	}
4314c6fd2807SJeff Garzik 
4315c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4316c6fd2807SJeff Garzik 
4317c6fd2807SJeff Garzik skip_map:
4318c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4319c6fd2807SJeff Garzik 
4320c6fd2807SJeff Garzik 	return 0;
4321c6fd2807SJeff Garzik }
4322c6fd2807SJeff Garzik 
4323c6fd2807SJeff Garzik /**
4324c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4325c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4326c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4327c6fd2807SJeff Garzik  *
4328c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4329c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4330c6fd2807SJeff Garzik  *	vice-versa.
4331c6fd2807SJeff Garzik  *
4332c6fd2807SJeff Garzik  *	LOCKING:
4333c6fd2807SJeff Garzik  *	Inherited from caller.
4334c6fd2807SJeff Garzik  */
4335c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4336c6fd2807SJeff Garzik {
4337c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4338c6fd2807SJeff Garzik 	unsigned int i;
4339c6fd2807SJeff Garzik 
4340c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4341c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4342c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4343c6fd2807SJeff Garzik }
4344c6fd2807SJeff Garzik 
4345c6fd2807SJeff Garzik /**
43460d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4347c6fd2807SJeff Garzik  *	@adev: device to target
4348c6fd2807SJeff Garzik  *	@buf: data buffer
4349c6fd2807SJeff Garzik  *	@buflen: buffer length
4350c6fd2807SJeff Garzik  *	@write_data: read/write
4351c6fd2807SJeff Garzik  *
4352c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4353c6fd2807SJeff Garzik  *
4354c6fd2807SJeff Garzik  *	LOCKING:
4355c6fd2807SJeff Garzik  *	Inherited from caller.
4356c6fd2807SJeff Garzik  */
43570d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4358c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4359c6fd2807SJeff Garzik {
4360c6fd2807SJeff Garzik 	struct ata_port *ap = adev->ap;
4361c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4362c6fd2807SJeff Garzik 
4363c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4364c6fd2807SJeff Garzik 	if (write_data)
43650d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4366c6fd2807SJeff Garzik 	else
43670d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4368c6fd2807SJeff Garzik 
4369c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4370c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4371c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4372c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4373c6fd2807SJeff Garzik 
4374c6fd2807SJeff Garzik 		if (write_data) {
4375c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
43760d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4377c6fd2807SJeff Garzik 		} else {
43780d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4379c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4380c6fd2807SJeff Garzik 		}
4381c6fd2807SJeff Garzik 	}
4382c6fd2807SJeff Garzik }
4383c6fd2807SJeff Garzik 
4384c6fd2807SJeff Garzik /**
43850d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4386c6fd2807SJeff Garzik  *	@adev: device to target
4387c6fd2807SJeff Garzik  *	@buf: data buffer
4388c6fd2807SJeff Garzik  *	@buflen: buffer length
4389c6fd2807SJeff Garzik  *	@write_data: read/write
4390c6fd2807SJeff Garzik  *
4391c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4392c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4393c6fd2807SJeff Garzik  *
4394c6fd2807SJeff Garzik  *	LOCKING:
4395c6fd2807SJeff Garzik  *	Inherited from caller.
4396c6fd2807SJeff Garzik  */
43970d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4398c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
4399c6fd2807SJeff Garzik {
4400c6fd2807SJeff Garzik 	unsigned long flags;
4401c6fd2807SJeff Garzik 	local_irq_save(flags);
44020d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
4403c6fd2807SJeff Garzik 	local_irq_restore(flags);
4404c6fd2807SJeff Garzik }
4405c6fd2807SJeff Garzik 
4406c6fd2807SJeff Garzik 
4407c6fd2807SJeff Garzik /**
44085a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
4409c6fd2807SJeff Garzik  *	@qc: Command on going
4410c6fd2807SJeff Garzik  *
44115a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4412c6fd2807SJeff Garzik  *
4413c6fd2807SJeff Garzik  *	LOCKING:
4414c6fd2807SJeff Garzik  *	Inherited from caller.
4415c6fd2807SJeff Garzik  */
4416c6fd2807SJeff Garzik 
4417c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
4418c6fd2807SJeff Garzik {
4419c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4420c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4421c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4422c6fd2807SJeff Garzik 	struct page *page;
4423c6fd2807SJeff Garzik 	unsigned int offset;
4424c6fd2807SJeff Garzik 	unsigned char *buf;
4425c6fd2807SJeff Garzik 
44265a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4427c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4428c6fd2807SJeff Garzik 
4429c6fd2807SJeff Garzik 	page = sg[qc->cursg].page;
4430726f0785STejun Heo 	offset = sg[qc->cursg].offset + qc->cursg_ofs;
4431c6fd2807SJeff Garzik 
4432c6fd2807SJeff Garzik 	/* get the current page and offset */
4433c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4434c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4435c6fd2807SJeff Garzik 
4436c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4437c6fd2807SJeff Garzik 
4438c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4439c6fd2807SJeff Garzik 		unsigned long flags;
4440c6fd2807SJeff Garzik 
4441c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
4442c6fd2807SJeff Garzik 		local_irq_save(flags);
4443c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4444c6fd2807SJeff Garzik 
4445c6fd2807SJeff Garzik 		/* do the actual data transfer */
44465a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4447c6fd2807SJeff Garzik 
4448c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4449c6fd2807SJeff Garzik 		local_irq_restore(flags);
4450c6fd2807SJeff Garzik 	} else {
4451c6fd2807SJeff Garzik 		buf = page_address(page);
44525a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4453c6fd2807SJeff Garzik 	}
4454c6fd2807SJeff Garzik 
44555a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
44565a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
4457c6fd2807SJeff Garzik 
4458726f0785STejun Heo 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4459c6fd2807SJeff Garzik 		qc->cursg++;
4460c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4461c6fd2807SJeff Garzik 	}
4462c6fd2807SJeff Garzik }
4463c6fd2807SJeff Garzik 
4464c6fd2807SJeff Garzik /**
44655a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
4466c6fd2807SJeff Garzik  *	@qc: Command on going
4467c6fd2807SJeff Garzik  *
44685a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
4469c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
4470c6fd2807SJeff Garzik  *
4471c6fd2807SJeff Garzik  *	LOCKING:
4472c6fd2807SJeff Garzik  *	Inherited from caller.
4473c6fd2807SJeff Garzik  */
4474c6fd2807SJeff Garzik 
4475c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
4476c6fd2807SJeff Garzik {
4477c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
4478c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
4479c6fd2807SJeff Garzik 		unsigned int nsect;
4480c6fd2807SJeff Garzik 
4481c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
4482c6fd2807SJeff Garzik 
44835a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4484726f0785STejun Heo 			    qc->dev->multi_count);
4485c6fd2807SJeff Garzik 		while (nsect--)
4486c6fd2807SJeff Garzik 			ata_pio_sector(qc);
4487c6fd2807SJeff Garzik 	} else
4488c6fd2807SJeff Garzik 		ata_pio_sector(qc);
4489c6fd2807SJeff Garzik }
4490c6fd2807SJeff Garzik 
4491c6fd2807SJeff Garzik /**
4492c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
4493c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
4494c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
4495c6fd2807SJeff Garzik  *
4496c6fd2807SJeff Garzik  *	When the device has indicated its readiness to accept
4497c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
4498c6fd2807SJeff Garzik  *
4499c6fd2807SJeff Garzik  *	LOCKING:
4500c6fd2807SJeff Garzik  *	Inherited from caller.
4501c6fd2807SJeff Garzik  */
4502c6fd2807SJeff Garzik 
4503c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4504c6fd2807SJeff Garzik {
4505c6fd2807SJeff Garzik 	/* send SCSI cdb */
4506c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
4507c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
4508c6fd2807SJeff Garzik 
4509c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4510c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4511c6fd2807SJeff Garzik 
4512c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
4513c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
4514c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
4515c6fd2807SJeff Garzik 		break;
4516c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
4517c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4518c6fd2807SJeff Garzik 		break;
4519c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
4520c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4521c6fd2807SJeff Garzik 		/* initiate bmdma */
4522c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
4523c6fd2807SJeff Garzik 		break;
4524c6fd2807SJeff Garzik 	}
4525c6fd2807SJeff Garzik }
4526c6fd2807SJeff Garzik 
4527c6fd2807SJeff Garzik /**
4528c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4529c6fd2807SJeff Garzik  *	@qc: Command on going
4530c6fd2807SJeff Garzik  *	@bytes: number of bytes
4531c6fd2807SJeff Garzik  *
4532c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4533c6fd2807SJeff Garzik  *
4534c6fd2807SJeff Garzik  *	LOCKING:
4535c6fd2807SJeff Garzik  *	Inherited from caller.
4536c6fd2807SJeff Garzik  *
4537c6fd2807SJeff Garzik  */
4538c6fd2807SJeff Garzik 
4539c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4540c6fd2807SJeff Garzik {
4541c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4542c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4543c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4544c6fd2807SJeff Garzik 	struct page *page;
4545c6fd2807SJeff Garzik 	unsigned char *buf;
4546c6fd2807SJeff Garzik 	unsigned int offset, count;
4547c6fd2807SJeff Garzik 
4548c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
4549c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4550c6fd2807SJeff Garzik 
4551c6fd2807SJeff Garzik next_sg:
4552c6fd2807SJeff Garzik 	if (unlikely(qc->cursg >= qc->n_elem)) {
4553c6fd2807SJeff Garzik 		/*
4554c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
4555c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
4556c6fd2807SJeff Garzik 		 * and fulfill length specified in the byte count register,
4557c6fd2807SJeff Garzik 		 *    - for read case, discard trailing data from the device
4558c6fd2807SJeff Garzik 		 *    - for write case, padding zero data to the device
4559c6fd2807SJeff Garzik 		 */
4560c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
4561c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
4562c6fd2807SJeff Garzik 		unsigned int i;
4563c6fd2807SJeff Garzik 
4564c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
4565c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
4566c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
4567c6fd2807SJeff Garzik 
4568c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
4569c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4570c6fd2807SJeff Garzik 
4571c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4572c6fd2807SJeff Garzik 		return;
4573c6fd2807SJeff Garzik 	}
4574c6fd2807SJeff Garzik 
4575c6fd2807SJeff Garzik 	sg = &qc->__sg[qc->cursg];
4576c6fd2807SJeff Garzik 
4577c6fd2807SJeff Garzik 	page = sg->page;
4578c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
4579c6fd2807SJeff Garzik 
4580c6fd2807SJeff Garzik 	/* get the current page and offset */
4581c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4582c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4583c6fd2807SJeff Garzik 
4584c6fd2807SJeff Garzik 	/* don't overrun current sg */
4585c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
4586c6fd2807SJeff Garzik 
4587c6fd2807SJeff Garzik 	/* don't cross page boundaries */
4588c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
4589c6fd2807SJeff Garzik 
4590c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4591c6fd2807SJeff Garzik 
4592c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4593c6fd2807SJeff Garzik 		unsigned long flags;
4594c6fd2807SJeff Garzik 
4595c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
4596c6fd2807SJeff Garzik 		local_irq_save(flags);
4597c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4598c6fd2807SJeff Garzik 
4599c6fd2807SJeff Garzik 		/* do the actual data transfer */
4600c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4601c6fd2807SJeff Garzik 
4602c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4603c6fd2807SJeff Garzik 		local_irq_restore(flags);
4604c6fd2807SJeff Garzik 	} else {
4605c6fd2807SJeff Garzik 		buf = page_address(page);
4606c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4607c6fd2807SJeff Garzik 	}
4608c6fd2807SJeff Garzik 
4609c6fd2807SJeff Garzik 	bytes -= count;
4610c6fd2807SJeff Garzik 	qc->curbytes += count;
4611c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
4612c6fd2807SJeff Garzik 
4613c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
4614c6fd2807SJeff Garzik 		qc->cursg++;
4615c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4616c6fd2807SJeff Garzik 	}
4617c6fd2807SJeff Garzik 
4618c6fd2807SJeff Garzik 	if (bytes)
4619c6fd2807SJeff Garzik 		goto next_sg;
4620c6fd2807SJeff Garzik }
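/*
 * Note added for clarity (not part of the original source): the PageHighMem
 * path above wraps kmap_atomic(page, KM_IRQ0) in local_irq_save()/
 * local_irq_restore().  The KM_IRQ0 fixmap slot is the one reserved for use
 * from interrupt context, and this routine can also run from the polling
 * work queue with interrupts enabled, so IRQs are disabled to keep an
 * interrupt handler from reusing the same slot while the mapping is live.
 * The FIXME about a bounce buffer points at a cleaner long-term alternative.
 */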
4621c6fd2807SJeff Garzik 
4622c6fd2807SJeff Garzik /**
4623c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
4624c6fd2807SJeff Garzik  *	@qc: Command on going
4625c6fd2807SJeff Garzik  *
4626c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4627c6fd2807SJeff Garzik  *
4628c6fd2807SJeff Garzik  *	LOCKING:
4629c6fd2807SJeff Garzik  *	Inherited from caller.
4630c6fd2807SJeff Garzik  */
4631c6fd2807SJeff Garzik 
4632c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4633c6fd2807SJeff Garzik {
4634c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4635c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
4636c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
4637c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4638c6fd2807SJeff Garzik 
4639c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
4640c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
4641c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
4642c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
4643c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
4644c6fd2807SJeff Garzik 	 */
4645c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
4646c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
4647c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
4648c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
4649c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
4650c6fd2807SJeff Garzik 
4651c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
4652c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
4653c6fd2807SJeff Garzik 		goto err_out;
4654c6fd2807SJeff Garzik 
4655c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
4656c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4657c6fd2807SJeff Garzik 	if (do_write != i_write)
4658c6fd2807SJeff Garzik 		goto err_out;
4659c6fd2807SJeff Garzik 
466044877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4661c6fd2807SJeff Garzik 
4662c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
4663c6fd2807SJeff Garzik 
4664c6fd2807SJeff Garzik 	return;
4665c6fd2807SJeff Garzik 
4666c6fd2807SJeff Garzik err_out:
4667c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4668c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
4669c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
4670c6fd2807SJeff Garzik }
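/*
 * Note added for clarity (not part of the original source): the ireason
 * checks above decode the ATAPI interrupt reason register.  Bit 0 (CoD) set
 * means the device wants a command packet rather than data, so it must be
 * clear during a data phase; bit 1 (IO) set means the transfer runs
 * device-to-host.  A data-out phase therefore reports CoD=0/IO=0 and a
 * data-in phase CoD=0/IO=1, which is what the do_write/i_write comparison
 * verifies before any bytes are moved.
 */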
4671c6fd2807SJeff Garzik 
4672c6fd2807SJeff Garzik /**
4673c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4674c6fd2807SJeff Garzik  *	@ap: the target ata_port
4675c6fd2807SJeff Garzik  *	@qc: qc on going
4676c6fd2807SJeff Garzik  *
4677c6fd2807SJeff Garzik  *	RETURNS:
4678c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
4679c6fd2807SJeff Garzik  */
4680c6fd2807SJeff Garzik 
4681c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4682c6fd2807SJeff Garzik {
4683c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
4684c6fd2807SJeff Garzik 		return 1;
4685c6fd2807SJeff Garzik 
4686c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
4687c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
4688c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
4689c6fd2807SJeff Garzik 		    return 1;
4690c6fd2807SJeff Garzik 
4691c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
4692c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4693c6fd2807SJeff Garzik 			return 1;
4694c6fd2807SJeff Garzik 	}
4695c6fd2807SJeff Garzik 
4696c6fd2807SJeff Garzik 	return 0;
4697c6fd2807SJeff Garzik }
4698c6fd2807SJeff Garzik 
4699c6fd2807SJeff Garzik /**
4700c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
4701c6fd2807SJeff Garzik  *	@qc: Command to complete
4702c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4703c6fd2807SJeff Garzik  *
4704c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
4705c6fd2807SJeff Garzik  *
4706c6fd2807SJeff Garzik  *	LOCKING:
4707cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
4708c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
4709c6fd2807SJeff Garzik  */
4710c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4711c6fd2807SJeff Garzik {
4712c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4713c6fd2807SJeff Garzik 	unsigned long flags;
4714c6fd2807SJeff Garzik 
4715c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
4716c6fd2807SJeff Garzik 		if (in_wq) {
4717c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4718c6fd2807SJeff Garzik 
4719cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
4720cca3974eSJeff Garzik 			 * released.
4721c6fd2807SJeff Garzik 			 */
4722c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
4723c6fd2807SJeff Garzik 			if (qc) {
4724c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
472583625006SAkira Iguchi 					ap->ops->irq_on(ap);
4726c6fd2807SJeff Garzik 					ata_qc_complete(qc);
4727c6fd2807SJeff Garzik 				} else
4728c6fd2807SJeff Garzik 					ata_port_freeze(ap);
4729c6fd2807SJeff Garzik 			}
4730c6fd2807SJeff Garzik 
4731c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4732c6fd2807SJeff Garzik 		} else {
4733c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
4734c6fd2807SJeff Garzik 				ata_qc_complete(qc);
4735c6fd2807SJeff Garzik 			else
4736c6fd2807SJeff Garzik 				ata_port_freeze(ap);
4737c6fd2807SJeff Garzik 		}
4738c6fd2807SJeff Garzik 	} else {
4739c6fd2807SJeff Garzik 		if (in_wq) {
4740c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
474183625006SAkira Iguchi 			ap->ops->irq_on(ap);
4742c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4743c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4744c6fd2807SJeff Garzik 		} else
4745c6fd2807SJeff Garzik 			ata_qc_complete(qc);
4746c6fd2807SJeff Garzik 	}
4747c6fd2807SJeff Garzik 
4748c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4749c6fd2807SJeff Garzik }
4750c6fd2807SJeff Garzik 
4751c6fd2807SJeff Garzik /**
4752c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
4753c6fd2807SJeff Garzik  *	@ap: the target ata_port
4754c6fd2807SJeff Garzik  *	@qc: qc on going
4755c6fd2807SJeff Garzik  *	@status: current device status
4756c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
4757c6fd2807SJeff Garzik  *
4758c6fd2807SJeff Garzik  *	RETURNS:
4759c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
4760c6fd2807SJeff Garzik  */
4761c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4762c6fd2807SJeff Garzik 		 u8 status, int in_wq)
4763c6fd2807SJeff Garzik {
4764c6fd2807SJeff Garzik 	unsigned long flags = 0;
4765c6fd2807SJeff Garzik 	int poll_next;
4766c6fd2807SJeff Garzik 
4767c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4768c6fd2807SJeff Garzik 
4769c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
4770c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
4771c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4772c6fd2807SJeff Garzik 	 */
4773c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4774c6fd2807SJeff Garzik 
4775c6fd2807SJeff Garzik fsm_start:
4776c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
477744877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4778c6fd2807SJeff Garzik 
4779c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
4780c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
4781c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
4782c6fd2807SJeff Garzik 
4783c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
4784c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
4785c6fd2807SJeff Garzik 		 * takes over after sending the data.
4786c6fd2807SJeff Garzik 		 */
4787c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4788c6fd2807SJeff Garzik 
4789c6fd2807SJeff Garzik 		/* check device status */
4790c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
4791c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
4792c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
4793c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
4794c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
4795c6fd2807SJeff Garzik 			else
4796c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
4797c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4798c6fd2807SJeff Garzik 
4799c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4800c6fd2807SJeff Garzik 			goto fsm_start;
4801c6fd2807SJeff Garzik 		}
4802c6fd2807SJeff Garzik 
4803c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
4804c6fd2807SJeff Garzik 		 * when it finds something wrong.
4805c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
4806c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
4807c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
4808c6fd2807SJeff Garzik 		 */
4809c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
481044877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
481144877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
4812c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
4813c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4814c6fd2807SJeff Garzik 			goto fsm_start;
4815c6fd2807SJeff Garzik 		}
4816c6fd2807SJeff Garzik 
4817c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
4818c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
4819c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
4820c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
4821c6fd2807SJeff Garzik 		 */
4822c6fd2807SJeff Garzik 		if (in_wq)
4823c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
4824c6fd2807SJeff Garzik 
4825c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
4826c6fd2807SJeff Garzik 			/* PIO data out protocol.
4827c6fd2807SJeff Garzik 			 * send first data block.
4828c6fd2807SJeff Garzik 			 */
4829c6fd2807SJeff Garzik 
4830c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
4831c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
4832c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
4833c6fd2807SJeff Garzik 			 */
4834c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
4835c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
4836c6fd2807SJeff Garzik 			ata_altstatus(ap); /* flush */
4837c6fd2807SJeff Garzik 		} else
4838c6fd2807SJeff Garzik 			/* send CDB */
4839c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
4840c6fd2807SJeff Garzik 
4841c6fd2807SJeff Garzik 		if (in_wq)
4842c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
4843c6fd2807SJeff Garzik 
4844c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
4845c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
4846c6fd2807SJeff Garzik 		 */
4847c6fd2807SJeff Garzik 		break;
4848c6fd2807SJeff Garzik 
4849c6fd2807SJeff Garzik 	case HSM_ST:
4850c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
4851c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
4852c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
4853c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
4854c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
4855c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
4856c6fd2807SJeff Garzik 				 */
4857c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
4858c6fd2807SJeff Garzik 				goto fsm_start;
4859c6fd2807SJeff Garzik 			}
4860c6fd2807SJeff Garzik 
4861c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
4862c6fd2807SJeff Garzik 			 * when it finds something wrong.
4863c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
4864c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
4865c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
4866c6fd2807SJeff Garzik 			 */
4867c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
486844877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
486944877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
487044877b4eSTejun Heo 						status);
4871c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
4872c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4873c6fd2807SJeff Garzik 				goto fsm_start;
4874c6fd2807SJeff Garzik 			}
4875c6fd2807SJeff Garzik 
4876c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
4877c6fd2807SJeff Garzik 
4878c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4879c6fd2807SJeff Garzik 				/* bad ireason reported by device */
4880c6fd2807SJeff Garzik 				goto fsm_start;
4881c6fd2807SJeff Garzik 
4882c6fd2807SJeff Garzik 		} else {
4883c6fd2807SJeff Garzik 			/* ATA PIO protocol */
4884c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
4885c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
4886c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
4887c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
4888c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
4889c6fd2807SJeff Garzik 				else
489055a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
489155a8e2c8STejun Heo 					 * Phantom devices also trigger this
489255a8e2c8STejun Heo 					 * condition, so set the no-device hint.
489355a8e2c8STejun Heo 					 */
489455a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
489555a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
4896c6fd2807SJeff Garzik 
4897c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4898c6fd2807SJeff Garzik 				goto fsm_start;
4899c6fd2807SJeff Garzik 			}
4900c6fd2807SJeff Garzik 
4901c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
4902c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
4903c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
4904c6fd2807SJeff Garzik 			 * block of junk data before changing the
4905c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
4906c6fd2807SJeff Garzik 			 *
4907c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
4908c6fd2807SJeff Garzik 			 * sense since the data block has been
4909c6fd2807SJeff Garzik 			 * transferred to the device.
4910c6fd2807SJeff Garzik 			 */
4911c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
4912c6fd2807SJeff Garzik 				/* data might be corrupted */
4913c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
4914c6fd2807SJeff Garzik 
4915c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4916c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
4917c6fd2807SJeff Garzik 					ata_altstatus(ap);
4918c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
4919c6fd2807SJeff Garzik 				}
4920c6fd2807SJeff Garzik 
4921c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
4922c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
4923c6fd2807SJeff Garzik 
4924c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
4925c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
4926c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
4927c6fd2807SJeff Garzik 				 */
4928c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
4929c6fd2807SJeff Garzik 				goto fsm_start;
4930c6fd2807SJeff Garzik 			}
4931c6fd2807SJeff Garzik 
4932c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
4933c6fd2807SJeff Garzik 
4934c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
4935c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4936c6fd2807SJeff Garzik 				/* all data read */
4937c6fd2807SJeff Garzik 				ata_altstatus(ap);
4938c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
4939c6fd2807SJeff Garzik 				goto fsm_start;
4940c6fd2807SJeff Garzik 			}
4941c6fd2807SJeff Garzik 		}
4942c6fd2807SJeff Garzik 
4943c6fd2807SJeff Garzik 		ata_altstatus(ap); /* flush */
4944c6fd2807SJeff Garzik 		poll_next = 1;
4945c6fd2807SJeff Garzik 		break;
4946c6fd2807SJeff Garzik 
4947c6fd2807SJeff Garzik 	case HSM_ST_LAST:
4948c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
4949c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
4950c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
4951c6fd2807SJeff Garzik 			goto fsm_start;
4952c6fd2807SJeff Garzik 		}
4953c6fd2807SJeff Garzik 
4954c6fd2807SJeff Garzik 		/* no more data to transfer */
4955c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
495644877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
4957c6fd2807SJeff Garzik 
4958c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
4959c6fd2807SJeff Garzik 
4960c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
4961c6fd2807SJeff Garzik 
4962c6fd2807SJeff Garzik 		/* complete taskfile transaction */
4963c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
4964c6fd2807SJeff Garzik 
4965c6fd2807SJeff Garzik 		poll_next = 0;
4966c6fd2807SJeff Garzik 		break;
4967c6fd2807SJeff Garzik 
4968c6fd2807SJeff Garzik 	case HSM_ST_ERR:
4969c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
4970c6fd2807SJeff Garzik 		 * know what's wrong and recover
4971c6fd2807SJeff Garzik 		 */
4972c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
4973c6fd2807SJeff Garzik 
4974c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
4975c6fd2807SJeff Garzik 
4976c6fd2807SJeff Garzik 		/* complete taskfile transaction */
4977c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
4978c6fd2807SJeff Garzik 
4979c6fd2807SJeff Garzik 		poll_next = 0;
4980c6fd2807SJeff Garzik 		break;
4981c6fd2807SJeff Garzik 	default:
4982c6fd2807SJeff Garzik 		poll_next = 0;
4983c6fd2807SJeff Garzik 		BUG();
4984c6fd2807SJeff Garzik 	}
4985c6fd2807SJeff Garzik 
4986c6fd2807SJeff Garzik 	return poll_next;
4987c6fd2807SJeff Garzik }
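/*
 * Summary added for clarity (not part of the original source): for the
 * taskfile protocols driven by this state machine the usual progression is
 * HSM_ST_FIRST (send the ATAPI CDB or the first PIO data-out block), then
 * HSM_ST (transfer data blocks), then HSM_ST_LAST (collect final status) and
 * finally HSM_ST_IDLE once ata_hsm_qc_complete() has run.  PIO data-in
 * commands start directly in HSM_ST and non-data commands in HSM_ST_LAST,
 * since there is nothing to send first.  Any device error or HSM violation
 * routes through HSM_ST_ERR, which also ends in HSM_ST_IDLE after the qc has
 * been handed to EH or completed with an error mask.
 */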
4988c6fd2807SJeff Garzik 
498965f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
4990c6fd2807SJeff Garzik {
499165f27f38SDavid Howells 	struct ata_port *ap =
499265f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
499365f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
4994c6fd2807SJeff Garzik 	u8 status;
4995c6fd2807SJeff Garzik 	int poll_next;
4996c6fd2807SJeff Garzik 
4997c6fd2807SJeff Garzik fsm_start:
4998c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4999c6fd2807SJeff Garzik 
5000c6fd2807SJeff Garzik 	/*
5001c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5002c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5003c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5004c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5005c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5006c6fd2807SJeff Garzik 	 */
5007c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5008c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5009c6fd2807SJeff Garzik 		msleep(2);
5010c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5011c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5012c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5013c6fd2807SJeff Garzik 			return;
5014c6fd2807SJeff Garzik 		}
5015c6fd2807SJeff Garzik 	}
5016c6fd2807SJeff Garzik 
5017c6fd2807SJeff Garzik 	/* move the HSM */
5018c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5019c6fd2807SJeff Garzik 
5020c6fd2807SJeff Garzik 	/* another command or interrupt handler
5021c6fd2807SJeff Garzik 	 * may be running at this point.
5022c6fd2807SJeff Garzik 	 */
5023c6fd2807SJeff Garzik 	if (poll_next)
5024c6fd2807SJeff Garzik 		goto fsm_start;
5025c6fd2807SJeff Garzik }
5026c6fd2807SJeff Garzik 
5027c6fd2807SJeff Garzik /**
5028c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5029c6fd2807SJeff Garzik  *	@ap: Port from which an available command structure is requested
5031c6fd2807SJeff Garzik  *
5032c6fd2807SJeff Garzik  *	LOCKING:
5033c6fd2807SJeff Garzik  *	None.
5034c6fd2807SJeff Garzik  */
5035c6fd2807SJeff Garzik 
5036c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5037c6fd2807SJeff Garzik {
5038c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5039c6fd2807SJeff Garzik 	unsigned int i;
5040c6fd2807SJeff Garzik 
5041c6fd2807SJeff Garzik 	/* no command while frozen */
5042c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5043c6fd2807SJeff Garzik 		return NULL;
5044c6fd2807SJeff Garzik 
5045c6fd2807SJeff Garzik 	/* the last tag is reserved for the internal command. */
5046c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5047c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5048c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5049c6fd2807SJeff Garzik 			break;
5050c6fd2807SJeff Garzik 		}
5051c6fd2807SJeff Garzik 
5052c6fd2807SJeff Garzik 	if (qc)
5053c6fd2807SJeff Garzik 		qc->tag = i;
5054c6fd2807SJeff Garzik 
5055c6fd2807SJeff Garzik 	return qc;
5056c6fd2807SJeff Garzik }
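/*
 * Note added for clarity (not part of the original source): tags are
 * allocated from the ap->qc_allocated bitmap, and tag i simply indexes the
 * preallocated command array via __ata_qc_from_tag(ap, i).  Tag
 * ATA_MAX_QUEUE - 1 is never handed out here; it is the internal tag
 * recognized by ata_tag_internal() and is reserved for commands issued by
 * libata itself.
 */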
5057c6fd2807SJeff Garzik 
5058c6fd2807SJeff Garzik /**
5059c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5060c6fd2807SJeff Garzik  *	@dev: Device from which we request an available command structure
5061c6fd2807SJeff Garzik  *
5062c6fd2807SJeff Garzik  *	LOCKING:
5063c6fd2807SJeff Garzik  *	None.
5064c6fd2807SJeff Garzik  */
5065c6fd2807SJeff Garzik 
5066c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5067c6fd2807SJeff Garzik {
5068c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5069c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5070c6fd2807SJeff Garzik 
5071c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5072c6fd2807SJeff Garzik 	if (qc) {
5073c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5074c6fd2807SJeff Garzik 		qc->ap = ap;
5075c6fd2807SJeff Garzik 		qc->dev = dev;
5076c6fd2807SJeff Garzik 
5077c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5078c6fd2807SJeff Garzik 	}
5079c6fd2807SJeff Garzik 
5080c6fd2807SJeff Garzik 	return qc;
5081c6fd2807SJeff Garzik }
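/*
 * Usage sketch added for clarity (illustrative only): a caller such as the
 * SCSI translation layer typically obtains a qc with ata_qc_new_init(),
 * fills in qc->tf, the data direction and the scatter/gather information,
 * and then either hands it to ata_qc_issue() or, if setup fails before
 * issue, returns it with ata_qc_free().
 */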
5082c6fd2807SJeff Garzik 
5083c6fd2807SJeff Garzik /**
5084c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5085c6fd2807SJeff Garzik  *	@qc: Command to complete
5086c6fd2807SJeff Garzik  *
5087c6fd2807SJeff Garzik  *	Designed to free an unused ata_queued_cmd object
5088c6fd2807SJeff Garzik  *	in case something prevents it from being used.
5089c6fd2807SJeff Garzik  *
5090c6fd2807SJeff Garzik  *	LOCKING:
5091cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5092c6fd2807SJeff Garzik  */
5093c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5094c6fd2807SJeff Garzik {
5095c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5096c6fd2807SJeff Garzik 	unsigned int tag;
5097c6fd2807SJeff Garzik 
5098c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5099c6fd2807SJeff Garzik 
5100c6fd2807SJeff Garzik 	qc->flags = 0;
5101c6fd2807SJeff Garzik 	tag = qc->tag;
5102c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5103c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5104c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5105c6fd2807SJeff Garzik 	}
5106c6fd2807SJeff Garzik }
5107c6fd2807SJeff Garzik 
5108c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5109c6fd2807SJeff Garzik {
5110c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5111c6fd2807SJeff Garzik 
5112c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5113c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5114c6fd2807SJeff Garzik 
5115c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5116c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5117c6fd2807SJeff Garzik 
5118c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5119c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ)
5120c6fd2807SJeff Garzik 		ap->sactive &= ~(1 << qc->tag);
5121c6fd2807SJeff Garzik 	else
5122c6fd2807SJeff Garzik 		ap->active_tag = ATA_TAG_POISON;
5123c6fd2807SJeff Garzik 
5124c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5125c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5126c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5127c6fd2807SJeff Garzik 	 */
5128c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5129c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5130c6fd2807SJeff Garzik 
5131c6fd2807SJeff Garzik 	/* call completion callback */
5132c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5133c6fd2807SJeff Garzik }
5134c6fd2807SJeff Garzik 
513539599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
513639599a53STejun Heo {
513739599a53STejun Heo 	struct ata_port *ap = qc->ap;
513839599a53STejun Heo 
513939599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
51404742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
514139599a53STejun Heo }
514239599a53STejun Heo 
5143c6fd2807SJeff Garzik /**
5144c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5145c6fd2807SJeff Garzik  *	@qc: Command to complete
5147c6fd2807SJeff Garzik  *
5148c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5149c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5150c6fd2807SJeff Garzik  *
5151c6fd2807SJeff Garzik  *	LOCKING:
5152cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5153c6fd2807SJeff Garzik  */
5154c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5155c6fd2807SJeff Garzik {
5156c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5157c6fd2807SJeff Garzik 
5158c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5159c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5160c6fd2807SJeff Garzik 	 *
5161c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5162c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5163c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5164c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5165c6fd2807SJeff Garzik 	 *
5166c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5167c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5168c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5169c6fd2807SJeff Garzik 	 * taken care of.
5170c6fd2807SJeff Garzik 	 */
5171c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5172c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5173c6fd2807SJeff Garzik 
5174c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5175c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5176c6fd2807SJeff Garzik 
5177c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5178c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5179c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
518039599a53STejun Heo 				fill_result_tf(qc);
5181c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5182c6fd2807SJeff Garzik 				return;
5183c6fd2807SJeff Garzik 			}
5184c6fd2807SJeff Garzik 		}
5185c6fd2807SJeff Garzik 
5186c6fd2807SJeff Garzik 		/* read result TF if requested */
5187c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
518839599a53STejun Heo 			fill_result_tf(qc);
5189c6fd2807SJeff Garzik 
5190c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5191c6fd2807SJeff Garzik 	} else {
5192c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5193c6fd2807SJeff Garzik 			return;
5194c6fd2807SJeff Garzik 
5195c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5196c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
519739599a53STejun Heo 			fill_result_tf(qc);
5198c6fd2807SJeff Garzik 
5199c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5200c6fd2807SJeff Garzik 	}
5201c6fd2807SJeff Garzik }
5202c6fd2807SJeff Garzik 
5203c6fd2807SJeff Garzik /**
5204c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5205c6fd2807SJeff Garzik  *	@ap: port in question
5206c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5207c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5208c6fd2807SJeff Garzik  *
5209c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5210c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to complete
5211c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5212c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5213c6fd2807SJeff Garzik  *
5214c6fd2807SJeff Garzik  *	LOCKING:
5215cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5216c6fd2807SJeff Garzik  *
5217c6fd2807SJeff Garzik  *	RETURNS:
5218c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5219c6fd2807SJeff Garzik  */
5220c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5221c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5222c6fd2807SJeff Garzik {
5223c6fd2807SJeff Garzik 	int nr_done = 0;
5224c6fd2807SJeff Garzik 	u32 done_mask;
5225c6fd2807SJeff Garzik 	int i;
5226c6fd2807SJeff Garzik 
5227c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5228c6fd2807SJeff Garzik 
5229c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5230c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5231c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5232c6fd2807SJeff Garzik 		return -EINVAL;
5233c6fd2807SJeff Garzik 	}
5234c6fd2807SJeff Garzik 
5235c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5236c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5237c6fd2807SJeff Garzik 
5238c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5239c6fd2807SJeff Garzik 			continue;
5240c6fd2807SJeff Garzik 
5241c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5242c6fd2807SJeff Garzik 			if (finish_qc)
5243c6fd2807SJeff Garzik 				finish_qc(qc);
5244c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5245c6fd2807SJeff Garzik 			nr_done++;
5246c6fd2807SJeff Garzik 		}
5247c6fd2807SJeff Garzik 	}
5248c6fd2807SJeff Garzik 
5249c6fd2807SJeff Garzik 	return nr_done;
5250c6fd2807SJeff Garzik }
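/*
 * Usage sketch added for clarity (illustrative only; my_hw_active_tags() is
 * a hypothetical LLDD helper, not part of libata): an NCQ-capable driver
 * usually derives the new qc_active mask from its hardware state, e.g. the
 * SActive register, inside its interrupt handler and then lets this function
 * complete whatever has finished:
 *
 *	u32 qc_active = my_hw_active_tags(ap);
 *	int nr_done = ata_qc_complete_multiple(ap, qc_active, NULL);
 *
 * Bits that were set in ap->qc_active but are clear in qc_active mark the
 * commands that get completed.
 */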
5251c6fd2807SJeff Garzik 
5252c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5253c6fd2807SJeff Garzik {
5254c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5255c6fd2807SJeff Garzik 
5256c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5257c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5258c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5259c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5260c6fd2807SJeff Garzik 		return 1;
5261c6fd2807SJeff Garzik 
5262c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5263c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5264c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5265c6fd2807SJeff Garzik 			return 1;
5266c6fd2807SJeff Garzik 
5267c6fd2807SJeff Garzik 		/* fall through */
5268c6fd2807SJeff Garzik 
5269c6fd2807SJeff Garzik 	default:
5270c6fd2807SJeff Garzik 		return 0;
5271c6fd2807SJeff Garzik 	}
5272c6fd2807SJeff Garzik 
5273c6fd2807SJeff Garzik 	/* never reached */
5274c6fd2807SJeff Garzik }
5275c6fd2807SJeff Garzik 
5276c6fd2807SJeff Garzik /**
5277c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5278c6fd2807SJeff Garzik  *	@qc: command to issue to device
5279c6fd2807SJeff Garzik  *
5280c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5281c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5282c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5283c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5284c6fd2807SJeff Garzik  *
5285c6fd2807SJeff Garzik  *	LOCKING:
5286cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5287c6fd2807SJeff Garzik  */
5288c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5289c6fd2807SJeff Garzik {
5290c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5291c6fd2807SJeff Garzik 
5292c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5293c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5294c6fd2807SJeff Garzik 	 * request ATAPI sense.
5295c6fd2807SJeff Garzik 	 */
5296c6fd2807SJeff Garzik 	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5297c6fd2807SJeff Garzik 
5298c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
5299c6fd2807SJeff Garzik 		WARN_ON(ap->sactive & (1 << qc->tag));
5300c6fd2807SJeff Garzik 		ap->sactive |= 1 << qc->tag;
5301c6fd2807SJeff Garzik 	} else {
5302c6fd2807SJeff Garzik 		WARN_ON(ap->sactive);
5303c6fd2807SJeff Garzik 		ap->active_tag = qc->tag;
5304c6fd2807SJeff Garzik 	}
5305c6fd2807SJeff Garzik 
5306c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5307c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5308c6fd2807SJeff Garzik 
5309c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5310c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5311c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5312c6fd2807SJeff Garzik 				goto sg_err;
5313c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5314c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5315c6fd2807SJeff Garzik 				goto sg_err;
5316c6fd2807SJeff Garzik 		}
5317c6fd2807SJeff Garzik 	} else {
5318c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5319c6fd2807SJeff Garzik 	}
5320c6fd2807SJeff Garzik 
5321c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5322c6fd2807SJeff Garzik 
5323c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5324c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5325c6fd2807SJeff Garzik 		goto err;
5326c6fd2807SJeff Garzik 	return;
5327c6fd2807SJeff Garzik 
5328c6fd2807SJeff Garzik sg_err:
5329c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5330c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5331c6fd2807SJeff Garzik err:
5332c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5333c6fd2807SJeff Garzik }
5334c6fd2807SJeff Garzik 
5335c6fd2807SJeff Garzik /**
5336c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5337c6fd2807SJeff Garzik  *	@qc: command to issue to device
5338c6fd2807SJeff Garzik  *
5339c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5340c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5341c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5342c6fd2807SJeff Garzik  *	is slightly different.
5343c6fd2807SJeff Garzik  *
5344c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5345c6fd2807SJeff Garzik  *
5346c6fd2807SJeff Garzik  *	LOCKING:
5347cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5348c6fd2807SJeff Garzik  *
5349c6fd2807SJeff Garzik  *	RETURNS:
5350c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
5351c6fd2807SJeff Garzik  */
5352c6fd2807SJeff Garzik 
5353c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5354c6fd2807SJeff Garzik {
5355c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5356c6fd2807SJeff Garzik 
5357c6fd2807SJeff Garzik 	/* Use polling pio if the LLD doesn't handle
5358c6fd2807SJeff Garzik 	 * interrupt driven pio and atapi CDB interrupt.
5359c6fd2807SJeff Garzik 	 */
5360c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5361c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
5362c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
5363e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
5364c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
5365c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
5366c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
5367c6fd2807SJeff Garzik 			break;
5368c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
5369c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5370c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
5371c6fd2807SJeff Garzik 				BUG();
5372c6fd2807SJeff Garzik 			break;
5373c6fd2807SJeff Garzik 		default:
5374c6fd2807SJeff Garzik 			break;
5375c6fd2807SJeff Garzik 		}
5376c6fd2807SJeff Garzik 	}
5377c6fd2807SJeff Garzik 
53783d3cca37STejun Heo 	/* Some controllers show flaky interrupt behavior after
53793d3cca37STejun Heo 	 * setting xfer mode.  Use polling instead.
53803d3cca37STejun Heo 	 */
53813d3cca37STejun Heo 	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
53823d3cca37STejun Heo 		     qc->tf.feature == SETFEATURES_XFER) &&
53833d3cca37STejun Heo 	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
53843d3cca37STejun Heo 		qc->tf.flags |= ATA_TFLAG_POLLING;
53853d3cca37STejun Heo 
5386c6fd2807SJeff Garzik 	/* select the device */
5387c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5388c6fd2807SJeff Garzik 
5389c6fd2807SJeff Garzik 	/* start the command */
5390c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5391c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
5392c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5393c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5394c6fd2807SJeff Garzik 
5395c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5396c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5397c6fd2807SJeff Garzik 
5398c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5399c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5400c6fd2807SJeff Garzik 
5401c6fd2807SJeff Garzik 		break;
5402c6fd2807SJeff Garzik 
5403c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5404c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5405c6fd2807SJeff Garzik 
5406c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5407c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5408c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5409c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5410c6fd2807SJeff Garzik 		break;
5411c6fd2807SJeff Garzik 
5412c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5413c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5414c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5415c6fd2807SJeff Garzik 
5416c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5417c6fd2807SJeff Garzik 
5418c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5419c6fd2807SJeff Garzik 			/* PIO data out protocol */
5420c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
5421c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5422c6fd2807SJeff Garzik 
5423c6fd2807SJeff Garzik 			/* always send first data block using
5424c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
5425c6fd2807SJeff Garzik 			 */
5426c6fd2807SJeff Garzik 		} else {
5427c6fd2807SJeff Garzik 			/* PIO data in protocol */
5428c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5429c6fd2807SJeff Garzik 
5430c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5431c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5432c6fd2807SJeff Garzik 
5433c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
5434c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
5435c6fd2807SJeff Garzik 			 */
5436c6fd2807SJeff Garzik 		}
5437c6fd2807SJeff Garzik 
5438c6fd2807SJeff Garzik 		break;
5439c6fd2807SJeff Garzik 
5440c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5441c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5442c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5443c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5444c6fd2807SJeff Garzik 
5445c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5446c6fd2807SJeff Garzik 
5447c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5448c6fd2807SJeff Garzik 
5449c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5450c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5451c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5452c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5453c6fd2807SJeff Garzik 		break;
5454c6fd2807SJeff Garzik 
5455c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5456c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5457c6fd2807SJeff Garzik 
5458c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5459c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5460c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5461c6fd2807SJeff Garzik 
5462c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5463c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5464c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5465c6fd2807SJeff Garzik 		break;
5466c6fd2807SJeff Garzik 
5467c6fd2807SJeff Garzik 	default:
5468c6fd2807SJeff Garzik 		WARN_ON(1);
5469c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
5470c6fd2807SJeff Garzik 	}
5471c6fd2807SJeff Garzik 
5472c6fd2807SJeff Garzik 	return 0;
5473c6fd2807SJeff Garzik }
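/*
 * Usage sketch added for clarity (illustrative only; my_port_ops is a
 * hypothetical driver structure): as the kernel-doc above notes, a
 * conventional taskfile/BMDMA driver can use this routine directly as its
 * qc_issue hook, e.g.
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		...
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		...
 *	};
 */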
5474c6fd2807SJeff Garzik 
5475c6fd2807SJeff Garzik /**
5476c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
5477c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
5478c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
5479c6fd2807SJeff Garzik  *
5480c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
5481c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
5482c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
5483c6fd2807SJeff Garzik  *
5484c6fd2807SJeff Garzik  *	LOCKING:
5485cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5486c6fd2807SJeff Garzik  *
5487c6fd2807SJeff Garzik  *	RETURNS:
5488c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
5489c6fd2807SJeff Garzik  */
5490c6fd2807SJeff Garzik 
5491c6fd2807SJeff Garzik inline unsigned int ata_host_intr (struct ata_port *ap,
5492c6fd2807SJeff Garzik 				   struct ata_queued_cmd *qc)
5493c6fd2807SJeff Garzik {
5494ea54763fSTejun Heo 	struct ata_eh_info *ehi = &ap->eh_info;
5495c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
5496c6fd2807SJeff Garzik 
5497c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
549844877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5499c6fd2807SJeff Garzik 
5500c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
5501c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5502c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5503c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
5504c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
5505c6fd2807SJeff Garzik 		 */
5506c6fd2807SJeff Garzik 
5507c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5508c6fd2807SJeff Garzik 		 * The flag was turned on only for atapi devices.
5509c6fd2807SJeff Garzik 		 * No need to check is_atapi_taskfile(&qc->tf) again.
5510c6fd2807SJeff Garzik 		 */
5511c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5512c6fd2807SJeff Garzik 			goto idle_irq;
5513c6fd2807SJeff Garzik 		break;
5514c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5515c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
5516c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5517c6fd2807SJeff Garzik 			/* check status of DMA engine */
5518c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
551944877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
552044877b4eSTejun Heo 				ap->print_id, host_stat);
5521c6fd2807SJeff Garzik 
5522c6fd2807SJeff Garzik 			/* if it's not our irq... */
5523c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
5524c6fd2807SJeff Garzik 				goto idle_irq;
5525c6fd2807SJeff Garzik 
5526c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
5527c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
5528c6fd2807SJeff Garzik 
5529c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
5530c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
5531c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
5532c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5533c6fd2807SJeff Garzik 			}
5534c6fd2807SJeff Garzik 		}
5535c6fd2807SJeff Garzik 		break;
5536c6fd2807SJeff Garzik 	case HSM_ST:
5537c6fd2807SJeff Garzik 		break;
5538c6fd2807SJeff Garzik 	default:
5539c6fd2807SJeff Garzik 		goto idle_irq;
5540c6fd2807SJeff Garzik 	}
5541c6fd2807SJeff Garzik 
5542c6fd2807SJeff Garzik 	/* check altstatus */
5543c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
5544c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
5545c6fd2807SJeff Garzik 		goto idle_irq;
5546c6fd2807SJeff Garzik 
5547c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
5548c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
5549c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
5550c6fd2807SJeff Garzik 		goto idle_irq;
5551c6fd2807SJeff Garzik 
5552c6fd2807SJeff Garzik 	/* ack bmdma irq events */
5553c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
5554c6fd2807SJeff Garzik 
5555c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
5556ea54763fSTejun Heo 
5557ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5558ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5559ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5560ea54763fSTejun Heo 
5561c6fd2807SJeff Garzik 	return 1;	/* irq handled */
5562c6fd2807SJeff Garzik 
5563c6fd2807SJeff Garzik idle_irq:
5564c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
5565c6fd2807SJeff Garzik 
5566c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
5567c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
556883625006SAkira Iguchi 		ap->ops->irq_ack(ap, 0); /* debug trap */
5569c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5570c6fd2807SJeff Garzik 		return 1;
5571c6fd2807SJeff Garzik 	}
5572c6fd2807SJeff Garzik #endif
5573c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
5574c6fd2807SJeff Garzik }
5575c6fd2807SJeff Garzik 
5576c6fd2807SJeff Garzik /**
5577c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
5578c6fd2807SJeff Garzik  *	@irq: irq line (unused)
5579cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
5580c6fd2807SJeff Garzik  *
5581c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
5582c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
5583c6fd2807SJeff Garzik  *
5584c6fd2807SJeff Garzik  *	LOCKING:
5585cca3974eSJeff Garzik  *	Obtains host lock during operation.
5586c6fd2807SJeff Garzik  *
5587c6fd2807SJeff Garzik  *	RETURNS:
5588c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
5589c6fd2807SJeff Garzik  */
5590c6fd2807SJeff Garzik 
55917d12e780SDavid Howells irqreturn_t ata_interrupt (int irq, void *dev_instance)
5592c6fd2807SJeff Garzik {
5593cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
5594c6fd2807SJeff Garzik 	unsigned int i;
5595c6fd2807SJeff Garzik 	unsigned int handled = 0;
5596c6fd2807SJeff Garzik 	unsigned long flags;
5597c6fd2807SJeff Garzik 
5598c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5599cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
5600c6fd2807SJeff Garzik 
5601cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5602c6fd2807SJeff Garzik 		struct ata_port *ap;
5603c6fd2807SJeff Garzik 
5604cca3974eSJeff Garzik 		ap = host->ports[i];
5605c6fd2807SJeff Garzik 		if (ap &&
5606c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
5607c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
5608c6fd2807SJeff Garzik 
5609c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, ap->active_tag);
5610c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5611c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
5612c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
5613c6fd2807SJeff Garzik 		}
5614c6fd2807SJeff Garzik 	}
5615c6fd2807SJeff Garzik 
5616cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
5617c6fd2807SJeff Garzik 
5618c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
5619c6fd2807SJeff Garzik }
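/*
 * Usage sketch added for clarity (illustrative only; pdev stands for the
 * driver's PCI device and DRV_NAME for its own name macro): LLDDs that rely
 * on this default handler normally register it against the shared ata_host,
 * for example
 *
 *	rc = request_irq(pdev->irq, ata_interrupt, IRQF_SHARED,
 *			 DRV_NAME, host);
 *
 * The handler then walks host->ports and calls ata_host_intr() for every
 * enabled port that has an active, non-polling command.
 */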
5620c6fd2807SJeff Garzik 
5621c6fd2807SJeff Garzik /**
5622c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
5623c6fd2807SJeff Garzik  *	@ap: ATA port to test SCR accessibility for
5624c6fd2807SJeff Garzik  *
5625c6fd2807SJeff Garzik  *	Test whether SCRs are accessible for @ap.
5626c6fd2807SJeff Garzik  *
5627c6fd2807SJeff Garzik  *	LOCKING:
5628c6fd2807SJeff Garzik  *	None.
5629c6fd2807SJeff Garzik  *
5630c6fd2807SJeff Garzik  *	RETURNS:
5631c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
5632c6fd2807SJeff Garzik  */
5633c6fd2807SJeff Garzik int sata_scr_valid(struct ata_port *ap)
5634c6fd2807SJeff Garzik {
5635c6fd2807SJeff Garzik 	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5636c6fd2807SJeff Garzik }
5637c6fd2807SJeff Garzik 
5638c6fd2807SJeff Garzik /**
5639c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
5640c6fd2807SJeff Garzik  *	@ap: ATA port to read SCR for
5641c6fd2807SJeff Garzik  *	@reg: SCR to read
5642c6fd2807SJeff Garzik  *	@val: Place to store read value
5643c6fd2807SJeff Garzik  *
5644c6fd2807SJeff Garzik  *	Read SCR register @reg of @ap into *@val.  This function is
5645c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5646c6fd2807SJeff Garzik  *	and the port implements ->scr_read.
5647c6fd2807SJeff Garzik  *
5648c6fd2807SJeff Garzik  *	LOCKING:
5649c6fd2807SJeff Garzik  *	None.
5650c6fd2807SJeff Garzik  *
5651c6fd2807SJeff Garzik  *	RETURNS:
5652c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5653c6fd2807SJeff Garzik  */
5654c6fd2807SJeff Garzik int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5655c6fd2807SJeff Garzik {
5656c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5657c6fd2807SJeff Garzik 		*val = ap->ops->scr_read(ap, reg);
5658c6fd2807SJeff Garzik 		return 0;
5659c6fd2807SJeff Garzik 	}
5660c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5661c6fd2807SJeff Garzik }
5662c6fd2807SJeff Garzik 
5663c6fd2807SJeff Garzik /**
5664c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
5665c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5666c6fd2807SJeff Garzik  *	@reg: SCR to write
5667c6fd2807SJeff Garzik  *	@val: value to write
5668c6fd2807SJeff Garzik  *
5669c6fd2807SJeff Garzik  *	Write @val to SCR register @reg of @ap.  This function is
5670c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5671c6fd2807SJeff Garzik  *	and the port implements ->scr_read.
5672c6fd2807SJeff Garzik  *
5673c6fd2807SJeff Garzik  *	LOCKING:
5674c6fd2807SJeff Garzik  *	None.
5675c6fd2807SJeff Garzik  *
5676c6fd2807SJeff Garzik  *	RETURNS:
5677c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5678c6fd2807SJeff Garzik  */
5679c6fd2807SJeff Garzik int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5680c6fd2807SJeff Garzik {
5681c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5682c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5683c6fd2807SJeff Garzik 		return 0;
5684c6fd2807SJeff Garzik 	}
5685c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5686c6fd2807SJeff Garzik }
5687c6fd2807SJeff Garzik 
5688c6fd2807SJeff Garzik /**
5689c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
5690c6fd2807SJeff Garzik  *	@ap: ATA port to write SCR for
5691c6fd2807SJeff Garzik  *	@reg: SCR to write
5692c6fd2807SJeff Garzik  *	@val: value to write
5693c6fd2807SJeff Garzik  *
5694c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
5695c6fd2807SJeff Garzik  *	function performs flush after writing to the register.
5696c6fd2807SJeff Garzik  *
5697c6fd2807SJeff Garzik  *	LOCKING:
5698c6fd2807SJeff Garzik  *	None.
5699c6fd2807SJeff Garzik  *
5700c6fd2807SJeff Garzik  *	RETURNS:
5701c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5702c6fd2807SJeff Garzik  */
5703c6fd2807SJeff Garzik int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5704c6fd2807SJeff Garzik {
5705c6fd2807SJeff Garzik 	if (sata_scr_valid(ap)) {
5706c6fd2807SJeff Garzik 		ap->ops->scr_write(ap, reg, val);
5707c6fd2807SJeff Garzik 		ap->ops->scr_read(ap, reg);
5708c6fd2807SJeff Garzik 		return 0;
5709c6fd2807SJeff Garzik 	}
5710c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5711c6fd2807SJeff Garzik }
5712c6fd2807SJeff Garzik 
5713c6fd2807SJeff Garzik /**
5714c6fd2807SJeff Garzik  *	ata_port_online - test whether the given port is online
5715c6fd2807SJeff Garzik  *	@ap: ATA port to test
5716c6fd2807SJeff Garzik  *
5717c6fd2807SJeff Garzik  *	Test whether @ap is online.  Note that this function returns 0
5718c6fd2807SJeff Garzik  *	if online status of @ap cannot be obtained, so
5719c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5720c6fd2807SJeff Garzik  *
5721c6fd2807SJeff Garzik  *	LOCKING:
5722c6fd2807SJeff Garzik  *	None.
5723c6fd2807SJeff Garzik  *
5724c6fd2807SJeff Garzik  *	RETURNS:
5725c6fd2807SJeff Garzik  *	1 if the port online status is available and online.
5726c6fd2807SJeff Garzik  */
5727c6fd2807SJeff Garzik int ata_port_online(struct ata_port *ap)
5728c6fd2807SJeff Garzik {
5729c6fd2807SJeff Garzik 	u32 sstatus;
5730c6fd2807SJeff Garzik 
5731c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5732c6fd2807SJeff Garzik 		return 1;
5733c6fd2807SJeff Garzik 	return 0;
5734c6fd2807SJeff Garzik }
5735c6fd2807SJeff Garzik 
5736c6fd2807SJeff Garzik /**
5737c6fd2807SJeff Garzik  *	ata_port_offline - test whether the given port is offline
5738c6fd2807SJeff Garzik  *	@ap: ATA port to test
5739c6fd2807SJeff Garzik  *
5740c6fd2807SJeff Garzik  *	Test whether @ap is offline.  Note that this function returns
5741c6fd2807SJeff Garzik  *	0 if offline status of @ap cannot be obtained, so
5742c6fd2807SJeff Garzik  *	ata_port_online(ap) != !ata_port_offline(ap).
5743c6fd2807SJeff Garzik  *
5744c6fd2807SJeff Garzik  *	LOCKING:
5745c6fd2807SJeff Garzik  *	None.
5746c6fd2807SJeff Garzik  *
5747c6fd2807SJeff Garzik  *	RETURNS:
5748c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
5749c6fd2807SJeff Garzik  */
5750c6fd2807SJeff Garzik int ata_port_offline(struct ata_port *ap)
5751c6fd2807SJeff Garzik {
5752c6fd2807SJeff Garzik 	u32 sstatus;
5753c6fd2807SJeff Garzik 
5754c6fd2807SJeff Garzik 	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5755c6fd2807SJeff Garzik 		return 1;
5756c6fd2807SJeff Garzik 	return 0;
5757c6fd2807SJeff Garzik }
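/*
 * Note added for clarity (not part of the original source): both helpers
 * above inspect the DET field (bits 3:0) of the SStatus register.  DET ==
 * 0x3 means a device is present and PHY communication is established, so
 * "online" requires that SStatus is readable and DET equals 0x3, while
 * "offline" requires that SStatus is readable and DET is anything else.
 * If the SCRs cannot be read at all, both helpers return 0, which is why
 * ata_port_online(ap) is not simply the negation of ata_port_offline(ap).
 */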
5758c6fd2807SJeff Garzik 
5759c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
5760c6fd2807SJeff Garzik {
5761c6fd2807SJeff Garzik 	unsigned int err_mask;
5762c6fd2807SJeff Garzik 	u8 cmd;
5763c6fd2807SJeff Garzik 
5764c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
5765c6fd2807SJeff Garzik 		return 0;
5766c6fd2807SJeff Garzik 
57676fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5768c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
5769c6fd2807SJeff Garzik 	else
5770c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
5771c6fd2807SJeff Garzik 
5772c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
5773c6fd2807SJeff Garzik 	if (err_mask) {
5774c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5775c6fd2807SJeff Garzik 		return -EIO;
5776c6fd2807SJeff Garzik 	}
5777c6fd2807SJeff Garzik 
5778c6fd2807SJeff Garzik 	return 0;
5779c6fd2807SJeff Garzik }
5780c6fd2807SJeff Garzik 
57816ffa01d8STejun Heo #ifdef CONFIG_PM
5782cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5783cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
5784cca3974eSJeff Garzik 			       int wait)
5785c6fd2807SJeff Garzik {
5786c6fd2807SJeff Garzik 	unsigned long flags;
5787c6fd2807SJeff Garzik 	int i, rc;
5788c6fd2807SJeff Garzik 
5789cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5790cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
5791c6fd2807SJeff Garzik 
5792c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
5793c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
5794c6fd2807SJeff Garzik 		 */
5795c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5796c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5797c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5798c6fd2807SJeff Garzik 		}
5799c6fd2807SJeff Garzik 
5800c6fd2807SJeff Garzik 		/* request PM ops to EH */
5801c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
5802c6fd2807SJeff Garzik 
5803c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
5804c6fd2807SJeff Garzik 		if (wait) {
5805c6fd2807SJeff Garzik 			rc = 0;
5806c6fd2807SJeff Garzik 			ap->pm_result = &rc;
5807c6fd2807SJeff Garzik 		}
5808c6fd2807SJeff Garzik 
5809c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
5810c6fd2807SJeff Garzik 		ap->eh_info.action |= action;
5811c6fd2807SJeff Garzik 		ap->eh_info.flags |= ehi_flags;
5812c6fd2807SJeff Garzik 
5813c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
5814c6fd2807SJeff Garzik 
5815c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
5816c6fd2807SJeff Garzik 
5817c6fd2807SJeff Garzik 		/* wait and check result */
5818c6fd2807SJeff Garzik 		if (wait) {
5819c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
5820c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5821c6fd2807SJeff Garzik 			if (rc)
5822c6fd2807SJeff Garzik 				return rc;
5823c6fd2807SJeff Garzik 		}
5824c6fd2807SJeff Garzik 	}
5825c6fd2807SJeff Garzik 
5826c6fd2807SJeff Garzik 	return 0;
5827c6fd2807SJeff Garzik }
5828c6fd2807SJeff Garzik 
5829c6fd2807SJeff Garzik /**
5830cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
5831cca3974eSJeff Garzik  *	@host: host to suspend
5832c6fd2807SJeff Garzik  *	@mesg: PM message
5833c6fd2807SJeff Garzik  *
5834cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
5835c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
5836c6fd2807SJeff Garzik  *	to finish.
5837c6fd2807SJeff Garzik  *
5838c6fd2807SJeff Garzik  *	LOCKING:
5839c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5840c6fd2807SJeff Garzik  *
5841c6fd2807SJeff Garzik  *	RETURNS:
5842c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
5843c6fd2807SJeff Garzik  */
5844cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5845c6fd2807SJeff Garzik {
5846c6fd2807SJeff Garzik 	int i, j, rc;
5847c6fd2807SJeff Garzik 
5848cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5849c6fd2807SJeff Garzik 	if (rc)
5850c6fd2807SJeff Garzik 		goto fail;
5851c6fd2807SJeff Garzik 
5852c6fd2807SJeff Garzik 	/* EH is quiescent now.  Fail if we have any ready device.
5853c6fd2807SJeff Garzik 	 * This happens if hotplug occurs between completion of device
5854c6fd2807SJeff Garzik 	 * suspension and here.
5855c6fd2807SJeff Garzik 	 */
5856cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5857cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
5858c6fd2807SJeff Garzik 
5859c6fd2807SJeff Garzik 		for (j = 0; j < ATA_MAX_DEVICES; j++) {
5860c6fd2807SJeff Garzik 			struct ata_device *dev = &ap->device[j];
5861c6fd2807SJeff Garzik 
5862c6fd2807SJeff Garzik 			if (ata_dev_ready(dev)) {
5863c6fd2807SJeff Garzik 				ata_port_printk(ap, KERN_WARNING,
5864c6fd2807SJeff Garzik 						"suspend failed, device %d "
5865c6fd2807SJeff Garzik 						"still active\n", dev->devno);
5866c6fd2807SJeff Garzik 				rc = -EBUSY;
5867c6fd2807SJeff Garzik 				goto fail;
5868c6fd2807SJeff Garzik 			}
5869c6fd2807SJeff Garzik 		}
5870c6fd2807SJeff Garzik 	}
5871c6fd2807SJeff Garzik 
5872cca3974eSJeff Garzik 	host->dev->power.power_state = mesg;
5873c6fd2807SJeff Garzik 	return 0;
5874c6fd2807SJeff Garzik 
5875c6fd2807SJeff Garzik  fail:
5876cca3974eSJeff Garzik 	ata_host_resume(host);
5877c6fd2807SJeff Garzik 	return rc;
5878c6fd2807SJeff Garzik }
5879c6fd2807SJeff Garzik 
5880c6fd2807SJeff Garzik /**
5881cca3974eSJeff Garzik  *	ata_host_resume - resume host
5882cca3974eSJeff Garzik  *	@host: host to resume
5883c6fd2807SJeff Garzik  *
5884cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
5885c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
5886c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
5887c6fd2807SJeff Garzik  *
5888c6fd2807SJeff Garzik  *	LOCKING:
5889c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
5890c6fd2807SJeff Garzik  */
5891cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
5892c6fd2807SJeff Garzik {
5893cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5894c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5895cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
5896c6fd2807SJeff Garzik }
58976ffa01d8STejun Heo #endif
5898c6fd2807SJeff Garzik 
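/*
 * Sketch of how a hypothetical non-PCI LLD (all foo_* names are
 * assumptions) might wire these helpers into its platform PM hooks; the
 * PCI variants further down in this file do the same thing for PCI
 * devices.
 *
 *	static int foo_suspend(struct platform_device *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		return ata_host_suspend(host, mesg);
 *	}
 *
 *	static int foo_resume(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		foo_restore_controller_state(host);
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */
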
5899c6fd2807SJeff Garzik /**
5900c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
5901c6fd2807SJeff Garzik  *	@ap: Port to initialize
5902c6fd2807SJeff Garzik  *
5903c6fd2807SJeff Garzik  *	Called just after data structures for each port are
5904c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
5905c6fd2807SJeff Garzik  *
5906c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
5907c6fd2807SJeff Garzik  *
5908c6fd2807SJeff Garzik  *	LOCKING:
5909c6fd2807SJeff Garzik  *	Inherited from caller.
5910c6fd2807SJeff Garzik  */
5911c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
5912c6fd2807SJeff Garzik {
5913c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
5914c6fd2807SJeff Garzik 	int rc;
5915c6fd2807SJeff Garzik 
5916f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5917f0d36efdSTejun Heo 				      GFP_KERNEL);
5918c6fd2807SJeff Garzik 	if (!ap->prd)
5919c6fd2807SJeff Garzik 		return -ENOMEM;
5920c6fd2807SJeff Garzik 
5921c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
5922f0d36efdSTejun Heo 	if (rc)
5923c6fd2807SJeff Garzik 		return rc;
5924c6fd2807SJeff Garzik 
5925f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5926f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
5927c6fd2807SJeff Garzik 	return 0;
5928c6fd2807SJeff Garzik }
5929c6fd2807SJeff Garzik 
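/*
 * LLDs that need per-port private state typically layer on top of this
 * helper.  A hedged sketch (struct foo_port_priv and foo_port_start are
 * hypothetical): ata_port_start() takes care of the PRD table, and devres
 * takes care of freeing the extra allocation.
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		struct foo_port_priv *pp;
 *		int rc;
 *
 *		rc = ata_port_start(ap);
 *		if (rc)
 *			return rc;
 *
 *		pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */
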
5930c6fd2807SJeff Garzik /**
5931c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
5932c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
5933c6fd2807SJeff Garzik  *
5934c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
5935c6fd2807SJeff Garzik  *
5936c6fd2807SJeff Garzik  *	LOCKING:
5937c6fd2807SJeff Garzik  *	Inherited from caller.
5938c6fd2807SJeff Garzik  */
5939c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
5940c6fd2807SJeff Garzik {
5941c6fd2807SJeff Garzik 	struct ata_port *ap = dev->ap;
5942c6fd2807SJeff Garzik 	unsigned long flags;
5943c6fd2807SJeff Garzik 
5944c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
5945c6fd2807SJeff Garzik 	ap->sata_spd_limit = ap->hw_sata_spd_limit;
5946c6fd2807SJeff Garzik 
5947c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
5948c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
5949cca3974eSJeff Garzik 	 * host lock.
5950c6fd2807SJeff Garzik 	 */
5951c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
5952c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5953c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
5954c6fd2807SJeff Garzik 
5955c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5956c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5957c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
5958c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
5959c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
5960c6fd2807SJeff Garzik }
5961c6fd2807SJeff Garzik 
5962c6fd2807SJeff Garzik /**
5963f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
5964f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
5965c6fd2807SJeff Garzik  *
5966f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
5967f3187195STejun Heo  *
5968f3187195STejun Heo  *	RETURNS:
5969f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
5970c6fd2807SJeff Garzik  *
5971c6fd2807SJeff Garzik  *	LOCKING:
5972f3187195STejun Heo  *	Inherited from calling layer (may sleep).
5973c6fd2807SJeff Garzik  */
5974f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
5975c6fd2807SJeff Garzik {
5976f3187195STejun Heo 	struct ata_port *ap;
5977c6fd2807SJeff Garzik 	unsigned int i;
5978c6fd2807SJeff Garzik 
5979f3187195STejun Heo 	DPRINTK("ENTER\n");
5980f3187195STejun Heo 
5981f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5982f3187195STejun Heo 	if (!ap)
5983f3187195STejun Heo 		return NULL;
5984f3187195STejun Heo 
5985cca3974eSJeff Garzik 	ap->lock = &host->lock;
5986c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
5987f3187195STejun Heo 	ap->print_id = -1;
5988c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
5989cca3974eSJeff Garzik 	ap->host = host;
5990f3187195STejun Heo 	ap->dev = host->dev;
5991f3187195STejun Heo 
5992c6fd2807SJeff Garzik 	ap->hw_sata_spd_limit = UINT_MAX;
5993c6fd2807SJeff Garzik 	ap->active_tag = ATA_TAG_POISON;
5994c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
5995c6fd2807SJeff Garzik 
5996c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
5997c6fd2807SJeff Garzik 	/* turn on all debugging levels */
5998c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
5999c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6000c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6001c6fd2807SJeff Garzik #else
6002c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6003c6fd2807SJeff Garzik #endif
6004c6fd2807SJeff Garzik 
600565f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
600665f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
600765f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6008c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6009c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
6010c6fd2807SJeff Garzik 
6011c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6012c6fd2807SJeff Garzik 
6013c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
6014c6fd2807SJeff Garzik 		struct ata_device *dev = &ap->device[i];
6015c6fd2807SJeff Garzik 		dev->ap = ap;
6016c6fd2807SJeff Garzik 		dev->devno = i;
6017c6fd2807SJeff Garzik 		ata_dev_init(dev);
6018c6fd2807SJeff Garzik 	}
6019c6fd2807SJeff Garzik 
6020c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6021c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6022c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6023c6fd2807SJeff Garzik #endif
6024c6fd2807SJeff Garzik 	return ap;
6025c6fd2807SJeff Garzik }
6026c6fd2807SJeff Garzik 
6027f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6028f0d36efdSTejun Heo {
6029f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6030f0d36efdSTejun Heo 	int i;
6031f0d36efdSTejun Heo 
6032f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6033f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6034f0d36efdSTejun Heo 
6035ecef7253STejun Heo 		if (!ap)
6036ecef7253STejun Heo 			continue;
6037ecef7253STejun Heo 
6038ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6039f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6040f0d36efdSTejun Heo 	}
6041f0d36efdSTejun Heo 
6042ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6043f0d36efdSTejun Heo 		host->ops->host_stop(host);
60441aa56ccaSTejun Heo 
60451aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
60461aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
60471aa506e4STejun Heo 
60484911487aSTejun Heo 		if (!ap)
60494911487aSTejun Heo 			continue;
60504911487aSTejun Heo 
60514911487aSTejun Heo 		if (ap->scsi_host)
60521aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
60531aa506e4STejun Heo 
60544911487aSTejun Heo 		kfree(ap);
60551aa506e4STejun Heo 		host->ports[i] = NULL;
60561aa506e4STejun Heo 	}
60571aa506e4STejun Heo 
60581aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6059f0d36efdSTejun Heo }
6060f0d36efdSTejun Heo 
6061c6fd2807SJeff Garzik /**
6062f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6063f3187195STejun Heo  *	@dev: generic device this host is associated with
6064f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6065f3187195STejun Heo  *
6066f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6067f3187195STejun Heo  *	this function to allocate a host, initializes it fully, and
6068f3187195STejun Heo  *	attaches it using ata_host_register().
6069f3187195STejun Heo  *
6070f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6071f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6072f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6073f3187195STejun Heo  *	ports will be automatically freed on registration.
6074f3187195STejun Heo  *
6075f3187195STejun Heo  *	RETURNS:
6076f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6077f3187195STejun Heo  *
6078f3187195STejun Heo  *	LOCKING:
6079f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6080f3187195STejun Heo  */
6081f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6082f3187195STejun Heo {
6083f3187195STejun Heo 	struct ata_host *host;
6084f3187195STejun Heo 	size_t sz;
6085f3187195STejun Heo 	int i;
6086f3187195STejun Heo 
6087f3187195STejun Heo 	DPRINTK("ENTER\n");
6088f3187195STejun Heo 
6089f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6090f3187195STejun Heo 		return NULL;
6091f3187195STejun Heo 
6092f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6093f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6094f3187195STejun Heo 	/* allocate the host with devres so it is released along with @dev */
6095f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6096f3187195STejun Heo 	if (!host)
6097f3187195STejun Heo 		goto err_out;
6098f3187195STejun Heo 
6099f3187195STejun Heo 	devres_add(dev, host);
6100f3187195STejun Heo 	dev_set_drvdata(dev, host);
6101f3187195STejun Heo 
6102f3187195STejun Heo 	spin_lock_init(&host->lock);
6103f3187195STejun Heo 	host->dev = dev;
6104f3187195STejun Heo 	host->n_ports = max_ports;
6105f3187195STejun Heo 
6106f3187195STejun Heo 	/* allocate ports bound to this host */
6107f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6108f3187195STejun Heo 		struct ata_port *ap;
6109f3187195STejun Heo 
6110f3187195STejun Heo 		ap = ata_port_alloc(host);
6111f3187195STejun Heo 		if (!ap)
6112f3187195STejun Heo 			goto err_out;
6113f3187195STejun Heo 
6114f3187195STejun Heo 		ap->port_no = i;
6115f3187195STejun Heo 		host->ports[i] = ap;
6116f3187195STejun Heo 	}
6117f3187195STejun Heo 
6118f3187195STejun Heo 	devres_remove_group(dev, NULL);
6119f3187195STejun Heo 	return host;
6120f3187195STejun Heo 
6121f3187195STejun Heo  err_out:
6122f3187195STejun Heo 	devres_release_group(dev, NULL);
6123f3187195STejun Heo 	return NULL;
6124f3187195STejun Heo }
6125f3187195STejun Heo 
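/*
 * Sketch of the "decrease n_ports" convention described above
 * (FOO_MAX_PORTS and foo_count_ports are hypothetical): allocate for the
 * maximum and shrink once the hardware reveals how many ports really
 * exist; the surplus ports are freed by ata_host_register().
 *
 *	host = ata_host_alloc(&pdev->dev, FOO_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *	host->n_ports = foo_count_ports(host);
 */
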
6126f3187195STejun Heo /**
6127f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6128f5cda257STejun Heo  *	@dev: generic device this host is associated with
6129f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6130f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6131f5cda257STejun Heo  *
6132f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6133f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6134f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6135f5cda257STejun Heo  *
6136f5cda257STejun Heo  *	RETURNS:
6137f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6138f5cda257STejun Heo  *
6139f5cda257STejun Heo  *	LOCKING:
6140f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6141f5cda257STejun Heo  */
6142f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6143f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6144f5cda257STejun Heo 				      int n_ports)
6145f5cda257STejun Heo {
6146f5cda257STejun Heo 	const struct ata_port_info *pi;
6147f5cda257STejun Heo 	struct ata_host *host;
6148f5cda257STejun Heo 	int i, j;
6149f5cda257STejun Heo 
6150f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6151f5cda257STejun Heo 	if (!host)
6152f5cda257STejun Heo 		return NULL;
6153f5cda257STejun Heo 
6154f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6155f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6156f5cda257STejun Heo 
6157f5cda257STejun Heo 		if (ppi[j])
6158f5cda257STejun Heo 			pi = ppi[j++];
6159f5cda257STejun Heo 
6160f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6161f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6162f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6163f5cda257STejun Heo 		ap->flags |= pi->flags;
6164f5cda257STejun Heo 		ap->ops = pi->port_ops;
6165f5cda257STejun Heo 
6166f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6167f5cda257STejun Heo 			host->ops = pi->port_ops;
6168f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6169f5cda257STejun Heo 			host->private_data = pi->private_data;
6170f5cda257STejun Heo 	}
6171f5cda257STejun Heo 
6172f5cda257STejun Heo 	return host;
6173f5cda257STejun Heo }
6174f5cda257STejun Heo 
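/*
 * Sketch of the @ppi convention (the foo_* names and the mask values are
 * placeholders): with a NULL-terminated single-entry array and
 * @n_ports == 2, both ports inherit the same port_info.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,
 *		.udma_mask	= 0x7f,
 *		.port_ops	= &foo_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */
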
6175f5cda257STejun Heo /**
6176ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6177ecef7253STejun Heo  *	@host: ATA host to start ports for
6178ecef7253STejun Heo  *
6179ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6180ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6181ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6182f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it is set to the
6183f3187195STejun Heo  *	first non-dummy port ops.
6184ecef7253STejun Heo  *
6185ecef7253STejun Heo  *	LOCKING:
6186ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6187ecef7253STejun Heo  *
6188ecef7253STejun Heo  *	RETURNS:
6189ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6190ecef7253STejun Heo  */
6191ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6192ecef7253STejun Heo {
6193ecef7253STejun Heo 	int i, rc;
6194ecef7253STejun Heo 
6195ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6196ecef7253STejun Heo 		return 0;
6197ecef7253STejun Heo 
6198ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6199ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6200ecef7253STejun Heo 
6201f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6202f3187195STejun Heo 			host->ops = ap->ops;
6203f3187195STejun Heo 
6204ecef7253STejun Heo 		if (ap->ops->port_start) {
6205ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6206ecef7253STejun Heo 			if (rc) {
6207ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
6208ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
6209ecef7253STejun Heo 				goto err_out;
6210ecef7253STejun Heo 			}
6211ecef7253STejun Heo 		}
6212ecef7253STejun Heo 
6213ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6214ecef7253STejun Heo 	}
6215ecef7253STejun Heo 
6216ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6217ecef7253STejun Heo 	return 0;
6218ecef7253STejun Heo 
6219ecef7253STejun Heo  err_out:
6220ecef7253STejun Heo 	while (--i >= 0) {
6221ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6222ecef7253STejun Heo 
6223ecef7253STejun Heo 		if (ap->ops->port_stop)
6224ecef7253STejun Heo 			ap->ops->port_stop(ap);
6225ecef7253STejun Heo 	}
6226ecef7253STejun Heo 	return rc;
6227ecef7253STejun Heo }
6228ecef7253STejun Heo 
6229ecef7253STejun Heo /**
6230cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
6231cca3974eSJeff Garzik  *	@host:	host to initialize
6232cca3974eSJeff Garzik  *	@dev:	device host is attached to
6233cca3974eSJeff Garzik  *	@flags:	host flags
6234c6fd2807SJeff Garzik  *	@ops:	port_ops
6235c6fd2807SJeff Garzik  *
6236c6fd2807SJeff Garzik  *	LOCKING:
6237c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6238c6fd2807SJeff Garzik  *
6239c6fd2807SJeff Garzik  */
6240f3187195STejun Heo /* KILLME - the only user left is ipr */
6241cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6242cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6243c6fd2807SJeff Garzik {
6244cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6245cca3974eSJeff Garzik 	host->dev = dev;
6246cca3974eSJeff Garzik 	host->flags = flags;
6247cca3974eSJeff Garzik 	host->ops = ops;
6248c6fd2807SJeff Garzik }
6249c6fd2807SJeff Garzik 
6250c6fd2807SJeff Garzik /**
6251f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6252f3187195STejun Heo  *	@host: ATA host to register
6253f3187195STejun Heo  *	@sht: template for SCSI host
6254c6fd2807SJeff Garzik  *
6255f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
6256f3187195STejun Heo  *	ata_host_alloc() and fully initialized by the LLD.  This function
6257f3187195STejun Heo  *	starts ports, registers @host with the ATA and SCSI layers, and
6258f3187195STejun Heo  *	probes attached devices.
6259c6fd2807SJeff Garzik  *
6260c6fd2807SJeff Garzik  *	LOCKING:
6261f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6262c6fd2807SJeff Garzik  *
6263c6fd2807SJeff Garzik  *	RETURNS:
6264f3187195STejun Heo  *	0 on success, -errno otherwise.
6265c6fd2807SJeff Garzik  */
6266f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6267c6fd2807SJeff Garzik {
6268f3187195STejun Heo 	int i, rc;
6269c6fd2807SJeff Garzik 
6270f3187195STejun Heo 	/* host must have been started */
6271f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
6272f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
6273f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
6274f3187195STejun Heo 		WARN_ON(1);
6275f3187195STejun Heo 		return -EINVAL;
627602f076aaSAlan Cox 	}
6277f0d36efdSTejun Heo 
6278f3187195STejun Heo 	/* Blow away unused ports.  This happens when the LLD can't
6279f3187195STejun Heo 	 * determine the exact number of ports to allocate at
6280f3187195STejun Heo 	 * allocation time.
6281f3187195STejun Heo 	 */
6282f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
6283f3187195STejun Heo 		kfree(host->ports[i]);
6284f0d36efdSTejun Heo 
6285f3187195STejun Heo 	/* give ports names and add SCSI hosts */
6286f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
6287f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
6288c6fd2807SJeff Garzik 
6289f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
6290ecef7253STejun Heo 	if (rc)
6291f3187195STejun Heo 		return rc;
6292ecef7253STejun Heo 
6293f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
6294cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6295cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6296f3187195STejun Heo 		int irq_line;
6297c6fd2807SJeff Garzik 		u32 scontrol;
6298f3187195STejun Heo 		unsigned long xfer_mask;
6299f3187195STejun Heo 
6300f3187195STejun Heo 		/* set SATA cable type if still unset */
6301f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6302f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
6303c6fd2807SJeff Garzik 
6304c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
6305c6fd2807SJeff Garzik 		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6306c6fd2807SJeff Garzik 			int spd = (scontrol >> 4) & 0xf;
6307c6fd2807SJeff Garzik 			ap->hw_sata_spd_limit &= (1 << spd) - 1;
6308c6fd2807SJeff Garzik 		}
6309c6fd2807SJeff Garzik 		ap->sata_spd_limit = ap->hw_sata_spd_limit;
6310c6fd2807SJeff Garzik 
6311f3187195STejun Heo 		/* report the secondary IRQ for second channel legacy */
6312f3187195STejun Heo 		irq_line = host->irq;
6313f3187195STejun Heo 		if (i == 1 && host->irq2)
6314f3187195STejun Heo 			irq_line = host->irq2;
6315f3187195STejun Heo 
6316f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6317f3187195STejun Heo 					      ap->udma_mask);
6318f3187195STejun Heo 
6319f3187195STejun Heo 		/* print per-port info to dmesg */
6320f3187195STejun Heo 		if (!ata_port_is_dummy(ap))
6321f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6322f3187195STejun Heo 					"ctl 0x%p bmdma 0x%p irq %d\n",
6323f3187195STejun Heo 					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
6324f3187195STejun Heo 					ata_mode_string(xfer_mask),
6325f3187195STejun Heo 					ap->ioaddr.cmd_addr,
6326f3187195STejun Heo 					ap->ioaddr.ctl_addr,
6327f3187195STejun Heo 					ap->ioaddr.bmdma_addr,
6328f3187195STejun Heo 					irq_line);
6329f3187195STejun Heo 		else
6330f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6331c6fd2807SJeff Garzik 	}
6332c6fd2807SJeff Garzik 
6333f3187195STejun Heo 	/* perform each probe synchronously */
6334f3187195STejun Heo 	DPRINTK("probe begin\n");
6335f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6336f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
6337f3187195STejun Heo 		int rc;
6338f3187195STejun Heo 
6339f3187195STejun Heo 		/* probe */
6340c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
6341c6fd2807SJeff Garzik 			struct ata_eh_info *ehi = &ap->eh_info;
6342c6fd2807SJeff Garzik 			unsigned long flags;
6343c6fd2807SJeff Garzik 
6344c6fd2807SJeff Garzik 			ata_port_probe(ap);
6345c6fd2807SJeff Garzik 
6346c6fd2807SJeff Garzik 			/* kick EH for boot probing */
6347c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
6348c6fd2807SJeff Garzik 
6349c6fd2807SJeff Garzik 			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6350c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
6351c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6352c6fd2807SJeff Garzik 
6353c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
6354c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
6355c6fd2807SJeff Garzik 
6356c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
6357c6fd2807SJeff Garzik 
6358c6fd2807SJeff Garzik 			/* wait for EH to finish */
6359c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6360c6fd2807SJeff Garzik 		} else {
636144877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6362c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
636344877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6364c6fd2807SJeff Garzik 
6365c6fd2807SJeff Garzik 			if (rc) {
6366c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
6367c6fd2807SJeff Garzik 				 * Current libata behavior will
6368c6fd2807SJeff Garzik 				 * tear down everything when
6369c6fd2807SJeff Garzik 				 * the module is removed
6370c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
6371c6fd2807SJeff Garzik 				 */
6372c6fd2807SJeff Garzik 			}
6373c6fd2807SJeff Garzik 		}
6374c6fd2807SJeff Garzik 	}
6375c6fd2807SJeff Garzik 
6376c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
6377c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
6378cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6379cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6380c6fd2807SJeff Garzik 
6381c6fd2807SJeff Garzik 		ata_scsi_scan_host(ap);
6382c6fd2807SJeff Garzik 	}
6383c6fd2807SJeff Garzik 
6384f3187195STejun Heo 	return 0;
6385f3187195STejun Heo }
6386f3187195STejun Heo 
6387f3187195STejun Heo /**
6388f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
6389f5cda257STejun Heo  *	@host: target ATA host
6390f5cda257STejun Heo  *	@irq: IRQ to request
6391f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
6392f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
6393f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
6394f5cda257STejun Heo  *
6395f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
6396f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
6397f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
6398f5cda257STejun Heo  *	arguments and performs the three steps in one go.
6399f5cda257STejun Heo  *
6400f5cda257STejun Heo  *	LOCKING:
6401f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6402f5cda257STejun Heo  *
6403f5cda257STejun Heo  *	RETURNS:
6404f5cda257STejun Heo  *	0 on success, -errno otherwise.
6405f5cda257STejun Heo  */
6406f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
6407f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
6408f5cda257STejun Heo 		      struct scsi_host_template *sht)
6409f5cda257STejun Heo {
6410f5cda257STejun Heo 	int rc;
6411f5cda257STejun Heo 
6412f5cda257STejun Heo 	rc = ata_host_start(host);
6413f5cda257STejun Heo 	if (rc)
6414f5cda257STejun Heo 		return rc;
6415f5cda257STejun Heo 
6416f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6417f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
6418f5cda257STejun Heo 	if (rc)
6419f5cda257STejun Heo 		return rc;
6420f5cda257STejun Heo 
6421f5cda257STejun Heo 	rc = ata_host_register(host, sht);
6422f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
6423f5cda257STejun Heo 	if (rc)
6424f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
6425f5cda257STejun Heo 
6426f5cda257STejun Heo 	return rc;
6427f5cda257STejun Heo }
6428f5cda257STejun Heo 
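/*
 * Putting the pieces together, a condensed probe path for a hypothetical
 * PCI LLD (the foo_* names, the port count and the use of IRQF_SHARED are
 * assumptions; BAR iomapping is elided):
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		... iomap BARs and fill host->ports[i]->ioaddr here ...
 *
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */
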
6429f5cda257STejun Heo /**
6430c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
6431c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
6432c6fd2807SJeff Garzik  *
6433c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
6434c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
6435c6fd2807SJeff Garzik  *	be quiescent on return from this function.
6436c6fd2807SJeff Garzik  *
6437c6fd2807SJeff Garzik  *	LOCKING:
6438c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6439c6fd2807SJeff Garzik  */
6440c6fd2807SJeff Garzik void ata_port_detach(struct ata_port *ap)
6441c6fd2807SJeff Garzik {
6442c6fd2807SJeff Garzik 	unsigned long flags;
6443c6fd2807SJeff Garzik 	int i;
6444c6fd2807SJeff Garzik 
6445c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
6446c6fd2807SJeff Garzik 		goto skip_eh;
6447c6fd2807SJeff Garzik 
6448c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
6449c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6450c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
6451c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6452c6fd2807SJeff Garzik 
6453c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6454c6fd2807SJeff Garzik 
6455c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
6456c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
6457c6fd2807SJeff Garzik 	 */
6458c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6459c6fd2807SJeff Garzik 
6460c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_DEVICES; i++)
6461c6fd2807SJeff Garzik 		ata_dev_disable(&ap->device[i]);
6462c6fd2807SJeff Garzik 
6463c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6464c6fd2807SJeff Garzik 
6465c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6466c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
6467c6fd2807SJeff Garzik 	 * target.
6468c6fd2807SJeff Garzik 	 */
6469c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6470c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
6471c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6472c6fd2807SJeff Garzik 
6473c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6474c6fd2807SJeff Garzik 
6475c6fd2807SJeff Garzik 	/* Flush hotplug task.  The sequence is similar to
6476c6fd2807SJeff Garzik 	 * ata_port_flush_task().
6477c6fd2807SJeff Garzik 	 */
6478c6fd2807SJeff Garzik 	flush_workqueue(ata_aux_wq);
6479c6fd2807SJeff Garzik 	cancel_delayed_work(&ap->hotplug_task);
6480c6fd2807SJeff Garzik 	flush_workqueue(ata_aux_wq);
6481c6fd2807SJeff Garzik 
6482c6fd2807SJeff Garzik  skip_eh:
6483c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
6484cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
6485c6fd2807SJeff Garzik }
6486c6fd2807SJeff Garzik 
6487c6fd2807SJeff Garzik /**
64880529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
64890529c159STejun Heo  *	@host: Host to detach
64900529c159STejun Heo  *
64910529c159STejun Heo  *	Detach all ports of @host.
64920529c159STejun Heo  *
64930529c159STejun Heo  *	LOCKING:
64940529c159STejun Heo  *	Kernel thread context (may sleep).
64950529c159STejun Heo  */
64960529c159STejun Heo void ata_host_detach(struct ata_host *host)
64970529c159STejun Heo {
64980529c159STejun Heo 	int i;
64990529c159STejun Heo 
65000529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
65010529c159STejun Heo 		ata_port_detach(host->ports[i]);
65020529c159STejun Heo }
65030529c159STejun Heo 
6504c6fd2807SJeff Garzik /**
6505c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
6506c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
6507c6fd2807SJeff Garzik  *
6508c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
6509c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6510c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
6511c6fd2807SJeff Garzik  *	relative to cmd_addr.
6512c6fd2807SJeff Garzik  *
6513c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6514c6fd2807SJeff Garzik  */
6515c6fd2807SJeff Garzik 
6516c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
6517c6fd2807SJeff Garzik {
6518c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6519c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6520c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6521c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6522c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6523c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6524c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6525c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6526c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6527c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6528c6fd2807SJeff Garzik }
6529c6fd2807SJeff Garzik 
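/*
 * Typical use (the iomap indices and the control-block offset of 2 follow
 * the legacy PCI IDE layout but are assumptions here): the LLD fills in
 * cmd_addr, ctl_addr and bmdma_addr, and this helper derives the rest.
 *
 *	ioaddr->cmd_addr = iomap[0];
 *	ioaddr->ctl_addr = iomap[1] + 2;
 *	ioaddr->bmdma_addr = iomap[4];
 *	ata_std_ports(ioaddr);
 */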
6530c6fd2807SJeff Garzik 
6531c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6532c6fd2807SJeff Garzik 
6533c6fd2807SJeff Garzik /**
6534c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
6535c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
6536c6fd2807SJeff Garzik  *
6537b878ca5dSTejun Heo  *	The PCI layer indicates to libata via this hook that a hot-unplug
6538b878ca5dSTejun Heo  *	or module unload event has occurred.  Detach all ports.  Resource
6539b878ca5dSTejun Heo  *	release is handled via devres.
6540c6fd2807SJeff Garzik  *
6541c6fd2807SJeff Garzik  *	LOCKING:
6542c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
6543c6fd2807SJeff Garzik  */
6544c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
6545c6fd2807SJeff Garzik {
6546c6fd2807SJeff Garzik 	struct device *dev = pci_dev_to_dev(pdev);
6547cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
6548c6fd2807SJeff Garzik 
6549f0d36efdSTejun Heo 	ata_host_detach(host);
6550c6fd2807SJeff Garzik }
6551c6fd2807SJeff Garzik 
6552c6fd2807SJeff Garzik /* move to PCI subsystem */
6553c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6554c6fd2807SJeff Garzik {
6555c6fd2807SJeff Garzik 	unsigned long tmp = 0;
6556c6fd2807SJeff Garzik 
6557c6fd2807SJeff Garzik 	switch (bits->width) {
6558c6fd2807SJeff Garzik 	case 1: {
6559c6fd2807SJeff Garzik 		u8 tmp8 = 0;
6560c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6561c6fd2807SJeff Garzik 		tmp = tmp8;
6562c6fd2807SJeff Garzik 		break;
6563c6fd2807SJeff Garzik 	}
6564c6fd2807SJeff Garzik 	case 2: {
6565c6fd2807SJeff Garzik 		u16 tmp16 = 0;
6566c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
6567c6fd2807SJeff Garzik 		tmp = tmp16;
6568c6fd2807SJeff Garzik 		break;
6569c6fd2807SJeff Garzik 	}
6570c6fd2807SJeff Garzik 	case 4: {
6571c6fd2807SJeff Garzik 		u32 tmp32 = 0;
6572c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6573c6fd2807SJeff Garzik 		tmp = tmp32;
6574c6fd2807SJeff Garzik 		break;
6575c6fd2807SJeff Garzik 	}
6576c6fd2807SJeff Garzik 
6577c6fd2807SJeff Garzik 	default:
6578c6fd2807SJeff Garzik 		return -EINVAL;
6579c6fd2807SJeff Garzik 	}
6580c6fd2807SJeff Garzik 
6581c6fd2807SJeff Garzik 	tmp &= bits->mask;
6582c6fd2807SJeff Garzik 
6583c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
6584c6fd2807SJeff Garzik }
6585c6fd2807SJeff Garzik 
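/*
 * Sketch of how a driver describes its "port enable" bits (the config
 * register offsets, masks and values below are made up for illustration):
 * each entry is { reg, width, mask, val }, and pci_test_config_bits()
 * returns 1 when the masked register value matches val.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */
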
65866ffa01d8STejun Heo #ifdef CONFIG_PM
6587c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6588c6fd2807SJeff Garzik {
6589c6fd2807SJeff Garzik 	pci_save_state(pdev);
6590c6fd2807SJeff Garzik 	pci_disable_device(pdev);
65914c90d971STejun Heo 
65924c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
6593c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
6594c6fd2807SJeff Garzik }
6595c6fd2807SJeff Garzik 
6596553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
6597c6fd2807SJeff Garzik {
6598553c4aa6STejun Heo 	int rc;
6599553c4aa6STejun Heo 
6600c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
6601c6fd2807SJeff Garzik 	pci_restore_state(pdev);
6602553c4aa6STejun Heo 
6603f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
6604553c4aa6STejun Heo 	if (rc) {
6605553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
6606553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
6607553c4aa6STejun Heo 		return rc;
6608553c4aa6STejun Heo 	}
6609553c4aa6STejun Heo 
6610c6fd2807SJeff Garzik 	pci_set_master(pdev);
6611553c4aa6STejun Heo 	return 0;
6612c6fd2807SJeff Garzik }
6613c6fd2807SJeff Garzik 
6614c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6615c6fd2807SJeff Garzik {
6616cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6617c6fd2807SJeff Garzik 	int rc = 0;
6618c6fd2807SJeff Garzik 
6619cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
6620c6fd2807SJeff Garzik 	if (rc)
6621c6fd2807SJeff Garzik 		return rc;
6622c6fd2807SJeff Garzik 
6623c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
6624c6fd2807SJeff Garzik 
6625c6fd2807SJeff Garzik 	return 0;
6626c6fd2807SJeff Garzik }
6627c6fd2807SJeff Garzik 
6628c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
6629c6fd2807SJeff Garzik {
6630cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6631553c4aa6STejun Heo 	int rc;
6632c6fd2807SJeff Garzik 
6633553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
6634553c4aa6STejun Heo 	if (rc == 0)
6635cca3974eSJeff Garzik 		ata_host_resume(host);
6636553c4aa6STejun Heo 	return rc;
6637c6fd2807SJeff Garzik }
66386ffa01d8STejun Heo #endif /* CONFIG_PM */
66396ffa01d8STejun Heo 
6640c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6641c6fd2807SJeff Garzik 
6642c6fd2807SJeff Garzik 
6643c6fd2807SJeff Garzik static int __init ata_init(void)
6644c6fd2807SJeff Garzik {
6645c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
6646c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
6647c6fd2807SJeff Garzik 	if (!ata_wq)
6648c6fd2807SJeff Garzik 		return -ENOMEM;
6649c6fd2807SJeff Garzik 
6650c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6651c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
6652c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
6653c6fd2807SJeff Garzik 		return -ENOMEM;
6654c6fd2807SJeff Garzik 	}
6655c6fd2807SJeff Garzik 
6656c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6657c6fd2807SJeff Garzik 	return 0;
6658c6fd2807SJeff Garzik }
6659c6fd2807SJeff Garzik 
6660c6fd2807SJeff Garzik static void __exit ata_exit(void)
6661c6fd2807SJeff Garzik {
6662c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
6663c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
6664c6fd2807SJeff Garzik }
6665c6fd2807SJeff Garzik 
6666a4625085SBrian King subsys_initcall(ata_init);
6667c6fd2807SJeff Garzik module_exit(ata_exit);
6668c6fd2807SJeff Garzik 
6669c6fd2807SJeff Garzik static unsigned long ratelimit_time;
6670c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
6671c6fd2807SJeff Garzik 
6672c6fd2807SJeff Garzik int ata_ratelimit(void)
6673c6fd2807SJeff Garzik {
6674c6fd2807SJeff Garzik 	int rc;
6675c6fd2807SJeff Garzik 	unsigned long flags;
6676c6fd2807SJeff Garzik 
6677c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6678c6fd2807SJeff Garzik 
6679c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
6680c6fd2807SJeff Garzik 		rc = 1;
6681c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
6682c6fd2807SJeff Garzik 	} else
6683c6fd2807SJeff Garzik 		rc = 0;
6684c6fd2807SJeff Garzik 
6685c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6686c6fd2807SJeff Garzik 
6687c6fd2807SJeff Garzik 	return rc;
6688c6fd2807SJeff Garzik }
6689c6fd2807SJeff Garzik 
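/*
 * Usage sketch: gate a potentially noisy message so that, system-wide, it
 * is emitted at most once per HZ/5 jiffies (200ms) as set up above.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt (status 0x%x)\n", status);
 */
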
6690c6fd2807SJeff Garzik /**
6691c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
6692c6fd2807SJeff Garzik  *	@reg: IO-mapped register
6693c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
6694c6fd2807SJeff Garzik  *	@val: Wait condition
6695c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
6696c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
6697c6fd2807SJeff Garzik  *
6698c6fd2807SJeff Garzik  *	Waiting for some bits of a register to change is a common
6699c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads the 32-bit LE
6700c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
6701c6fd2807SJeff Garzik  *
6702c6fd2807SJeff Garzik  *	(*@reg & mask) != val
6703c6fd2807SJeff Garzik  *
6704c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
6705c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
6706c6fd2807SJeff Garzik  *
6707c6fd2807SJeff Garzik  *	LOCKING:
6708c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
6709c6fd2807SJeff Garzik  *
6710c6fd2807SJeff Garzik  *	RETURNS:
6711c6fd2807SJeff Garzik  *	The final register value.
6712c6fd2807SJeff Garzik  */
6713c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6714c6fd2807SJeff Garzik 		      unsigned long interval_msec,
6715c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
6716c6fd2807SJeff Garzik {
6717c6fd2807SJeff Garzik 	unsigned long timeout;
6718c6fd2807SJeff Garzik 	u32 tmp;
6719c6fd2807SJeff Garzik 
6720c6fd2807SJeff Garzik 	tmp = ioread32(reg);
6721c6fd2807SJeff Garzik 
6722c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
6723c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
6724c6fd2807SJeff Garzik 	 * eat away the timeout.
6725c6fd2807SJeff Garzik 	 */
6726c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
6727c6fd2807SJeff Garzik 
6728c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6729c6fd2807SJeff Garzik 		msleep(interval_msec);
6730c6fd2807SJeff Garzik 		tmp = ioread32(reg);
6731c6fd2807SJeff Garzik 	}
6732c6fd2807SJeff Garzik 
6733c6fd2807SJeff Garzik 	return tmp;
6734c6fd2807SJeff Garzik }
6735c6fd2807SJeff Garzik 
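/*
 * Usage sketch (the FOO_STATUS offset and the mmio base are assumptions):
 * poll a 32-bit status register every 10ms until the BSY bit clears or
 * one second elapses, then check whether it actually cleared.
 *
 *	status = ata_wait_register(mmio + FOO_STATUS, ATA_BUSY, ATA_BUSY,
 *				   10, 1000);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;
 */
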
6736c6fd2807SJeff Garzik /*
6737c6fd2807SJeff Garzik  * Dummy port_ops
6738c6fd2807SJeff Garzik  */
6739c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
6740c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
6741c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6742c6fd2807SJeff Garzik 
6743c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
6744c6fd2807SJeff Garzik {
6745c6fd2807SJeff Garzik 	return ATA_DRDY;
6746c6fd2807SJeff Garzik }
6747c6fd2807SJeff Garzik 
6748c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6749c6fd2807SJeff Garzik {
6750c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
6751c6fd2807SJeff Garzik }
6752c6fd2807SJeff Garzik 
6753c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
6754c6fd2807SJeff Garzik 	.port_disable		= ata_port_disable,
6755c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
6756c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
6757c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
6758c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
6759c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
6760c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
6761c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
6762c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
6763c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
6764c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
6765c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
6766c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
6767c6fd2807SJeff Garzik };
6768c6fd2807SJeff Garzik 
676921b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
677021b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
677121b0ad4fSTejun Heo };
677221b0ad4fSTejun Heo 
6773c6fd2807SJeff Garzik /*
6774c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
6775c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
6776c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
6777c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
6778c6fd2807SJeff Garzik  */
6779c6fd2807SJeff Garzik 
6780c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6781c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6782c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6783c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
678421b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6785c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
6786c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
6787cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
6788f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
6789f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6790ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
6791f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
6792f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
67930529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
6794c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
6795c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
6796c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
6797c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
6798c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6799c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6800c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
6801c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
6802c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6803c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
680443727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
6805c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6806c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6807c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
6808c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
6809c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
6810c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
6811c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
681204351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
68130d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
68140d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6815c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
6816c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6817c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6818c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
6819c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6820c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
6821c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6822c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6823c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6824c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6825c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6826c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6827c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
682810305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
6829c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
6830c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_debounce);
6831c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_resume);
6832c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
6833c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
6834c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
6835c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
6836c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
6837b6103f6dSTejun Heo EXPORT_SYMBOL_GPL(sata_port_hardreset);
6838c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
6839c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
6840c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
6841c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
6842c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
6843c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
6844c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
6845c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
6846d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
6847c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
6848c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6849c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6850c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6851c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6852c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6853c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
6854c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
6855c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
6856c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
6857c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6858c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_online);
6859c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_offline);
68606ffa01d8STejun Heo #ifdef CONFIG_PM
6861cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
6862cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
68636ffa01d8STejun Heo #endif /* CONFIG_PM */
6864c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
6865c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
686610305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
68676919a0a6SAlan Cox EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6868c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6869c6fd2807SJeff Garzik 
6870c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6871c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
6872c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
6873c6fd2807SJeff Garzik 
6874c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6875c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
6876d491b27bSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
687721b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
6878c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
6879c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
68806ffa01d8STejun Heo #ifdef CONFIG_PM
6881c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6882c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6883c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6884c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
68856ffa01d8STejun Heo #endif /* CONFIG_PM */
6886c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6887c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6888c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6889c6fd2807SJeff Garzik 
68906ffa01d8STejun Heo #ifdef CONFIG_PM
6891c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6892c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
68936ffa01d8STejun Heo #endif /* CONFIG_PM */
6894c6fd2807SJeff Garzik 
6895c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
6896c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6897c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
6898c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
6899c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6900c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6901c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6902c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6903c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
690483625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
690583625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
690683625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_ack);
690783625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6908a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6909be0d18dfSAlan Cox 
6910be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
6911be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
6912be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
6913be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
6914