/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for a command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
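
/*
 * Example (illustrative sketch, not part of the driver): a SATA LLD that
 * has a filled-out taskfile could emit the corresponding Register - Host
 * to Device command FIS like this, assuming no port multiplier (pmp == 0).
 * "qc" here is a hypothetical queued command:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, 0, 1, fis);
 *
 * fis[] then holds the 20-byte H2D FIS, ready to be copied into the
 * controller's command area.
 */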

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
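
/*
 * Example (illustrative): decoding a received Register - Device to Host
 * FIS back into a taskfile to inspect the status and error bytes.
 * "d2h_fis" is a hypothetical buffer captured by the controller:
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_from_fis(d2h_fis, &tf);
 *	if (tf.command & ATA_ERR)
 *		handle_error(tf.feature);	(* error register *)
 */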

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
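
/*
 * Worked example of the lookup above (illustrative): a DMA write with FUA
 * on an LBA48-capable device and host gives index = 16, fua = 4,
 * lba48 = 2, write = 1, so ata_rw_cmds[16 + 4 + 2 + 1] selects
 * ATA_CMD_WRITE_FUA_EXT.  A plain PIO read on a device without
 * multi_count uses index 8 with all offsets zero, i.e. ATA_CMD_PIO_READ.
 */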

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
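
/*
 * Worked example (illustrative): for an LBA48 taskfile with
 * hob_lbal = 0x01, lbah = 0x02, lbam = 0x03 and lbal = 0x04 (all other
 * address bytes zero), the shifts above assemble to block 0x01020304.
 */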

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
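
/*
 * Example (illustrative sketch): building a taskfile for a 4096-sector
 * FUA write starting at LBA 1000000.  The taskfile, device and tag come
 * from a hypothetical queued command "qc"; error handling is elided:
 *
 *	int rc;
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, 1000000ULL, 4096,
 *			     ATA_TFLAG_WRITE | ATA_TFLAG_FUA, qc->tag);
 *	if (rc == -ERANGE)
 *		the request does not fit the device's addressing scheme;
 *	if (rc == -EINVAL)
 *		no suitable read/write command could be selected.
 */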

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
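
/*
 * Example (illustrative): packing PIO0-4, MWDMA0-2 and UDMA0-5 support
 * into one xfer_mask and pulling the PIO portion back out:
 *
 *	unsigned int xfer_mask, pio;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, NULL, NULL);
 *
 * after which pio holds 0x1f again; the NULL arguments are simply skipped.
 */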

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
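
/*
 * Worked example (illustrative): an xfer_mask whose highest set bit is
 * UDMA5 (UDMA/100) maps through ata_xfer_mask2mode() to XFER_UDMA_5;
 * ata_xfer_mode2mask(XFER_UDMA_5) gives back a mask with only that one
 * bit set, and ata_xfer_mode2shift(XFER_UDMA_5) returns ATA_SHIFT_UDMA.
 */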

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN in
 *	the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
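
/*
 * Example (illustrative): classifying whatever responded after a reset,
 * using a taskfile freshly read from the shadow registers:
 *
 *	struct ata_taskfile tf;
 *
 *	ap->ops->tf_read(ap, &tf);
 *	if (ata_dev_classify(&tf) == ATA_DEV_ATAPI)
 *		lbam/lbah carried the 0x14/0xeb (or 0x69/0x96) ATAPI
 *		signature shown above.
 */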

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
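
/*
 * Example (illustrative): extracting the model string from IDENTIFY data.
 * ATA_ID_PROD and ATA_ID_PROD_LEN are the usual <linux/ata.h> constants;
 * note the buffer is one byte longer than the on-wire field, as the
 * comment above requires:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */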

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}
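
/*
 * Worked example (illustrative): a READ NATIVE MAX ADDRESS EXT reply
 * whose address bytes assemble to 0x000fffff reports that value as the
 * last addressable LBA, so ata_tf_to_lba48() returns 0x00100000 sectors;
 * the increment above turns a last-address into a sector count.
 */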

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);

	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_id_to_dma_mode	-	Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode. This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit messages in the same format as the default
 *	ata_dev_set_mode handler, to keep the presentation consistent.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
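
/*
 * Example (illustrative): a legacy driver that relies on BIOS/firmware
 * timings might call this from its mode-setup path, falling back to
 * reporting MWDMA0 when the IDENTIFY data is inconclusive:
 *
 *	ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
 */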
111610305f0fSAlan 
111710305f0fSAlan /**
1118c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1119c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1120c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1121c6fd2807SJeff Garzik  *
1122c6fd2807SJeff Garzik  *	This function performs no actual function.
1123c6fd2807SJeff Garzik  *
1124c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1125c6fd2807SJeff Garzik  *
1126c6fd2807SJeff Garzik  *	LOCKING:
1127c6fd2807SJeff Garzik  *	caller.
1128c6fd2807SJeff Garzik  */
1129c6fd2807SJeff Garzik void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1130c6fd2807SJeff Garzik {
1131c6fd2807SJeff Garzik }
1132c6fd2807SJeff Garzik 
1133c6fd2807SJeff Garzik 
1134c6fd2807SJeff Garzik /**
1135c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1136c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1137c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1138c6fd2807SJeff Garzik  *
1139c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1140c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1141c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1142c6fd2807SJeff Garzik  *
1143c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1144c6fd2807SJeff Garzik  *
1145c6fd2807SJeff Garzik  *	LOCKING:
1146c6fd2807SJeff Garzik  *	caller.
1147c6fd2807SJeff Garzik  */
1148c6fd2807SJeff Garzik 
1149c6fd2807SJeff Garzik void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1150c6fd2807SJeff Garzik {
1151c6fd2807SJeff Garzik 	u8 tmp;
1152c6fd2807SJeff Garzik 
1153c6fd2807SJeff Garzik 	if (device == 0)
1154c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1155c6fd2807SJeff Garzik 	else
1156c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1157c6fd2807SJeff Garzik 
11580d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1159c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1160c6fd2807SJeff Garzik }
1161c6fd2807SJeff Garzik 
1162c6fd2807SJeff Garzik /**
1163c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1164c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1165c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1166c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1167c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1168c6fd2807SJeff Garzik  *
1169c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1170c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1171c6fd2807SJeff Garzik  *	ATA channel.
1172c6fd2807SJeff Garzik  *
1173c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1174c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1175c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1176c6fd2807SJeff Garzik  *
1177c6fd2807SJeff Garzik  *	LOCKING:
1178c6fd2807SJeff Garzik  *	caller.
1179c6fd2807SJeff Garzik  */
1180c6fd2807SJeff Garzik 
1181c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1182c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1183c6fd2807SJeff Garzik {
1184c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
118544877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
118644877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1187c6fd2807SJeff Garzik 
1188c6fd2807SJeff Garzik 	if (wait)
1189c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1190c6fd2807SJeff Garzik 
1191c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1192c6fd2807SJeff Garzik 
1193c6fd2807SJeff Garzik 	if (wait) {
11949af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1195c6fd2807SJeff Garzik 			msleep(150);
1196c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1197c6fd2807SJeff Garzik 	}
1198c6fd2807SJeff Garzik }
1199c6fd2807SJeff Garzik 
1200c6fd2807SJeff Garzik /**
1201c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1202c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1203c6fd2807SJeff Garzik  *
1204c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1205c6fd2807SJeff Garzik  *	page.
1206c6fd2807SJeff Garzik  *
1207c6fd2807SJeff Garzik  *	LOCKING:
1208c6fd2807SJeff Garzik  *	caller.
1209c6fd2807SJeff Garzik  */
1210c6fd2807SJeff Garzik 
1211c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1212c6fd2807SJeff Garzik {
1213c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1214c6fd2807SJeff Garzik 		"53==0x%04x  "
1215c6fd2807SJeff Garzik 		"63==0x%04x  "
1216c6fd2807SJeff Garzik 		"64==0x%04x  "
1217c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1218c6fd2807SJeff Garzik 		id[49],
1219c6fd2807SJeff Garzik 		id[53],
1220c6fd2807SJeff Garzik 		id[63],
1221c6fd2807SJeff Garzik 		id[64],
1222c6fd2807SJeff Garzik 		id[75]);
1223c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1224c6fd2807SJeff Garzik 		"81==0x%04x  "
1225c6fd2807SJeff Garzik 		"82==0x%04x  "
1226c6fd2807SJeff Garzik 		"83==0x%04x  "
1227c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1228c6fd2807SJeff Garzik 		id[80],
1229c6fd2807SJeff Garzik 		id[81],
1230c6fd2807SJeff Garzik 		id[82],
1231c6fd2807SJeff Garzik 		id[83],
1232c6fd2807SJeff Garzik 		id[84]);
1233c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1234c6fd2807SJeff Garzik 		"93==0x%04x\n",
1235c6fd2807SJeff Garzik 		id[88],
1236c6fd2807SJeff Garzik 		id[93]);
1237c6fd2807SJeff Garzik }
1238c6fd2807SJeff Garzik 
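/*
 * Illustrative sketch: word 88 dumped above carries the UDMA support map in
 * its low byte, and word 53 bit 2 indicates whether word 88 is valid at all.
 * A minimal check for UDMA/100 capability might look like this; the helper
 * name is hypothetical.
 */
static inline int example_id_supports_udma5(const u16 *id)
{
	return (id[ATA_ID_FIELD_VALID] & (1 << 2)) &&
	       (id[ATA_ID_UDMA_MODES] & (1 << 5));
}
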
1239c6fd2807SJeff Garzik /**
1240c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1241c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1242c6fd2807SJeff Garzik  *
1243c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1244c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1245c6fd2807SJeff Garzik  *
1246c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1247c6fd2807SJeff Garzik  *
1248c6fd2807SJeff Garzik  *	LOCKING:
1249c6fd2807SJeff Garzik  *	None.
1250c6fd2807SJeff Garzik  *
1251c6fd2807SJeff Garzik  *	RETURNS:
1252c6fd2807SJeff Garzik  *	Computed xfermask
1253c6fd2807SJeff Garzik  */
1254c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1255c6fd2807SJeff Garzik {
1256c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1257c6fd2807SJeff Garzik 
1258c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1259c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1260c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1261c6fd2807SJeff Garzik 		pio_mask <<= 3;
1262c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1263c6fd2807SJeff Garzik 	} else {
1264c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1265c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1266c6fd2807SJeff Garzik 		 * a mask.
1267c6fd2807SJeff Garzik 		 */
12687a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
126946767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
127046767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
127146767aebSAlan Cox 		else
127246767aebSAlan Cox 			pio_mask = 1;
1273c6fd2807SJeff Garzik 
1274c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1275c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1276c6fd2807SJeff Garzik 		 * process. However, it's the speeds, not the modes, that
1277c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1278c6fd2807SJeff Garzik 		 * will get this right anyway
1279c6fd2807SJeff Garzik 		 */
1280c6fd2807SJeff Garzik 	}
1281c6fd2807SJeff Garzik 
1282c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1283c6fd2807SJeff Garzik 
1284b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1285b352e57dSAlan Cox 		/*
1286b352e57dSAlan Cox 		 *	Process compact flash extended modes
1287b352e57dSAlan Cox 		 */
1288b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1289b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1290b352e57dSAlan Cox 
1291b352e57dSAlan Cox 		if (pio)
1292b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1293b352e57dSAlan Cox 		if (pio > 1)
1294b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1295b352e57dSAlan Cox 		if (dma)
1296b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1297b352e57dSAlan Cox 		if (dma > 1)
1298b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1299b352e57dSAlan Cox 	}
1300b352e57dSAlan Cox 
1301c6fd2807SJeff Garzik 	udma_mask = 0;
1302c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1303c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1304c6fd2807SJeff Garzik 
1305c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1306c6fd2807SJeff Garzik }
1307c6fd2807SJeff Garzik 
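/*
 * Illustrative sketch: the value returned above packs the three per-type
 * masks into a single word at the ATA_SHIFT_* offsets, in the same spirit
 * as ata_pack_xfermask().  The example values are made up: a drive doing
 * PIO0-4, MWDMA0-2 and UDMA0-5 yields masks 0x1f, 0x07 and 0x3f.
 */
static inline unsigned int example_pack_xfermask(void)
{
	unsigned int pio_mask = 0x1f;	/* PIO modes 0-4 */
	unsigned int mwdma_mask = 0x07;	/* MWDMA modes 0-2 */
	unsigned int udma_mask = 0x3f;	/* UDMA modes 0-5 */

	return (pio_mask << ATA_SHIFT_PIO) |
	       (mwdma_mask << ATA_SHIFT_MWDMA) |
	       (udma_mask << ATA_SHIFT_UDMA);
}
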
1308c6fd2807SJeff Garzik /**
1309c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1310c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1311c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
131265f27f38SDavid Howells  *	@data: data for @fn to use
1313c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1314c6fd2807SJeff Garzik  *
1315c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1316c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1317c6fd2807SJeff Garzik  *	user's (low level driver's) responsibility to make sure that only
1318c6fd2807SJeff Garzik  *	one task is active at any given time.
1319c6fd2807SJeff Garzik  *
1320c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1321c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1322c6fd2807SJeff Garzik  *	synchronization.
1323c6fd2807SJeff Garzik  *
1324c6fd2807SJeff Garzik  *	LOCKING:
1325c6fd2807SJeff Garzik  *	Inherited from caller.
1326c6fd2807SJeff Garzik  */
132765f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1328c6fd2807SJeff Garzik 			 unsigned long delay)
1329c6fd2807SJeff Garzik {
133065f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
133165f27f38SDavid Howells 	ap->port_task_data = data;
1332c6fd2807SJeff Garzik 
133345a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
133445a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1335c6fd2807SJeff Garzik }
1336c6fd2807SJeff Garzik 
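/*
 * Illustrative sketch: how a deferred polling step can be scheduled through
 * port_task.  "example_poll_fn" and "example_defer_poll" are hypothetical;
 * the PIO path elsewhere in libata uses the same mechanism.
 */
static void example_poll_fn(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	void *data = ap->port_task_data;	/* set by the queueing call */

	/* ... poll device status and act on 'data' here ... */
	(void)data;
}

static void example_defer_poll(struct ata_port *ap, void *data)
{
	/* run example_poll_fn() via port_task after one jiffy */
	ata_port_queue_task(ap, example_poll_fn, data, 1);
}
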
1337c6fd2807SJeff Garzik /**
1338c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1339c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1340c6fd2807SJeff Garzik  *
1341c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1342c6fd2807SJeff Garzik  *	be running or scheduled.
1343c6fd2807SJeff Garzik  *
1344c6fd2807SJeff Garzik  *	LOCKING:
1345c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1346c6fd2807SJeff Garzik  */
1347c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1348c6fd2807SJeff Garzik {
1349c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1350c6fd2807SJeff Garzik 
135145a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1352c6fd2807SJeff Garzik 
1353c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1354c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1355c6fd2807SJeff Garzik }
1356c6fd2807SJeff Garzik 
13577102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1358c6fd2807SJeff Garzik {
1359c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1360c6fd2807SJeff Garzik 
1361c6fd2807SJeff Garzik 	complete(waiting);
1362c6fd2807SJeff Garzik }
1363c6fd2807SJeff Garzik 
1364c6fd2807SJeff Garzik /**
13652432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1366c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1367c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1368c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1369c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
13702432697bSTejun Heo  *	@sg: sg list for the data buffer of the command
13712432697bSTejun Heo  *	@n_elem: Number of sg entries
1372c6fd2807SJeff Garzik  *
1373c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1374c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1375c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1376c6fd2807SJeff Garzik  *	is taken after a command times out.  It is the caller's duty to
1377c6fd2807SJeff Garzik  *	clean up after timeout.
1378c6fd2807SJeff Garzik  *
1379c6fd2807SJeff Garzik  *	LOCKING:
1380c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1381c6fd2807SJeff Garzik  *
1382c6fd2807SJeff Garzik  *	RETURNS:
1383c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1384c6fd2807SJeff Garzik  */
13852432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1386c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
13872432697bSTejun Heo 			      int dma_dir, struct scatterlist *sg,
13882432697bSTejun Heo 			      unsigned int n_elem)
1389c6fd2807SJeff Garzik {
13909af5c9c9STejun Heo 	struct ata_link *link = dev->link;
13919af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1392c6fd2807SJeff Garzik 	u8 command = tf->command;
1393c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1394c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1395c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1396da917d69STejun Heo 	int preempted_nr_active_links;
1397c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1398c6fd2807SJeff Garzik 	unsigned long flags;
1399c6fd2807SJeff Garzik 	unsigned int err_mask;
1400c6fd2807SJeff Garzik 	int rc;
1401c6fd2807SJeff Garzik 
1402c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1403c6fd2807SJeff Garzik 
1404c6fd2807SJeff Garzik 	/* no internal command while frozen */
1405c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1406c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1407c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1408c6fd2807SJeff Garzik 	}
1409c6fd2807SJeff Garzik 
1410c6fd2807SJeff Garzik 	/* initialize internal qc */
1411c6fd2807SJeff Garzik 
1412c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1413c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1414c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1415c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1416c6fd2807SJeff Garzik 	 */
1417c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1418c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1419c6fd2807SJeff Garzik 	else
1420c6fd2807SJeff Garzik 		tag = 0;
1421c6fd2807SJeff Garzik 
1422c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1423c6fd2807SJeff Garzik 		BUG();
1424c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1425c6fd2807SJeff Garzik 
1426c6fd2807SJeff Garzik 	qc->tag = tag;
1427c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1428c6fd2807SJeff Garzik 	qc->ap = ap;
1429c6fd2807SJeff Garzik 	qc->dev = dev;
1430c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1431c6fd2807SJeff Garzik 
14329af5c9c9STejun Heo 	preempted_tag = link->active_tag;
14339af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1434c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1435da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
14369af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
14379af5c9c9STejun Heo 	link->sactive = 0;
1438c6fd2807SJeff Garzik 	ap->qc_active = 0;
1439da917d69STejun Heo 	ap->nr_active_links = 0;
1440c6fd2807SJeff Garzik 
1441c6fd2807SJeff Garzik 	/* prepare & issue qc */
1442c6fd2807SJeff Garzik 	qc->tf = *tf;
1443c6fd2807SJeff Garzik 	if (cdb)
1444c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1445c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1446c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1447c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
14482432697bSTejun Heo 		unsigned int i, buflen = 0;
14492432697bSTejun Heo 
14502432697bSTejun Heo 		for (i = 0; i < n_elem; i++)
14512432697bSTejun Heo 			buflen += sg[i].length;
14522432697bSTejun Heo 
14532432697bSTejun Heo 		ata_sg_init(qc, sg, n_elem);
145449c80429SBrian King 		qc->nbytes = buflen;
1455c6fd2807SJeff Garzik 	}
1456c6fd2807SJeff Garzik 
1457c6fd2807SJeff Garzik 	qc->private_data = &wait;
1458c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1459c6fd2807SJeff Garzik 
1460c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1461c6fd2807SJeff Garzik 
1462c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1463c6fd2807SJeff Garzik 
1464c6fd2807SJeff Garzik 	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1465c6fd2807SJeff Garzik 
1466c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1467c6fd2807SJeff Garzik 
1468c6fd2807SJeff Garzik 	if (!rc) {
1469c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1470c6fd2807SJeff Garzik 
1471c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1472c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1473c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1474c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1475c6fd2807SJeff Garzik 		 */
1476c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1477c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1478c6fd2807SJeff Garzik 
1479c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1480c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1481c6fd2807SJeff Garzik 			else
1482c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1483c6fd2807SJeff Garzik 
1484c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1485c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1486c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1487c6fd2807SJeff Garzik 		}
1488c6fd2807SJeff Garzik 
1489c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1490c6fd2807SJeff Garzik 	}
1491c6fd2807SJeff Garzik 
1492c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1493c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1494c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1495c6fd2807SJeff Garzik 
1496a51d644aSTejun Heo 	/* perform minimal error analysis */
1497a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1498a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1499a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1500a51d644aSTejun Heo 
1501a51d644aSTejun Heo 		if (!qc->err_mask)
1502c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1503a51d644aSTejun Heo 
1504a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1505a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1506c6fd2807SJeff Garzik 	}
1507c6fd2807SJeff Garzik 
1508c6fd2807SJeff Garzik 	/* finish up */
1509c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1510c6fd2807SJeff Garzik 
1511c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1512c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1513c6fd2807SJeff Garzik 
1514c6fd2807SJeff Garzik 	ata_qc_free(qc);
15159af5c9c9STejun Heo 	link->active_tag = preempted_tag;
15169af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1517c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1518da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1519c6fd2807SJeff Garzik 
1520c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1521c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1522c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1523c6fd2807SJeff Garzik 	 * port.
1524c6fd2807SJeff Garzik 	 *
1525c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1526c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1527c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1528c6fd2807SJeff Garzik 	 *
1529c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1530c6fd2807SJeff Garzik 	 */
1531c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1532c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1533c6fd2807SJeff Garzik 		ata_port_probe(ap);
1534c6fd2807SJeff Garzik 	}
1535c6fd2807SJeff Garzik 
1536c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1537c6fd2807SJeff Garzik 
1538c6fd2807SJeff Garzik 	return err_mask;
1539c6fd2807SJeff Garzik }
1540c6fd2807SJeff Garzik 
1541c6fd2807SJeff Garzik /**
154233480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
15432432697bSTejun Heo  *	@dev: Device to which the command is sent
15442432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
15452432697bSTejun Heo  *	@cdb: CDB for packet command
15462432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
15472432697bSTejun Heo  *	@buf: Data buffer of the command
15482432697bSTejun Heo  *	@buflen: Length of data buffer
15492432697bSTejun Heo  *
15502432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
15512432697bSTejun Heo  *	buffer instead of sg list.
15522432697bSTejun Heo  *
15532432697bSTejun Heo  *	LOCKING:
15542432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
15552432697bSTejun Heo  *
15562432697bSTejun Heo  *	RETURNS:
15572432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
15582432697bSTejun Heo  */
15592432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
15602432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
15612432697bSTejun Heo 			   int dma_dir, void *buf, unsigned int buflen)
15622432697bSTejun Heo {
156333480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
156433480a0eSTejun Heo 	unsigned int n_elem = 0;
15652432697bSTejun Heo 
156633480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
156733480a0eSTejun Heo 		WARN_ON(!buf);
15682432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
156933480a0eSTejun Heo 		psg = &sg;
157033480a0eSTejun Heo 		n_elem++;
157133480a0eSTejun Heo 	}
15722432697bSTejun Heo 
157333480a0eSTejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
15742432697bSTejun Heo }
15752432697bSTejun Heo 
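/*
 * Illustrative sketch: a data-in internal command issued through the
 * simple-buffer wrapper above, closely mirroring the IDENTIFY path later in
 * this file.  "example_identify" is a hypothetical helper and error handling
 * is reduced to returning the err_mask.
 */
static unsigned int example_identify(struct ata_device *dev, u16 *id)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_PIO;

	return ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS);
}
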
15762432697bSTejun Heo /**
1577c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1578c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1579c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1580c6fd2807SJeff Garzik  *
1581c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1582c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1583c6fd2807SJeff Garzik  *
1584c6fd2807SJeff Garzik  *	LOCKING:
1585c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1586c6fd2807SJeff Garzik  *
1587c6fd2807SJeff Garzik  *	RETURNS:
1588c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1589c6fd2807SJeff Garzik  */
1590c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1591c6fd2807SJeff Garzik {
1592c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1593c6fd2807SJeff Garzik 
1594c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1595c6fd2807SJeff Garzik 
1596c6fd2807SJeff Garzik 	tf.command = cmd;
1597c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1598c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1599c6fd2807SJeff Garzik 
1600c6fd2807SJeff Garzik 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1601c6fd2807SJeff Garzik }
1602c6fd2807SJeff Garzik 
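/*
 * Illustrative sketch: using the helper above to flush a drive's write
 * cache, much as the flush path elsewhere in libata does.  "example_flush"
 * is a hypothetical wrapper; it assumes an ATA disk whose IDENTIFY data has
 * already been read into dev->id.
 */
static unsigned int example_flush(struct ata_device *dev)
{
	u8 cmd;

	if (ata_id_has_flush_ext(dev->id))
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	return ata_do_simple_cmd(dev, cmd);
}
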
1603c6fd2807SJeff Garzik /**
1604c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1605c6fd2807SJeff Garzik  *	@adev: ATA device
1606c6fd2807SJeff Garzik  *
1607c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1608c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1609c6fd2807SJeff Garzik  */
1610c6fd2807SJeff Garzik 
1611c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1612c6fd2807SJeff Garzik {
1613432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check,
1614432729f0SAlan Cox 	   as the caller should already know this */
16159af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1616c6fd2807SJeff Garzik 		return 0;
1617432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1618432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1619c6fd2807SJeff Garzik 		return 1;
1620432729f0SAlan Cox 	/* We turn it on when possible */
1621432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1622432729f0SAlan Cox 		return 1;
1623432729f0SAlan Cox 	return 0;
1624432729f0SAlan Cox }
1625c6fd2807SJeff Garzik 
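/*
 * Illustrative sketch: a PATA driver's timing setup typically consults the
 * helper above before programming its registers.  The timing word and its
 * IORDY bit below are hypothetical.
 */
static u32 example_pio_timing(struct ata_device *adev)
{
	u32 timing = 0;			/* hypothetical timing register value */

	if (ata_pio_need_iordy(adev))
		timing |= 0x01;		/* hypothetical "enable IORDY" bit */

	return timing;
}
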
1626432729f0SAlan Cox /**
1627432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1628432729f0SAlan Cox  *	@adev: ATA device
1629432729f0SAlan Cox  *
1630432729f0SAlan Cox  *	Compute the highest PIO modes usable when IORDY is not in use and
1631432729f0SAlan Cox  *	return them as a PIO mode mask.
1632432729f0SAlan Cox  */
1633432729f0SAlan Cox 
1634432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1635432729f0SAlan Cox {
1636c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1637c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1638432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1639c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY? */
1640c6fd2807SJeff Garzik 		if (pio) {
1641c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1642c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1643432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1644432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1645c6fd2807SJeff Garzik 		}
1646c6fd2807SJeff Garzik 	}
1647432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1648c6fd2807SJeff Garzik }
1649c6fd2807SJeff Garzik 
1650c6fd2807SJeff Garzik /**
1651c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1652c6fd2807SJeff Garzik  *	@dev: target device
1653c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1654bff04647STejun Heo  *	@flags: ATA_READID_* flags
1655c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1656c6fd2807SJeff Garzik  *
1657c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1658c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1659c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1660c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1661c6fd2807SJeff Garzik  *
166250a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
166350a99018SAlan Cox  *	now we abort if we hit that case.
166450a99018SAlan Cox  *
1665c6fd2807SJeff Garzik  *	LOCKING:
1666c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1667c6fd2807SJeff Garzik  *
1668c6fd2807SJeff Garzik  *	RETURNS:
1669c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1670c6fd2807SJeff Garzik  */
1671c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1672bff04647STejun Heo 		    unsigned int flags, u16 *id)
1673c6fd2807SJeff Garzik {
16749af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1675c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1676c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1677c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1678c6fd2807SJeff Garzik 	const char *reason;
167954936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1680c6fd2807SJeff Garzik 	int rc;
1681c6fd2807SJeff Garzik 
1682c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
168344877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1684c6fd2807SJeff Garzik 
1685c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1686c6fd2807SJeff Garzik  retry:
1687c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1688c6fd2807SJeff Garzik 
1689c6fd2807SJeff Garzik 	switch (class) {
1690c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1691c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1692c6fd2807SJeff Garzik 		break;
1693c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1694c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1695c6fd2807SJeff Garzik 		break;
1696c6fd2807SJeff Garzik 	default:
1697c6fd2807SJeff Garzik 		rc = -ENODEV;
1698c6fd2807SJeff Garzik 		reason = "unsupported class";
1699c6fd2807SJeff Garzik 		goto err_out;
1700c6fd2807SJeff Garzik 	}
1701c6fd2807SJeff Garzik 
1702c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
170381afe893STejun Heo 
170481afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
170581afe893STejun Heo 	 * sure those are properly initialized.
170681afe893STejun Heo 	 */
170781afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
170881afe893STejun Heo 
170981afe893STejun Heo 	/* Device presence detection is unreliable on some
171081afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
171181afe893STejun Heo 	 */
171281afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1713c6fd2807SJeff Garzik 
1714c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1715c6fd2807SJeff Garzik 				     id, sizeof(id[0]) * ATA_ID_WORDS);
1716c6fd2807SJeff Garzik 	if (err_mask) {
1717800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
171855a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
171944877b4eSTejun Heo 				ap->print_id, dev->devno);
172055a8e2c8STejun Heo 			return -ENOENT;
172155a8e2c8STejun Heo 		}
172255a8e2c8STejun Heo 
172354936f8bSTejun Heo 		/* Device or controller might have reported the wrong
172454936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
172554936f8bSTejun Heo 		 * the current one is aborted by the device.
172654936f8bSTejun Heo 		 */
172754936f8bSTejun Heo 		if (may_fallback &&
172854936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
172954936f8bSTejun Heo 			may_fallback = 0;
173054936f8bSTejun Heo 
173154936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
173254936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
173354936f8bSTejun Heo 			else
173454936f8bSTejun Heo 				class = ATA_DEV_ATA;
173554936f8bSTejun Heo 			goto retry;
173654936f8bSTejun Heo 		}
173754936f8bSTejun Heo 
1738c6fd2807SJeff Garzik 		rc = -EIO;
1739c6fd2807SJeff Garzik 		reason = "I/O error";
1740c6fd2807SJeff Garzik 		goto err_out;
1741c6fd2807SJeff Garzik 	}
1742c6fd2807SJeff Garzik 
174354936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
174454936f8bSTejun Heo 	 * successfully at least once.
174554936f8bSTejun Heo 	 */
174654936f8bSTejun Heo 	may_fallback = 0;
174754936f8bSTejun Heo 
1748c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1749c6fd2807SJeff Garzik 
1750c6fd2807SJeff Garzik 	/* sanity check */
1751c6fd2807SJeff Garzik 	rc = -EINVAL;
17526070068bSAlan Cox 	reason = "device reports invalid type";
17534a3381feSJeff Garzik 
17544a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
17554a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
17564a3381feSJeff Garzik 			goto err_out;
17574a3381feSJeff Garzik 	} else {
17584a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1759c6fd2807SJeff Garzik 			goto err_out;
1760c6fd2807SJeff Garzik 	}
1761c6fd2807SJeff Garzik 
1762169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1763169439c2SMark Lord 		tried_spinup = 1;
1764169439c2SMark Lord 		/*
1765169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1766169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1767169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1768169439c2SMark Lord 		 */
1769169439c2SMark Lord 		ata_tf_init(dev, &tf);
1770169439c2SMark Lord 		tf.command = ATA_CMD_SET_FEATURES;
1771169439c2SMark Lord 		tf.feature = SETFEATURES_SPINUP;
1772169439c2SMark Lord 		tf.protocol = ATA_PROT_NODATA;
1773169439c2SMark Lord 		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1774169439c2SMark Lord 		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1775fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
1776169439c2SMark Lord 			rc = -EIO;
1777169439c2SMark Lord 			reason = "SPINUP failed";
1778169439c2SMark Lord 			goto err_out;
1779169439c2SMark Lord 		}
1780169439c2SMark Lord 		/*
1781169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1782169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1783169439c2SMark Lord 		 */
1784169439c2SMark Lord 		if (id[2] == 0x37c8)
1785169439c2SMark Lord 			goto retry;
1786169439c2SMark Lord 	}
1787169439c2SMark Lord 
1788bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1789c6fd2807SJeff Garzik 		/*
1790c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1791c6fd2807SJeff Garzik 		 * SRST RESET
179250a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
179350a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1794c6fd2807SJeff Garzik 		 * anything else..
1795c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
179650a99018SAlan Cox 		 *
179750a99018SAlan Cox 		 * Note that ATA4 says lba is mandatory so the second check
179850a99018SAlan Cox 		 * should never trigger.
1799c6fd2807SJeff Garzik 		 */
1800c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1801c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1802c6fd2807SJeff Garzik 			if (err_mask) {
1803c6fd2807SJeff Garzik 				rc = -EIO;
1804c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1805c6fd2807SJeff Garzik 				goto err_out;
1806c6fd2807SJeff Garzik 			}
1807c6fd2807SJeff Garzik 
1808c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1809c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1810c6fd2807SJeff Garzik 			 */
1811bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1812c6fd2807SJeff Garzik 			goto retry;
1813c6fd2807SJeff Garzik 		}
1814c6fd2807SJeff Garzik 	}
1815c6fd2807SJeff Garzik 
1816c6fd2807SJeff Garzik 	*p_class = class;
1817c6fd2807SJeff Garzik 
1818c6fd2807SJeff Garzik 	return 0;
1819c6fd2807SJeff Garzik 
1820c6fd2807SJeff Garzik  err_out:
1821c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1822c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1823c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1824c6fd2807SJeff Garzik 	return rc;
1825c6fd2807SJeff Garzik }
1826c6fd2807SJeff Garzik 
1827c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
1828c6fd2807SJeff Garzik {
18299af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
18309af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1831c6fd2807SJeff Garzik }
1832c6fd2807SJeff Garzik 
1833c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
1834c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
1835c6fd2807SJeff Garzik {
18369af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1837c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1838c6fd2807SJeff Garzik 
1839c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
1840c6fd2807SJeff Garzik 		desc[0] = '\0';
1841c6fd2807SJeff Garzik 		return;
1842c6fd2807SJeff Garzik 	}
184375683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
18446919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
18456919a0a6SAlan Cox 		return;
18466919a0a6SAlan Cox 	}
1847c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
1848cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1849c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
1850c6fd2807SJeff Garzik 	}
1851c6fd2807SJeff Garzik 
1852c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
1853c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1854c6fd2807SJeff Garzik 	else
1855c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1856c6fd2807SJeff Garzik }
1857c6fd2807SJeff Garzik 
1858c6fd2807SJeff Garzik /**
1859c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1860c6fd2807SJeff Garzik  *	@dev: Target device to configure
1861c6fd2807SJeff Garzik  *
1862c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
1863c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
1864c6fd2807SJeff Garzik  *
1865c6fd2807SJeff Garzik  *	LOCKING:
1866c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1867c6fd2807SJeff Garzik  *
1868c6fd2807SJeff Garzik  *	RETURNS:
1869c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1870c6fd2807SJeff Garzik  */
1871efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
1872c6fd2807SJeff Garzik {
18739af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
18749af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
18756746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1876c6fd2807SJeff Garzik 	const u16 *id = dev->id;
1877c6fd2807SJeff Garzik 	unsigned int xfer_mask;
1878b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
18793f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
18803f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
1881c6fd2807SJeff Garzik 	int rc;
1882c6fd2807SJeff Garzik 
1883c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
188444877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
188544877b4eSTejun Heo 			       __FUNCTION__);
1886c6fd2807SJeff Garzik 		return 0;
1887c6fd2807SJeff Garzik 	}
1888c6fd2807SJeff Garzik 
1889c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
189044877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1891c6fd2807SJeff Garzik 
189275683fe7STejun Heo 	/* set horkage */
189375683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
189475683fe7STejun Heo 
18956746544cSTejun Heo 	/* let ACPI work its magic */
18966746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
18976746544cSTejun Heo 	if (rc)
18986746544cSTejun Heo 		return rc;
189908573a86SKristen Carlson Accardi 
190005027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
190105027adcSTejun Heo 	rc = ata_hpa_resize(dev);
190205027adcSTejun Heo 	if (rc)
190305027adcSTejun Heo 		return rc;
190405027adcSTejun Heo 
1905c6fd2807SJeff Garzik 	/* print device capabilities */
1906c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1907c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
1908c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1909c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1910c6fd2807SJeff Garzik 			       __FUNCTION__,
1911c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
1912c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
1913c6fd2807SJeff Garzik 
1914c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
1915c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1916c6fd2807SJeff Garzik 	dev->max_sectors = 0;
1917c6fd2807SJeff Garzik 	dev->cdb_len = 0;
1918c6fd2807SJeff Garzik 	dev->n_sectors = 0;
1919c6fd2807SJeff Garzik 	dev->cylinders = 0;
1920c6fd2807SJeff Garzik 	dev->heads = 0;
1921c6fd2807SJeff Garzik 	dev->sectors = 0;
1922c6fd2807SJeff Garzik 
1923c6fd2807SJeff Garzik 	/*
1924c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
1925c6fd2807SJeff Garzik 	 */
1926c6fd2807SJeff Garzik 
1927c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
1928c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
1929c6fd2807SJeff Garzik 
1930c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1931c6fd2807SJeff Garzik 		ata_dump_id(id);
1932c6fd2807SJeff Garzik 
1933ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1934ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1935ef143d57SAlbert Lee 			sizeof(fwrevbuf));
1936ef143d57SAlbert Lee 
1937ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1938ef143d57SAlbert Lee 			sizeof(modelbuf));
1939ef143d57SAlbert Lee 
1940c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
1941c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
1942b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
1943b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
194444877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
194544877b4eSTejun Heo 					       "supports DRM functions and may "
194644877b4eSTejun Heo 					       "not be fully accessible.\n");
1947b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
1948b352e57dSAlan Cox 		}
1949b352e57dSAlan Cox 		else
1950b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d",  ata_id_major_version(id));
1951b352e57dSAlan Cox 
1952c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
1953c6fd2807SJeff Garzik 
19543f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
19553f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
19563f64f565SEric D. Mudama 
1957c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
1958c6fd2807SJeff Garzik 			const char *lba_desc;
1959c6fd2807SJeff Garzik 			char ncq_desc[20];
1960c6fd2807SJeff Garzik 
1961c6fd2807SJeff Garzik 			lba_desc = "LBA";
1962c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
1963c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
1964c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
1965c6fd2807SJeff Garzik 				lba_desc = "LBA48";
19666fc49adbSTejun Heo 
19676fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
19686fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
19696fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1970c6fd2807SJeff Garzik 			}
1971c6fd2807SJeff Garzik 
1972c6fd2807SJeff Garzik 			/* config NCQ */
1973c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1974c6fd2807SJeff Garzik 
1975c6fd2807SJeff Garzik 			/* print device info to dmesg */
19763f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
19773f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19783f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
19793f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
19803f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
19813f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
19823f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
1983c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
19843f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
19853f64f565SEric D. Mudama 			}
1986c6fd2807SJeff Garzik 		} else {
1987c6fd2807SJeff Garzik 			/* CHS */
1988c6fd2807SJeff Garzik 
1989c6fd2807SJeff Garzik 			/* Default translation */
1990c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
1991c6fd2807SJeff Garzik 			dev->heads	= id[3];
1992c6fd2807SJeff Garzik 			dev->sectors	= id[6];
1993c6fd2807SJeff Garzik 
1994c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
1995c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
1996c6fd2807SJeff Garzik 				dev->cylinders = id[54];
1997c6fd2807SJeff Garzik 				dev->heads     = id[55];
1998c6fd2807SJeff Garzik 				dev->sectors   = id[56];
1999c6fd2807SJeff Garzik 			}
2000c6fd2807SJeff Garzik 
2001c6fd2807SJeff Garzik 			/* print device info to dmesg */
20023f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2003c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
20043f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
20053f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
20063f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
20073f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
20083f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
20093f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
20103f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
20113f64f565SEric D. Mudama 					dev->heads, dev->sectors);
20123f64f565SEric D. Mudama 			}
2013c6fd2807SJeff Garzik 		}
2014c6fd2807SJeff Garzik 
2015c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2016c6fd2807SJeff Garzik 	}
2017c6fd2807SJeff Garzik 
2018c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2019c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2020854c73a2STejun Heo 		const char *cdb_intr_string = "";
2021854c73a2STejun Heo 		const char *atapi_an_string = "";
20227d77b247STejun Heo 		u32 sntf;
2023c6fd2807SJeff Garzik 
2024c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2025c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2026c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2027c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2028c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2029c6fd2807SJeff Garzik 			rc = -EINVAL;
2030c6fd2807SJeff Garzik 			goto err_out_nosup;
2031c6fd2807SJeff Garzik 		}
2032c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2033c6fd2807SJeff Garzik 
20347d77b247STejun Heo 		/* Enable ATAPI AN if both the host and device have
20357d77b247STejun Heo 		 * the support.  If PMP is attached, SNTF is required
20367d77b247STejun Heo 		 * to enable ATAPI AN to discern between PHY status
20377d77b247STejun Heo 		 * changed notifications and ATAPI ANs.
20389f45cbd3SKristen Carlson Accardi 		 */
20397d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
20407d77b247STejun Heo 		    (!ap->nr_pmp_links ||
20417d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2042854c73a2STejun Heo 			unsigned int err_mask;
2043854c73a2STejun Heo 
20449f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2045854c73a2STejun Heo 			err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2046854c73a2STejun Heo 			if (err_mask)
20479f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2048854c73a2STejun Heo 					"failed to enable ATAPI AN "
2049854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2050854c73a2STejun Heo 			else {
20519f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2052854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2053854c73a2STejun Heo 			}
20549f45cbd3SKristen Carlson Accardi 		}
20559f45cbd3SKristen Carlson Accardi 
2056c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2057c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2058c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2059c6fd2807SJeff Garzik 		}
2060c6fd2807SJeff Garzik 
2061c6fd2807SJeff Garzik 		/* print device info to dmesg */
2062c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2063ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2064854c73a2STejun Heo 				       "ATAPI: %s, %s, max %s%s%s\n",
2065ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2066c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2067854c73a2STejun Heo 				       cdb_intr_string, atapi_an_string);
2068c6fd2807SJeff Garzik 	}
2069c6fd2807SJeff Garzik 
2070914ed354STejun Heo 	/* determine max_sectors */
2071914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2072914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2073914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2074914ed354STejun Heo 
207593590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
207693590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
207793590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
207893590859SAlan Cox 		   idiot */
207993590859SAlan Cox                 if (print_info) {
208093590859SAlan Cox 		if (print_info) {
208193590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
208293590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
208393590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
208493590859SAlan Cox 		}
208593590859SAlan Cox 	}
208693590859SAlan Cox 
2087c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2088c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2089c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2090c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2091c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2092c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2093c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2094c6fd2807SJeff Garzik 	}
2095c6fd2807SJeff Garzik 
209675683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
209703ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
209803ec52deSTejun Heo 					 dev->max_sectors);
209918d6e9d5SAlbert Lee 
2100c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2101cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2102c6fd2807SJeff Garzik 
2103c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2104c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2105c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2106c6fd2807SJeff Garzik 	return 0;
2107c6fd2807SJeff Garzik 
2108c6fd2807SJeff Garzik err_out_nosup:
2109c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2110c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2111c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2112c6fd2807SJeff Garzik 	return rc;
2113c6fd2807SJeff Garzik }
2114c6fd2807SJeff Garzik 
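/*
 * Illustrative sketch: the ->dev_config() hook invoked near the end of
 * ata_dev_configure() lets a low level driver apply its own per-device
 * limits after the generic setup.  "example_dev_config" is a hypothetical
 * callback, and the 128-sector cap is an arbitrary example.
 */
static void example_dev_config(struct ata_device *adev)
{
	/* e.g. a controller that cannot handle transfers above 128 sectors */
	adev->max_sectors = min_t(unsigned int, adev->max_sectors, 128);
}
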
2115c6fd2807SJeff Garzik /**
21162e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2117be0d18dfSAlan Cox  *	@ap: port
2118be0d18dfSAlan Cox  *
21192e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2120be0d18dfSAlan Cox  *	detection.
2121be0d18dfSAlan Cox  */
2122be0d18dfSAlan Cox 
2123be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2124be0d18dfSAlan Cox {
2125be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2126be0d18dfSAlan Cox }
2127be0d18dfSAlan Cox 
2128be0d18dfSAlan Cox /**
21292e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2130be0d18dfSAlan Cox  *	@ap: port
2131be0d18dfSAlan Cox  *
21322e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2133be0d18dfSAlan Cox  *	detection.
2134be0d18dfSAlan Cox  */
2135be0d18dfSAlan Cox 
2136be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2137be0d18dfSAlan Cox {
2138be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2139be0d18dfSAlan Cox }
2140be0d18dfSAlan Cox 
2141be0d18dfSAlan Cox /**
2142be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2143be0d18dfSAlan Cox  *	@ap: port
2144be0d18dfSAlan Cox  *
2145be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2146be0d18dfSAlan Cox  */
2147be0d18dfSAlan Cox 
2148be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2149be0d18dfSAlan Cox {
2150be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2151be0d18dfSAlan Cox }
2152be0d18dfSAlan Cox 
2153be0d18dfSAlan Cox /**
2154be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2155be0d18dfSAlan Cox  *	@ap: port
2156be0d18dfSAlan Cox  *
2157be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2158be0d18dfSAlan Cox  */
2159be0d18dfSAlan Cox 
2160be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2161be0d18dfSAlan Cox {
2162be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2163be0d18dfSAlan Cox }
2164be0d18dfSAlan Cox 
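/*
 * Illustrative sketch: a board that only ever ships with a 40 wire cable
 * can simply point ->cable_detect at one of the helpers above.  The
 * structure name is a hypothetical placeholder.
 */
static const struct ata_port_operations example_pata_port_ops = {
	.cable_detect	= ata_cable_40wire,
	/* ... remaining callbacks omitted ... */
};
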
2165be0d18dfSAlan Cox /**
2166c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2167c6fd2807SJeff Garzik  *	@ap: Bus to probe
2168c6fd2807SJeff Garzik  *
2169c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2170c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2171c6fd2807SJeff Garzik  *	the bus.
2172c6fd2807SJeff Garzik  *
2173c6fd2807SJeff Garzik  *	LOCKING:
2174c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2175c6fd2807SJeff Garzik  *
2176c6fd2807SJeff Garzik  *	RETURNS:
2177c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2178c6fd2807SJeff Garzik  */
2179c6fd2807SJeff Garzik 
2180c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2181c6fd2807SJeff Garzik {
2182c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2183c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2184f58229f8STejun Heo 	int rc;
2185c6fd2807SJeff Garzik 	struct ata_device *dev;
2186c6fd2807SJeff Garzik 
2187c6fd2807SJeff Garzik 	ata_port_probe(ap);
2188c6fd2807SJeff Garzik 
2189f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2190f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2191c6fd2807SJeff Garzik 
2192c6fd2807SJeff Garzik  retry:
2193c6fd2807SJeff Garzik 	/* reset and determine device classes */
2194c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2195c6fd2807SJeff Garzik 
2196f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2197c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2198c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2199c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2200c6fd2807SJeff Garzik 		else
2201c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2202c6fd2807SJeff Garzik 
2203c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2204c6fd2807SJeff Garzik 	}
2205c6fd2807SJeff Garzik 
2206c6fd2807SJeff Garzik 	ata_port_probe(ap);
2207c6fd2807SJeff Garzik 
2208c6fd2807SJeff Garzik 	/* after the reset the device state is PIO 0 and the controller
2209c6fd2807SJeff Garzik 	   state is undefined. Record the mode */
2210c6fd2807SJeff Garzik 
2211f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2212f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2213c6fd2807SJeff Garzik 
2214f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2215f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2216f31f0cc2SJeff Garzik 	   the slave device */
2217f31f0cc2SJeff Garzik 
2218f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2219f58229f8STejun Heo 		if (tries[dev->devno])
2220f58229f8STejun Heo 			dev->class = classes[dev->devno];
2221c6fd2807SJeff Garzik 
2222c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2223c6fd2807SJeff Garzik 			continue;
2224c6fd2807SJeff Garzik 
2225bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2226bff04647STejun Heo 				     dev->id);
2227c6fd2807SJeff Garzik 		if (rc)
2228c6fd2807SJeff Garzik 			goto fail;
2229f31f0cc2SJeff Garzik 	}
2230f31f0cc2SJeff Garzik 
2231be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2232be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2233be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2234be0d18dfSAlan Cox 
2235614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2236614fe29bSAlan Cox 	   reported cable types and sensed types */
2237614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2238614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2239614fe29bSAlan Cox 			continue;
2240614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2241614fe29bSAlan Cox 		   end of the link the bridge is on, which is a problem */
2242614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2243614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2244614fe29bSAlan Cox 	}
2245614fe29bSAlan Cox 
2246f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2247f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2248f31f0cc2SJeff Garzik 
2249f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2250f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2251f31f0cc2SJeff Garzik 			continue;
2252c6fd2807SJeff Garzik 
22539af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2254efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
22559af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2256c6fd2807SJeff Garzik 		if (rc)
2257c6fd2807SJeff Garzik 			goto fail;
2258c6fd2807SJeff Garzik 	}
2259c6fd2807SJeff Garzik 
2260c6fd2807SJeff Garzik 	/* configure transfer mode */
22610260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
22624ae72a1eSTejun Heo 	if (rc)
2263c6fd2807SJeff Garzik 		goto fail;
2264c6fd2807SJeff Garzik 
2265f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2266f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2267c6fd2807SJeff Garzik 			return 0;
2268c6fd2807SJeff Garzik 
2269c6fd2807SJeff Garzik 	/* no device present, disable port */
2270c6fd2807SJeff Garzik 	ata_port_disable(ap);
2271c6fd2807SJeff Garzik 	return -ENODEV;
2272c6fd2807SJeff Garzik 
2273c6fd2807SJeff Garzik  fail:
22744ae72a1eSTejun Heo 	tries[dev->devno]--;
22754ae72a1eSTejun Heo 
2276c6fd2807SJeff Garzik 	switch (rc) {
2277c6fd2807SJeff Garzik 	case -EINVAL:
22784ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2279c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2280c6fd2807SJeff Garzik 		break;
22814ae72a1eSTejun Heo 
22824ae72a1eSTejun Heo 	case -ENODEV:
22834ae72a1eSTejun Heo 		/* give it just one more chance */
22844ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2285c6fd2807SJeff Garzik 	case -EIO:
22864ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
22874ae72a1eSTejun Heo 			/* This is the last chance, better to slow
22884ae72a1eSTejun Heo 			 * down than lose it.
22894ae72a1eSTejun Heo 			 */
2290936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
22914ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
22924ae72a1eSTejun Heo 		}
2293c6fd2807SJeff Garzik 	}
2294c6fd2807SJeff Garzik 
22954ae72a1eSTejun Heo 	if (!tries[dev->devno])
2296c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2297c6fd2807SJeff Garzik 
2298c6fd2807SJeff Garzik 	goto retry;
2299c6fd2807SJeff Garzik }
2300c6fd2807SJeff Garzik 
2301c6fd2807SJeff Garzik /**
2302c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2303c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2304c6fd2807SJeff Garzik  *
2305c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2306c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2307c6fd2807SJeff Garzik  *
2308cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2309c6fd2807SJeff Garzik  *	serialization.
2310c6fd2807SJeff Garzik  */
2311c6fd2807SJeff Garzik 
2312c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2313c6fd2807SJeff Garzik {
2314c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2315c6fd2807SJeff Garzik }
2316c6fd2807SJeff Garzik 
2317c6fd2807SJeff Garzik /**
2318c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2319936fd732STejun Heo  *	@link: SATA link to printk link status about
2320c6fd2807SJeff Garzik  *
2321c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2322c6fd2807SJeff Garzik  *
2323c6fd2807SJeff Garzik  *	LOCKING:
2324c6fd2807SJeff Garzik  *	None.
2325c6fd2807SJeff Garzik  */
2326936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2327c6fd2807SJeff Garzik {
2328c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2329c6fd2807SJeff Garzik 
2330936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2331c6fd2807SJeff Garzik 		return;
2332936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2333c6fd2807SJeff Garzik 
2334936fd732STejun Heo 	if (ata_link_online(link)) {
2335c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2336936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2337c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2338c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2339c6fd2807SJeff Garzik 	} else {
2340936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2341c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2342c6fd2807SJeff Garzik 				sstatus, scontrol);
2343c6fd2807SJeff Garzik 	}
2344c6fd2807SJeff Garzik }
2345c6fd2807SJeff Garzik 
2346c6fd2807SJeff Garzik /**
2347c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2348c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2349c6fd2807SJeff Garzik  *
2350c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2351c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2352c6fd2807SJeff Garzik  *	clear any reset condition.
2353c6fd2807SJeff Garzik  *
2354c6fd2807SJeff Garzik  *	LOCKING:
2355c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2356c6fd2807SJeff Garzik  *
2357c6fd2807SJeff Garzik  */
2358c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2359c6fd2807SJeff Garzik {
2360936fd732STejun Heo 	struct ata_link *link = &ap->link;
2361c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2362936fd732STejun Heo 	u32 sstatus;
2363c6fd2807SJeff Garzik 
2364c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2365c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2366936fd732STejun Heo 		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2367c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2368c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2369c6fd2807SJeff Garzik 		mdelay(1);
2370c6fd2807SJeff Garzik 	}
2371c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2372936fd732STejun Heo 	sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2373c6fd2807SJeff Garzik 
2374c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2375c6fd2807SJeff Garzik 	do {
2376c6fd2807SJeff Garzik 		msleep(200);
2377936fd732STejun Heo 		sata_scr_read(link, SCR_STATUS, &sstatus);
2378c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2379c6fd2807SJeff Garzik 			break;
2380c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2381c6fd2807SJeff Garzik 
2382c6fd2807SJeff Garzik 	/* print link status */
2383936fd732STejun Heo 	sata_print_link_status(link);
2384c6fd2807SJeff Garzik 
2385c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2386936fd732STejun Heo 	if (!ata_link_offline(link))
2387c6fd2807SJeff Garzik 		ata_port_probe(ap);
2388c6fd2807SJeff Garzik 	else
2389c6fd2807SJeff Garzik 		ata_port_disable(ap);
2390c6fd2807SJeff Garzik 
2391c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2392c6fd2807SJeff Garzik 		return;
2393c6fd2807SJeff Garzik 
2394c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2395c6fd2807SJeff Garzik 		ata_port_disable(ap);
2396c6fd2807SJeff Garzik 		return;
2397c6fd2807SJeff Garzik 	}
2398c6fd2807SJeff Garzik 
2399c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2400c6fd2807SJeff Garzik }
2401c6fd2807SJeff Garzik 
2402c6fd2807SJeff Garzik /**
2403c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2404c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2405c6fd2807SJeff Garzik  *
2406c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2407c6fd2807SJeff Garzik  *	the bus for devices.
2408c6fd2807SJeff Garzik  *
2409c6fd2807SJeff Garzik  *	LOCKING:
2410c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2411c6fd2807SJeff Garzik  *
2412c6fd2807SJeff Garzik  */
2413c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2414c6fd2807SJeff Garzik {
2415c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2416c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2417c6fd2807SJeff Garzik 		return;
2418c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2419c6fd2807SJeff Garzik }
2420c6fd2807SJeff Garzik 
2421c6fd2807SJeff Garzik /**
2422c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2423c6fd2807SJeff Garzik  *	@adev: device
2424c6fd2807SJeff Garzik  *
2425c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or NULL if none
2426c6fd2807SJeff Garzik  *	is present.
2427c6fd2807SJeff Garzik  */
2428c6fd2807SJeff Garzik 
2429c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2430c6fd2807SJeff Garzik {
24319af5c9c9STejun Heo 	struct ata_link *link = adev->link;
24329af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2433c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2434c6fd2807SJeff Garzik 		return NULL;
2435c6fd2807SJeff Garzik 	return pair;
2436c6fd2807SJeff Garzik }
2437c6fd2807SJeff Garzik 
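/*
 * Illustrative sketch (assumed caller, not from libata itself): a PATA
 * controller driver whose timing registers are shared by both devices
 * on a cable could use ata_dev_pair() from its ->set_piomode() hook:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *
 *	if (pair && pair->pio_mode < adev->pio_mode)
 *		;	// program the shared timings for the slower mode
 *
 * Note that ata_dev_pair() returns NULL when the companion device is
 * not enabled, so no separate ata_dev_enabled() check is needed.
 */
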
2438c6fd2807SJeff Garzik /**
2439c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2440c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2441c6fd2807SJeff Garzik  *
2442c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2443c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2444c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2445c6fd2807SJeff Garzik  *	on this port.
2446c6fd2807SJeff Garzik  *
2447cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2448c6fd2807SJeff Garzik  *	serialization.
2449c6fd2807SJeff Garzik  */
2450c6fd2807SJeff Garzik 
2451c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2452c6fd2807SJeff Garzik {
24539af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
24549af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2455c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2456c6fd2807SJeff Garzik }
2457c6fd2807SJeff Garzik 
2458c6fd2807SJeff Garzik /**
2459c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2460936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2461c6fd2807SJeff Garzik  *
2462936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2463c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2464c6fd2807SJeff Garzik  *	using sata_set_spd().
2465c6fd2807SJeff Garzik  *
2466c6fd2807SJeff Garzik  *	LOCKING:
2467c6fd2807SJeff Garzik  *	Inherited from caller.
2468c6fd2807SJeff Garzik  *
2469c6fd2807SJeff Garzik  *	RETURNS:
2470c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2471c6fd2807SJeff Garzik  */
2472936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2473c6fd2807SJeff Garzik {
2474c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2475c6fd2807SJeff Garzik 	int rc, highbit;
2476c6fd2807SJeff Garzik 
2477936fd732STejun Heo 	if (!sata_scr_valid(link))
2478008a7896STejun Heo 		return -EOPNOTSUPP;
2479008a7896STejun Heo 
2480008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2481936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2482008a7896STejun Heo 	 */
2483936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2484008a7896STejun Heo 	if (rc == 0)
2485008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2486008a7896STejun Heo 	else
2487936fd732STejun Heo 		spd = link->sata_spd;
2488c6fd2807SJeff Garzik 
2489936fd732STejun Heo 	mask = link->sata_spd_limit;
2490c6fd2807SJeff Garzik 	if (mask <= 1)
2491c6fd2807SJeff Garzik 		return -EINVAL;
2492008a7896STejun Heo 
2493008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2494c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2495c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2496c6fd2807SJeff Garzik 
2497008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2498008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2499008a7896STejun Heo 	 */
2500008a7896STejun Heo 	if (spd > 1)
2501008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2502008a7896STejun Heo 	else
2503008a7896STejun Heo 		mask &= 1;
2504008a7896STejun Heo 
2505008a7896STejun Heo 	/* were we already at the bottom? */
2506c6fd2807SJeff Garzik 	if (!mask)
2507c6fd2807SJeff Garzik 		return -EINVAL;
2508c6fd2807SJeff Garzik 
2509936fd732STejun Heo 	link->sata_spd_limit = mask;
2510c6fd2807SJeff Garzik 
2511936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2512c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2513c6fd2807SJeff Garzik 
2514c6fd2807SJeff Garzik 	return 0;
2515c6fd2807SJeff Garzik }
2516c6fd2807SJeff Garzik 
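/*
 * Worked example for the masking above (assumed values): with
 * link->sata_spd_limit == 0x3 (1.5 and 3.0 Gbps allowed) and the
 * current SPD read back as 2 (3.0 Gbps),
 *
 *	highbit = fls(0x3) - 1 = 1;	mask = 0x3 & ~(1 << 1) = 0x1;
 *	mask &= (1 << (2 - 1)) - 1;	// mask stays 0x1
 *
 * so link->sata_spd_limit becomes 0x1 and the link is capped at
 * 1.5 Gbps.  Had the limit already been 0x1, the function would have
 * returned -EINVAL without changing anything.
 */
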
2517936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2518c6fd2807SJeff Garzik {
2519c6fd2807SJeff Garzik 	u32 spd, limit;
2520c6fd2807SJeff Garzik 
2521936fd732STejun Heo 	if (link->sata_spd_limit == UINT_MAX)
2522c6fd2807SJeff Garzik 		limit = 0;
2523c6fd2807SJeff Garzik 	else
2524936fd732STejun Heo 		limit = fls(link->sata_spd_limit);
2525c6fd2807SJeff Garzik 
2526c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
2527c6fd2807SJeff Garzik 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2528c6fd2807SJeff Garzik 
2529c6fd2807SJeff Garzik 	return spd != limit;
2530c6fd2807SJeff Garzik }
2531c6fd2807SJeff Garzik 
2532c6fd2807SJeff Garzik /**
2533c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2534936fd732STejun Heo  *	@link: Link in question
2535c6fd2807SJeff Garzik  *
2536c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2537936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2538c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2539c6fd2807SJeff Garzik  *	configuration.
2540c6fd2807SJeff Garzik  *
2541c6fd2807SJeff Garzik  *	LOCKING:
2542c6fd2807SJeff Garzik  *	Inherited from caller.
2543c6fd2807SJeff Garzik  *
2544c6fd2807SJeff Garzik  *	RETURNS:
2545c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2546c6fd2807SJeff Garzik  */
2547936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2548c6fd2807SJeff Garzik {
2549c6fd2807SJeff Garzik 	u32 scontrol;
2550c6fd2807SJeff Garzik 
2551936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2552c6fd2807SJeff Garzik 		return 0;
2553c6fd2807SJeff Garzik 
2554936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2555c6fd2807SJeff Garzik }
2556c6fd2807SJeff Garzik 
2557c6fd2807SJeff Garzik /**
2558c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2559936fd732STejun Heo  *	@link: Link to set SATA spd for
2560c6fd2807SJeff Garzik  *
2561936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2562c6fd2807SJeff Garzik  *
2563c6fd2807SJeff Garzik  *	LOCKING:
2564c6fd2807SJeff Garzik  *	Inherited from caller.
2565c6fd2807SJeff Garzik  *
2566c6fd2807SJeff Garzik  *	RETURNS:
2567c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2568c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2569c6fd2807SJeff Garzik  */
2570936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2571c6fd2807SJeff Garzik {
2572c6fd2807SJeff Garzik 	u32 scontrol;
2573c6fd2807SJeff Garzik 	int rc;
2574c6fd2807SJeff Garzik 
2575936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2576c6fd2807SJeff Garzik 		return rc;
2577c6fd2807SJeff Garzik 
2578936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2579c6fd2807SJeff Garzik 		return 0;
2580c6fd2807SJeff Garzik 
2581936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2582c6fd2807SJeff Garzik 		return rc;
2583c6fd2807SJeff Garzik 
2584c6fd2807SJeff Garzik 	return 1;
2585c6fd2807SJeff Garzik }
2586c6fd2807SJeff Garzik 
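/*
 * Illustrative sketch (assumed caller): a lowered limit only reaches
 * the device via SControl and a subsequent reset, so a typical
 * sequence is
 *
 *	if (sata_down_spd_limit(link) == 0 && sata_set_spd(link) == 1)
 *		;	// request a hardreset so the lower speed is renegotiated
 *
 * sata_set_spd_needed() above exists precisely to decide whether that
 * hardreset is in fact required.
 */
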
2587c6fd2807SJeff Garzik /*
2588c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2589c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2590c6fd2807SJeff Garzik  */
2591c6fd2807SJeff Garzik /*
2592b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2593c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2594b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2595b352e57dSAlan Cox  *
2596b352e57dSAlan Cox  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2597c6fd2807SJeff Garzik  */
2598c6fd2807SJeff Garzik 
2599c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2600c6fd2807SJeff Garzik 
2601c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2602c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2603c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2604c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2605c6fd2807SJeff Garzik 
2606b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2607b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2608c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2609c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2610c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2611c6fd2807SJeff Garzik 
2612c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2613c6fd2807SJeff Garzik 
2614c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2615c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2616c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2617c6fd2807SJeff Garzik 
2618c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2619c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2620c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2621c6fd2807SJeff Garzik 
2622b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2623b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2624c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2625c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2626c6fd2807SJeff Garzik 
2627c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2628c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2629c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2630c6fd2807SJeff Garzik 
2631c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2632c6fd2807SJeff Garzik 
2633c6fd2807SJeff Garzik 	{ 0xFF }
2634c6fd2807SJeff Garzik };
2635c6fd2807SJeff Garzik 
2636c6fd2807SJeff Garzik #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
2637c6fd2807SJeff Garzik #define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
2638c6fd2807SJeff Garzik 
2639c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2640c6fd2807SJeff Garzik {
2641c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2642c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2643c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2644c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2645c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2646c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2647c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2648c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2649c6fd2807SJeff Garzik }
2650c6fd2807SJeff Garzik 
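/*
 * Worked example of the quantisation above (assumed clock): the table
 * entries are in nanoseconds and are multiplied by 1000, so T and UT
 * are expected in picoseconds.  For XFER_PIO_4 (setup 25 ns, cyc8b
 * 120 ns) on an assumed 33 MHz bus clock, T == 30000 and
 *
 *	q->setup = ENOUGH(25 * 1000, 30000) = 1 clock
 *	q->cyc8b = ENOUGH(120 * 1000, 30000) = 4 clocks
 *
 * i.e. every value is rounded up to a whole number of clock periods,
 * while EZ() leaves unused (zero) fields at zero.
 */
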
2651c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2652c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2653c6fd2807SJeff Garzik {
2654c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2655c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2656c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2657c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2658c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2659c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2660c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2661c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2662c6fd2807SJeff Garzik }
2663c6fd2807SJeff Garzik 
2664c6fd2807SJeff Garzik static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2665c6fd2807SJeff Garzik {
2666c6fd2807SJeff Garzik 	const struct ata_timing *t;
2667c6fd2807SJeff Garzik 
2668c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2669c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2670c6fd2807SJeff Garzik 			return NULL;
2671c6fd2807SJeff Garzik 	return t;
2672c6fd2807SJeff Garzik }
2673c6fd2807SJeff Garzik 
2674c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2675c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2676c6fd2807SJeff Garzik {
2677c6fd2807SJeff Garzik 	const struct ata_timing *s;
2678c6fd2807SJeff Garzik 	struct ata_timing p;
2679c6fd2807SJeff Garzik 
2680c6fd2807SJeff Garzik 	/*
2681c6fd2807SJeff Garzik 	 * Find the mode.
2682c6fd2807SJeff Garzik 	 */
2683c6fd2807SJeff Garzik 
2684c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2685c6fd2807SJeff Garzik 		return -EINVAL;
2686c6fd2807SJeff Garzik 
2687c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2688c6fd2807SJeff Garzik 
2689c6fd2807SJeff Garzik 	/*
2690c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2691c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2692c6fd2807SJeff Garzik 	 */
2693c6fd2807SJeff Garzik 
2694c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2695c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2696c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2697c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2698c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2699c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2700c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2701c6fd2807SJeff Garzik 		}
2702c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2703c6fd2807SJeff Garzik 	}
2704c6fd2807SJeff Garzik 
2705c6fd2807SJeff Garzik 	/*
2706c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2707c6fd2807SJeff Garzik 	 */
2708c6fd2807SJeff Garzik 
2709c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2710c6fd2807SJeff Garzik 
2711c6fd2807SJeff Garzik 	/*
2712c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2713c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2714c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2715c6fd2807SJeff Garzik 	 */
2716c6fd2807SJeff Garzik 
2717fd3367afSAlan 	if (speed > XFER_PIO_6) {
2718c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2719c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2720c6fd2807SJeff Garzik 	}
2721c6fd2807SJeff Garzik 
2722c6fd2807SJeff Garzik 	/*
2723c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2724c6fd2807SJeff Garzik 	 */
2725c6fd2807SJeff Garzik 
2726c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2727c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2728c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2729c6fd2807SJeff Garzik 	}
2730c6fd2807SJeff Garzik 
2731c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2732c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2733c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2734c6fd2807SJeff Garzik 	}
27354f701d1eSAlan Cox 
27364f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
27374f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
27384f701d1eSAlan Cox 	   if so, we must correct this. */
27394f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
27404f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2741c6fd2807SJeff Garzik 
2742c6fd2807SJeff Garzik 	return 0;
2743c6fd2807SJeff Garzik }
2744c6fd2807SJeff Garzik 
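/*
 * Illustrative sketch (assumed driver code): a controller driver
 * typically calls ata_timing_compute() from its ->set_piomode() or
 * ->set_dmamode() hook and programs the quantised result, e.g.
 *
 *	struct ata_timing t;
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, UT))
 *		;	// write t.setup / t.active / t.recover to the chip
 *
 * where T and UT are the clock periods (in the units described above)
 * used for the PIO/MWDMA fields and the UDMA field respectively.
 */
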
2745c6fd2807SJeff Garzik /**
2746c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2747c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2748458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2749c6fd2807SJeff Garzik  *
2750c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2751c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2752c6fd2807SJeff Garzik  *	will apply the limit.
2753c6fd2807SJeff Garzik  *
2754c6fd2807SJeff Garzik  *	LOCKING:
2755c6fd2807SJeff Garzik  *	Inherited from caller.
2756c6fd2807SJeff Garzik  *
2757c6fd2807SJeff Garzik  *	RETURNS:
2758c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2759c6fd2807SJeff Garzik  */
2760458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2761c6fd2807SJeff Garzik {
2762458337dbSTejun Heo 	char buf[32];
2763458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2764458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2765458337dbSTejun Heo 	int quiet, highbit;
2766c6fd2807SJeff Garzik 
2767458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2768458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2769458337dbSTejun Heo 
2770458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2771458337dbSTejun Heo 						  dev->mwdma_mask,
2772c6fd2807SJeff Garzik 						  dev->udma_mask);
2773458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2774c6fd2807SJeff Garzik 
2775458337dbSTejun Heo 	switch (sel) {
2776458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2777458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2778458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2779458337dbSTejun Heo 		break;
2780458337dbSTejun Heo 
2781458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2782458337dbSTejun Heo 		if (udma_mask) {
2783458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2784458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2785458337dbSTejun Heo 			if (!udma_mask)
2786458337dbSTejun Heo 				return -ENOENT;
2787458337dbSTejun Heo 		} else if (mwdma_mask) {
2788458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2789458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2790458337dbSTejun Heo 			if (!mwdma_mask)
2791458337dbSTejun Heo 				return -ENOENT;
2792458337dbSTejun Heo 		}
2793458337dbSTejun Heo 		break;
2794458337dbSTejun Heo 
2795458337dbSTejun Heo 	case ATA_DNXFER_40C:
2796458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2797458337dbSTejun Heo 		break;
2798458337dbSTejun Heo 
2799458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2800458337dbSTejun Heo 		pio_mask &= 1;
2801458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2802458337dbSTejun Heo 		mwdma_mask = 0;
2803458337dbSTejun Heo 		udma_mask = 0;
2804458337dbSTejun Heo 		break;
2805458337dbSTejun Heo 
2806458337dbSTejun Heo 	default:
2807458337dbSTejun Heo 		BUG();
2808458337dbSTejun Heo 	}
2809458337dbSTejun Heo 
2810458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2811458337dbSTejun Heo 
2812458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2813458337dbSTejun Heo 		return -ENOENT;
2814458337dbSTejun Heo 
2815458337dbSTejun Heo 	if (!quiet) {
2816458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2817458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
2818458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
2819458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2820458337dbSTejun Heo 		else
2821458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
2822458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
2823458337dbSTejun Heo 
2824458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
2825458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
2826458337dbSTejun Heo 	}
2827c6fd2807SJeff Garzik 
2828c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2829c6fd2807SJeff Garzik 			    &dev->udma_mask);
2830c6fd2807SJeff Garzik 
2831c6fd2807SJeff Garzik 	return 0;
2832c6fd2807SJeff Garzik }
2833c6fd2807SJeff Garzik 
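/*
 * Illustrative sketch (assumed caller): a driver that has detected a
 * 40-wire cable could quietly clamp UDMA speeds before modes are
 * programmed, for example
 *
 *	ata_down_xfermask_limit(dev, ATA_DNXFER_40C | ATA_DNXFER_QUIET);
 *
 * ATA_DNXFER_QUIET only suppresses the "limiting speed to" message;
 * as the kerneldoc above notes, the new mask takes effect once
 * ata_set_mode() runs.
 */
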
2834c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
2835c6fd2807SJeff Garzik {
28369af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
2837c6fd2807SJeff Garzik 	unsigned int err_mask;
2838c6fd2807SJeff Garzik 	int rc;
2839c6fd2807SJeff Garzik 
2840c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
2841c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2842c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
2843c6fd2807SJeff Garzik 
2844c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
284511750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
284611750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
284711750a40SAlan 		err_mask &= ~AC_ERR_DEV;
28480bc2a79aSAlan Cox 	/* Some very old devices and some bad newer ones fail any kind of
28490bc2a79aSAlan Cox 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
28500bc2a79aSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
28510bc2a79aSAlan Cox 			dev->pio_mode <= XFER_PIO_2)
28520bc2a79aSAlan Cox 		err_mask &= ~AC_ERR_DEV;
2853c6fd2807SJeff Garzik 	if (err_mask) {
2854c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2855c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
2856c6fd2807SJeff Garzik 		return -EIO;
2857c6fd2807SJeff Garzik 	}
2858c6fd2807SJeff Garzik 
2859baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2860422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
2861baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2862c6fd2807SJeff Garzik 	if (rc)
2863c6fd2807SJeff Garzik 		return rc;
2864c6fd2807SJeff Garzik 
2865c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2866c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
2867c6fd2807SJeff Garzik 
2868c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2869c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2870c6fd2807SJeff Garzik 	return 0;
2871c6fd2807SJeff Garzik }
2872c6fd2807SJeff Garzik 
2873c6fd2807SJeff Garzik /**
287404351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
28750260731fSTejun Heo  *	@link: link on which timings will be programmed
2876c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
2877c6fd2807SJeff Garzik  *
287804351821SAlan  *	Standard implementation of the function used to tune and set
287904351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
288004351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
2881c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
2882c6fd2807SJeff Garzik  *
2883c6fd2807SJeff Garzik  *	LOCKING:
2884c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2885c6fd2807SJeff Garzik  *
2886c6fd2807SJeff Garzik  *	RETURNS:
2887c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
2888c6fd2807SJeff Garzik  */
288904351821SAlan 
28900260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2891c6fd2807SJeff Garzik {
28920260731fSTejun Heo 	struct ata_port *ap = link->ap;
2893c6fd2807SJeff Garzik 	struct ata_device *dev;
2894f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
2895c6fd2807SJeff Garzik 
2896c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
2897f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2898c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
2899c6fd2807SJeff Garzik 
2900c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2901c6fd2807SJeff Garzik 			continue;
2902c6fd2807SJeff Garzik 
2903c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
2904c6fd2807SJeff Garzik 
2905c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2906c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2907c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2908c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2909c6fd2807SJeff Garzik 
2910c6fd2807SJeff Garzik 		found = 1;
2911c6fd2807SJeff Garzik 		if (dev->dma_mode)
2912c6fd2807SJeff Garzik 			used_dma = 1;
2913c6fd2807SJeff Garzik 	}
2914c6fd2807SJeff Garzik 	if (!found)
2915c6fd2807SJeff Garzik 		goto out;
2916c6fd2807SJeff Garzik 
2917c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
2918f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2919c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2920c6fd2807SJeff Garzik 			continue;
2921c6fd2807SJeff Garzik 
2922c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
2923c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2924c6fd2807SJeff Garzik 			rc = -EINVAL;
2925c6fd2807SJeff Garzik 			goto out;
2926c6fd2807SJeff Garzik 		}
2927c6fd2807SJeff Garzik 
2928c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
2929c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
2930c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
2931c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
2932c6fd2807SJeff Garzik 	}
2933c6fd2807SJeff Garzik 
2934c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
2935f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2936c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2937c6fd2807SJeff Garzik 			continue;
2938c6fd2807SJeff Garzik 
2939c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
2940c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2941c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
2942c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
2943c6fd2807SJeff Garzik 	}
2944c6fd2807SJeff Garzik 
2945c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
2946f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
294718d90debSAlan 		/* don't update disabled devices' xfer mode */
29489666f400STejun Heo 		if (!ata_dev_enabled(dev))
2949c6fd2807SJeff Garzik 			continue;
2950c6fd2807SJeff Garzik 
2951c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
2952c6fd2807SJeff Garzik 		if (rc)
2953c6fd2807SJeff Garzik 			goto out;
2954c6fd2807SJeff Garzik 	}
2955c6fd2807SJeff Garzik 
2956c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
2957c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
2958c6fd2807SJeff Garzik 	 */
2959cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2960032af1ceSAlan 		ap->host->simplex_claimed = ap;
2961c6fd2807SJeff Garzik 
2962c6fd2807SJeff Garzik  out:
2963c6fd2807SJeff Garzik 	if (rc)
2964c6fd2807SJeff Garzik 		*r_failed_dev = dev;
2965c6fd2807SJeff Garzik 	return rc;
2966c6fd2807SJeff Garzik }
2967c6fd2807SJeff Garzik 
2968c6fd2807SJeff Garzik /**
296904351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
29700260731fSTejun Heo  *	@link: link on which timings will be programmed
297104351821SAlan  *	@r_failed_dev: out parameter for failed device
297204351821SAlan  *
297304351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
297404351821SAlan  *	ata_set_mode() fails, pointer to the failing device is
297504351821SAlan  *	returned in @r_failed_dev.
297604351821SAlan  *
297704351821SAlan  *	LOCKING:
297804351821SAlan  *	PCI/etc. bus probe sem.
297904351821SAlan  *
298004351821SAlan  *	RETURNS:
298104351821SAlan  *	0 on success, negative errno otherwise
298204351821SAlan  */
29830260731fSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
298404351821SAlan {
29850260731fSTejun Heo 	struct ata_port *ap = link->ap;
29860260731fSTejun Heo 
298704351821SAlan 	/* has private set_mode? */
298804351821SAlan 	if (ap->ops->set_mode)
29890260731fSTejun Heo 		return ap->ops->set_mode(link, r_failed_dev);
29900260731fSTejun Heo 	return ata_do_set_mode(link, r_failed_dev);
299104351821SAlan }
299204351821SAlan 
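/*
 * Illustrative sketch (hypothetical driver, names are assumptions): an
 * ->set_mode implementation usually applies controller-specific
 * restrictions and then delegates the real work:
 *
 *	static int foo_set_mode(struct ata_link *link,
 *				struct ata_device **r_failed_dev)
 *	{
 *		// e.g. trim dev->pio_mask / dev->udma_mask here first
 *		return ata_do_set_mode(link, r_failed_dev);
 *	}
 */
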
299304351821SAlan /**
2994c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
2995c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
2996c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
2997c6fd2807SJeff Garzik  *
2998c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
2999c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3000c6fd2807SJeff Garzik  *	other threads.
3001c6fd2807SJeff Garzik  *
3002c6fd2807SJeff Garzik  *	LOCKING:
3003cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3004c6fd2807SJeff Garzik  */
3005c6fd2807SJeff Garzik 
3006c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3007c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3008c6fd2807SJeff Garzik {
3009c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3010c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3011c6fd2807SJeff Garzik }
3012c6fd2807SJeff Garzik 
3013c6fd2807SJeff Garzik /**
3014c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3015c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3016c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3017c6fd2807SJeff Garzik  *	@tmout: overall timeout
3018c6fd2807SJeff Garzik  *
3019c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3020c6fd2807SJeff Garzik  *	or a timeout occurs.
3021c6fd2807SJeff Garzik  *
3022d1adc1bbSTejun Heo  *	LOCKING:
3023d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3024d1adc1bbSTejun Heo  *
3025d1adc1bbSTejun Heo  *	RETURNS:
3026d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3027c6fd2807SJeff Garzik  */
3028d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3029c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3030c6fd2807SJeff Garzik {
3031c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3032c6fd2807SJeff Garzik 	u8 status;
3033c6fd2807SJeff Garzik 
3034c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3035c6fd2807SJeff Garzik 	timer_start = jiffies;
3036c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3037d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3038d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3039c6fd2807SJeff Garzik 		msleep(50);
3040c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3041c6fd2807SJeff Garzik 	}
3042c6fd2807SJeff Garzik 
3043d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3044c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
304535aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
304635aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3047c6fd2807SJeff Garzik 
3048c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3049d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3050d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3051c6fd2807SJeff Garzik 		msleep(50);
3052c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3053c6fd2807SJeff Garzik 	}
3054c6fd2807SJeff Garzik 
3055d1adc1bbSTejun Heo 	if (status == 0xff)
3056d1adc1bbSTejun Heo 		return -ENODEV;
3057d1adc1bbSTejun Heo 
3058c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3059c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
306035aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
306135aa7a43SJeff Garzik 				tmout / HZ, status);
3062d1adc1bbSTejun Heo 		return -EBUSY;
3063c6fd2807SJeff Garzik 	}
3064c6fd2807SJeff Garzik 
3065c6fd2807SJeff Garzik 	return 0;
3066c6fd2807SJeff Garzik }
3067c6fd2807SJeff Garzik 
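/*
 * Illustrative call (the constants are the ones used by
 * __sata_phy_reset() above):
 *
 *	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *
 * i.e. warn once the "impatience" timeout expires but keep polling,
 * and only give up with -EBUSY after the overall timeout.
 */
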
3068d4b2bab4STejun Heo /**
3069d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3070d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3071d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3072d4b2bab4STejun Heo  *
3073d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3074d4b2bab4STejun Heo  *	occurs.
3075d4b2bab4STejun Heo  *
3076d4b2bab4STejun Heo  *	LOCKING:
3077d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3078d4b2bab4STejun Heo  *
3079d4b2bab4STejun Heo  *	RETURNS:
3080d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3081d4b2bab4STejun Heo  */
3082d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3083d4b2bab4STejun Heo {
3084d4b2bab4STejun Heo 	unsigned long start = jiffies;
3085d4b2bab4STejun Heo 	int warned = 0;
3086d4b2bab4STejun Heo 
3087d4b2bab4STejun Heo 	while (1) {
3088d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3089d4b2bab4STejun Heo 		unsigned long now = jiffies;
3090d4b2bab4STejun Heo 
3091d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3092d4b2bab4STejun Heo 			return 0;
3093936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3094d4b2bab4STejun Heo 			return -ENODEV;
3095d4b2bab4STejun Heo 		if (time_after(now, deadline))
3096d4b2bab4STejun Heo 			return -EBUSY;
3097d4b2bab4STejun Heo 
3098d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3099d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3100d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3101d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3102d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3103d4b2bab4STejun Heo 			warned = 1;
3104d4b2bab4STejun Heo 		}
3105d4b2bab4STejun Heo 
3106d4b2bab4STejun Heo 		msleep(50);
3107d4b2bab4STejun Heo 	}
3108d4b2bab4STejun Heo }
3109d4b2bab4STejun Heo 
3110d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3111d4b2bab4STejun Heo 			      unsigned long deadline)
3112c6fd2807SJeff Garzik {
3113c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3114c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3115c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
31169b89391cSTejun Heo 	int rc, ret = 0;
3117c6fd2807SJeff Garzik 
3118c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3119c6fd2807SJeff Garzik 	 * BSY bit to clear
3120c6fd2807SJeff Garzik 	 */
3121d4b2bab4STejun Heo 	if (dev0) {
3122d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
31239b89391cSTejun Heo 		if (rc) {
31249b89391cSTejun Heo 			if (rc != -ENODEV)
3125d4b2bab4STejun Heo 				return rc;
31269b89391cSTejun Heo 			ret = rc;
31279b89391cSTejun Heo 		}
3128d4b2bab4STejun Heo 	}
3129c6fd2807SJeff Garzik 
3130e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3131e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3132c6fd2807SJeff Garzik 	 */
3133e141d999STejun Heo 	if (dev1) {
3134e141d999STejun Heo 		int i;
3135c6fd2807SJeff Garzik 
3136c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3137e141d999STejun Heo 
3138e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3139e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3140e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3141e141d999STejun Heo 		 */
3142e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3143e141d999STejun Heo 			u8 nsect, lbal;
3144e141d999STejun Heo 
31450d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
31460d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3147c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3148c6fd2807SJeff Garzik 				break;
3149c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3150c6fd2807SJeff Garzik 		}
3151e141d999STejun Heo 
3152d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
31539b89391cSTejun Heo 		if (rc) {
31549b89391cSTejun Heo 			if (rc != -ENODEV)
3155d4b2bab4STejun Heo 				return rc;
31569b89391cSTejun Heo 			ret = rc;
31579b89391cSTejun Heo 		}
3158d4b2bab4STejun Heo 	}
3159c6fd2807SJeff Garzik 
3160c6fd2807SJeff Garzik 	/* is all this really necessary? */
3161c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3162c6fd2807SJeff Garzik 	if (dev1)
3163c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3164c6fd2807SJeff Garzik 	if (dev0)
3165c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3166d4b2bab4STejun Heo 
31679b89391cSTejun Heo 	return ret;
3168c6fd2807SJeff Garzik }
3169c6fd2807SJeff Garzik 
3170d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3171d4b2bab4STejun Heo 			     unsigned long deadline)
3172c6fd2807SJeff Garzik {
3173c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3174c6fd2807SJeff Garzik 
317544877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3176c6fd2807SJeff Garzik 
3177c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
31780d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3179c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
31800d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3181c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
31820d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3183c6fd2807SJeff Garzik 
3184c6fd2807SJeff Garzik 	/* spec mandates ">= 2ms" before checking status.
3185c6fd2807SJeff Garzik 	 * We wait 150ms, because that was the magic delay used for
3186c6fd2807SJeff Garzik 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3187c6fd2807SJeff Garzik 	 * between when the ATA command register is written, and then
3188c6fd2807SJeff Garzik 	 * status is checked.  Because waiting for "a while" before
3189c6fd2807SJeff Garzik 	 * checking status is fine, post SRST, we perform this magic
3190c6fd2807SJeff Garzik 	 * delay here as well.
3191c6fd2807SJeff Garzik 	 *
3192c6fd2807SJeff Garzik 	 * Old drivers/ide uses the 2mS rule and then waits for ready
3193c6fd2807SJeff Garzik 	 */
3194c6fd2807SJeff Garzik 	msleep(150);
3195c6fd2807SJeff Garzik 
3196c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3197c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3198c6fd2807SJeff Garzik 	 * pulldown resistor.
3199c6fd2807SJeff Garzik 	 */
3200d1adc1bbSTejun Heo 	if (ata_check_status(ap) == 0xFF)
32019b89391cSTejun Heo 		return -ENODEV;
3202c6fd2807SJeff Garzik 
3203d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3204c6fd2807SJeff Garzik }
3205c6fd2807SJeff Garzik 
3206c6fd2807SJeff Garzik /**
3207c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3208c6fd2807SJeff Garzik  *	@ap: port to reset
3209c6fd2807SJeff Garzik  *
3210c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3211c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3212c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3213c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3214c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3215c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3216c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3217c6fd2807SJeff Garzik  *
3218c6fd2807SJeff Garzik  *	LOCKING:
3219c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3220cca3974eSJeff Garzik  *	Obtains host lock.
3221c6fd2807SJeff Garzik  *
3222c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3223c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3224c6fd2807SJeff Garzik  */
3225c6fd2807SJeff Garzik 
3226c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3227c6fd2807SJeff Garzik {
32289af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3229c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3230c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3231c6fd2807SJeff Garzik 	u8 err;
3232c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
32339b89391cSTejun Heo 	int rc;
3234c6fd2807SJeff Garzik 
323544877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3236c6fd2807SJeff Garzik 
3237c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3238c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3239c6fd2807SJeff Garzik 		dev0 = 1;
3240c6fd2807SJeff Garzik 	else {
3241c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3242c6fd2807SJeff Garzik 		if (slave_possible)
3243c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3244c6fd2807SJeff Garzik 	}
3245c6fd2807SJeff Garzik 
3246c6fd2807SJeff Garzik 	if (dev0)
3247c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3248c6fd2807SJeff Garzik 	if (dev1)
3249c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3250c6fd2807SJeff Garzik 
3251c6fd2807SJeff Garzik 	/* select device 0 again */
3252c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3253c6fd2807SJeff Garzik 
3254c6fd2807SJeff Garzik 	/* issue bus reset */
32559b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
32569b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
32579b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3258c6fd2807SJeff Garzik 			goto err_out;
32599b89391cSTejun Heo 	}
3260c6fd2807SJeff Garzik 
3261c6fd2807SJeff Garzik 	/*
3262c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3263c6fd2807SJeff Garzik 	 */
32643f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3265c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
32663f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3267c6fd2807SJeff Garzik 
3268c6fd2807SJeff Garzik 	/* is double-select really necessary? */
32699af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3270c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
32719af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3272c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3273c6fd2807SJeff Garzik 
3274c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
32759af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
32769af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3277c6fd2807SJeff Garzik 		goto err_out;
3278c6fd2807SJeff Garzik 
3279c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3280c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
32810d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3282c6fd2807SJeff Garzik 	}
3283c6fd2807SJeff Garzik 
3284c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3285c6fd2807SJeff Garzik 	return;
3286c6fd2807SJeff Garzik 
3287c6fd2807SJeff Garzik err_out:
3288c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3289ac8869d5SJeff Garzik 	ata_port_disable(ap);
3290c6fd2807SJeff Garzik 
3291c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3292c6fd2807SJeff Garzik }
3293c6fd2807SJeff Garzik 
3294c6fd2807SJeff Garzik /**
3295936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3296936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3297c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3298d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3299c6fd2807SJeff Garzik  *
3300936fd732STejun Heo  *	Make sure SStatus of @link reaches a stable state, determined by
3301c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3302c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3303d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3304d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3305c6fd2807SJeff Garzik  *	until timeout and then returns 0 if DET is stable at 1.
3306c6fd2807SJeff Garzik  *
3307d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3308d4b2bab4STejun Heo  *	two is used.
3309d4b2bab4STejun Heo  *
3310c6fd2807SJeff Garzik  *	LOCKING:
3311c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3312c6fd2807SJeff Garzik  *
3313c6fd2807SJeff Garzik  *	RETURNS:
3314c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3315c6fd2807SJeff Garzik  */
3316936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3317d4b2bab4STejun Heo 		       unsigned long deadline)
3318c6fd2807SJeff Garzik {
3319c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3320d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3321d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3322c6fd2807SJeff Garzik 	u32 last, cur;
3323c6fd2807SJeff Garzik 	int rc;
3324c6fd2807SJeff Garzik 
3325d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3326d4b2bab4STejun Heo 	if (time_before(t, deadline))
3327d4b2bab4STejun Heo 		deadline = t;
3328d4b2bab4STejun Heo 
3329936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3330c6fd2807SJeff Garzik 		return rc;
3331c6fd2807SJeff Garzik 	cur &= 0xf;
3332c6fd2807SJeff Garzik 
3333c6fd2807SJeff Garzik 	last = cur;
3334c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3335c6fd2807SJeff Garzik 
3336c6fd2807SJeff Garzik 	while (1) {
3337c6fd2807SJeff Garzik 		msleep(interval_msec);
3338936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3339c6fd2807SJeff Garzik 			return rc;
3340c6fd2807SJeff Garzik 		cur &= 0xf;
3341c6fd2807SJeff Garzik 
3342c6fd2807SJeff Garzik 		/* DET stable? */
3343c6fd2807SJeff Garzik 		if (cur == last) {
3344d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3345c6fd2807SJeff Garzik 				continue;
3346c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3347c6fd2807SJeff Garzik 				return 0;
3348c6fd2807SJeff Garzik 			continue;
3349c6fd2807SJeff Garzik 		}
3350c6fd2807SJeff Garzik 
3351c6fd2807SJeff Garzik 		/* unstable, start over */
3352c6fd2807SJeff Garzik 		last = cur;
3353c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3354c6fd2807SJeff Garzik 
3355f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3356f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3357f1545154STejun Heo 		 */
3358d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3359f1545154STejun Heo 			return -EPIPE;
3360c6fd2807SJeff Garzik 	}
3361c6fd2807SJeff Garzik }
3362c6fd2807SJeff Garzik 
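/*
 * Reading of the @params triple above (values are assumed, in msec):
 * with params = { 10, 200, 1000 }, SStatus.DET is sampled every 10 ms,
 * must hold the same non-1 value for 200 ms to count as stable, and
 * that stable window must begin within 1000 ms (further capped by
 * @deadline; if DET is still bouncing past that point the function
 * gives up with -EPIPE).
 */
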
3363c6fd2807SJeff Garzik /**
3364936fd732STejun Heo  *	sata_link_resume - resume SATA link
3365936fd732STejun Heo  *	@link: ATA link to resume SATA
3366c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3367d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3368c6fd2807SJeff Garzik  *
3369936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3370c6fd2807SJeff Garzik  *
3371c6fd2807SJeff Garzik  *	LOCKING:
3372c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3373c6fd2807SJeff Garzik  *
3374c6fd2807SJeff Garzik  *	RETURNS:
3375c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3376c6fd2807SJeff Garzik  */
3377936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3378d4b2bab4STejun Heo 		     unsigned long deadline)
3379c6fd2807SJeff Garzik {
3380c6fd2807SJeff Garzik 	u32 scontrol;
3381c6fd2807SJeff Garzik 	int rc;
3382c6fd2807SJeff Garzik 
3383936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3384c6fd2807SJeff Garzik 		return rc;
3385c6fd2807SJeff Garzik 
3386c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3387c6fd2807SJeff Garzik 
3388936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3389c6fd2807SJeff Garzik 		return rc;
3390c6fd2807SJeff Garzik 
3391c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3392c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3393c6fd2807SJeff Garzik 	 */
3394c6fd2807SJeff Garzik 	msleep(200);
3395c6fd2807SJeff Garzik 
3396936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3397c6fd2807SJeff Garzik }
3398c6fd2807SJeff Garzik 
3399c6fd2807SJeff Garzik /**
3400c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3401cc0680a5STejun Heo  *	@link: ATA link to be reset
3402d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3403c6fd2807SJeff Garzik  *
3404cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  Failure from
3405b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give up
3406b8cffc6aSTejun Heo  *	that port, so prereset should be best-effort.  It does its
3407b8cffc6aSTejun Heo  *	best to prepare for reset sequence but if things go wrong, it
3408b8cffc6aSTejun Heo  *	should just whine, not fail.
3409c6fd2807SJeff Garzik  *
3410c6fd2807SJeff Garzik  *	LOCKING:
3411c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3412c6fd2807SJeff Garzik  *
3413c6fd2807SJeff Garzik  *	RETURNS:
3414c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3415c6fd2807SJeff Garzik  */
3416cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3417c6fd2807SJeff Garzik {
3418cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3419936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3420c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3421c6fd2807SJeff Garzik 	int rc;
3422c6fd2807SJeff Garzik 
342331daabdaSTejun Heo 	/* handle link resume */
3424c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
34250c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3426c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3427c6fd2807SJeff Garzik 
3428c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3429c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3430c6fd2807SJeff Garzik 		return 0;
3431c6fd2807SJeff Garzik 
3432936fd732STejun Heo 	/* if SATA, resume link */
3433a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3434936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3435b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3436b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3437cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3438c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3439c6fd2807SJeff Garzik 	}
3440c6fd2807SJeff Garzik 
3441c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3442c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3443c6fd2807SJeff Garzik 	 */
34440c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3445b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
34466dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3447cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3448b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3449b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3450b8cffc6aSTejun Heo 		}
3451b8cffc6aSTejun Heo 	}
3452c6fd2807SJeff Garzik 
3453c6fd2807SJeff Garzik 	return 0;
3454c6fd2807SJeff Garzik }
3455c6fd2807SJeff Garzik 
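/*
 * Illustrative sketch (hypothetical driver, names are assumptions): a
 * driver that only needs a little extra work before reset usually
 * wraps the standard helper rather than replacing it:
 *
 *	static int foo_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		// quiesce controller-specific state here, best-effort
 *		return ata_std_prereset(link, deadline);
 *	}
 */
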
3456c6fd2807SJeff Garzik /**
3457c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3458cc0680a5STejun Heo  *	@link: ATA link to reset
3459c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3460d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3461c6fd2807SJeff Garzik  *
3462c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3463c6fd2807SJeff Garzik  *
3464c6fd2807SJeff Garzik  *	LOCKING:
3465c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3466c6fd2807SJeff Garzik  *
3467c6fd2807SJeff Garzik  *	RETURNS:
3468c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3469c6fd2807SJeff Garzik  */
3470cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3471d4b2bab4STejun Heo 		      unsigned long deadline)
3472c6fd2807SJeff Garzik {
3473cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3474c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3475d4b2bab4STejun Heo 	unsigned int devmask = 0;
3476d4b2bab4STejun Heo 	int rc;
3477c6fd2807SJeff Garzik 	u8 err;
3478c6fd2807SJeff Garzik 
3479c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3480c6fd2807SJeff Garzik 
3481936fd732STejun Heo 	if (ata_link_offline(link)) {
3482c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3483c6fd2807SJeff Garzik 		goto out;
3484c6fd2807SJeff Garzik 	}
3485c6fd2807SJeff Garzik 
3486c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3487c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3488c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3489c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3490c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3491c6fd2807SJeff Garzik 
3492c6fd2807SJeff Garzik 	/* select device 0 again */
3493c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3494c6fd2807SJeff Garzik 
3495c6fd2807SJeff Garzik 	/* issue bus reset */
3496c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3497d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
34989b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
3499936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3500cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3501d4b2bab4STejun Heo 		return rc;
3502c6fd2807SJeff Garzik 	}
3503c6fd2807SJeff Garzik 
3504c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
35053f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
35063f19859eSTejun Heo 					  devmask & (1 << 0), &err);
3507c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
35083f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
35093f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3510c6fd2807SJeff Garzik 
3511c6fd2807SJeff Garzik  out:
3512c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3513c6fd2807SJeff Garzik 	return 0;
3514c6fd2807SJeff Garzik }
3515c6fd2807SJeff Garzik 
3516c6fd2807SJeff Garzik /**
3517cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
3518cc0680a5STejun Heo  *	@link: link to reset
3519b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3520d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3521c6fd2807SJeff Garzik  *
3522cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
3523c6fd2807SJeff Garzik  *
3524c6fd2807SJeff Garzik  *	LOCKING:
3525c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3526c6fd2807SJeff Garzik  *
3527c6fd2807SJeff Garzik  *	RETURNS:
3528c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3529c6fd2807SJeff Garzik  */
3530cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3531d4b2bab4STejun Heo 			unsigned long deadline)
3532c6fd2807SJeff Garzik {
3533c6fd2807SJeff Garzik 	u32 scontrol;
3534c6fd2807SJeff Garzik 	int rc;
3535c6fd2807SJeff Garzik 
3536c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3537c6fd2807SJeff Garzik 
3538936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
3539c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3540c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3541c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3542c6fd2807SJeff Garzik 		 * and Sil3124.
3543c6fd2807SJeff Garzik 		 */
3544936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3545b6103f6dSTejun Heo 			goto out;
3546c6fd2807SJeff Garzik 
3547cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3548c6fd2807SJeff Garzik 
3549936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3550b6103f6dSTejun Heo 			goto out;
3551c6fd2807SJeff Garzik 
3552936fd732STejun Heo 		sata_set_spd(link);
3553c6fd2807SJeff Garzik 	}
3554c6fd2807SJeff Garzik 
3555c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3556936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3557b6103f6dSTejun Heo 		goto out;
3558c6fd2807SJeff Garzik 
3559c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3560c6fd2807SJeff Garzik 
3561936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3562b6103f6dSTejun Heo 		goto out;
3563c6fd2807SJeff Garzik 
3564c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3565c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3566c6fd2807SJeff Garzik 	 */
3567c6fd2807SJeff Garzik 	msleep(1);
3568c6fd2807SJeff Garzik 
3569936fd732STejun Heo 	/* bring link back */
3570936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
3571b6103f6dSTejun Heo  out:
3572b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3573b6103f6dSTejun Heo 	return rc;
3574b6103f6dSTejun Heo }
3575b6103f6dSTejun Heo 
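/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * The magic SControl values above pack three 4-bit fields: DET in
 * bits 3:0, SPD in bits 7:4 and IPM in bits 11:8.  Masking with
 * 0x0f0 preserves the configured speed limit, 0x304 means "phy
 * offline, power-management transitions disabled" and 0x301 means
 * "issue COMRESET, power-management transitions disabled".  With
 * named constants (names are illustrative only):
 */
#if 0
#define SCTL_DET_INIT	0x001	/* DET=1: start interface init (COMRESET) */
#define SCTL_DET_OFF	0x004	/* DET=4: take the phy offline */
#define SCTL_SPD_MASK	0x0f0	/* SPD: speed limit, preserved here */
#define SCTL_IPM_NO_PM	0x300	/* IPM=3: no partial/slumber transitions */

scontrol = (scontrol & SCTL_SPD_MASK) | SCTL_IPM_NO_PM | SCTL_DET_INIT;
#endif
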
3576b6103f6dSTejun Heo /**
3577b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3578cc0680a5STejun Heo  *	@link: link to reset
3579b6103f6dSTejun Heo  *	@class: resulting class of attached device
3580d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3581b6103f6dSTejun Heo  *
3582b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3583b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3584b6103f6dSTejun Heo  *
3585b6103f6dSTejun Heo  *	LOCKING:
3586b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3587b6103f6dSTejun Heo  *
3588b6103f6dSTejun Heo  *	RETURNS:
3589b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3590b6103f6dSTejun Heo  */
3591cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3592d4b2bab4STejun Heo 		       unsigned long deadline)
3593b6103f6dSTejun Heo {
3594cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3595936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3596b6103f6dSTejun Heo 	int rc;
3597b6103f6dSTejun Heo 
3598b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3599b6103f6dSTejun Heo 
3600b6103f6dSTejun Heo 	/* do hardreset */
3601cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
3602b6103f6dSTejun Heo 	if (rc) {
3603cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3604b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3605b6103f6dSTejun Heo 		return rc;
3606b6103f6dSTejun Heo 	}
3607c6fd2807SJeff Garzik 
3608c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3609936fd732STejun Heo 	if (ata_link_offline(link)) {
3610c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3611c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3612c6fd2807SJeff Garzik 		return 0;
3613c6fd2807SJeff Garzik 	}
3614c6fd2807SJeff Garzik 
361534fee227STejun Heo 	/* wait a while before checking status, see SRST for more info */
361634fee227STejun Heo 	msleep(150);
361734fee227STejun Heo 
3618d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
36199b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
36209b89391cSTejun Heo 	if (rc) {
3621cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3622d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3623d4b2bab4STejun Heo 		return rc;
3624c6fd2807SJeff Garzik 	}
3625c6fd2807SJeff Garzik 
3626c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3627c6fd2807SJeff Garzik 
36283f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
3629c6fd2807SJeff Garzik 
3630c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3631c6fd2807SJeff Garzik 	return 0;
3632c6fd2807SJeff Garzik }
3633c6fd2807SJeff Garzik 
3634c6fd2807SJeff Garzik /**
3635c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3636cc0680a5STejun Heo  *	@link: the target ata_link
3637c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3638c6fd2807SJeff Garzik  *
3639c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3640c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3641c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3642c6fd2807SJeff Garzik  *
3643c6fd2807SJeff Garzik  *	LOCKING:
3644c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3645c6fd2807SJeff Garzik  */
3646cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3647c6fd2807SJeff Garzik {
3648cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3649c6fd2807SJeff Garzik 	u32 serror;
3650c6fd2807SJeff Garzik 
3651c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3652c6fd2807SJeff Garzik 
3653c6fd2807SJeff Garzik 	/* print link status */
3654936fd732STejun Heo 	sata_print_link_status(link);
3655c6fd2807SJeff Garzik 
3656c6fd2807SJeff Garzik 	/* clear SError */
3657936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3658936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
3659c6fd2807SJeff Garzik 
3660c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3661c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3662c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3663c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3664c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3665c6fd2807SJeff Garzik 
3666c6fd2807SJeff Garzik 	/* bail out if no device is present */
3667c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3668c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3669c6fd2807SJeff Garzik 		return;
3670c6fd2807SJeff Garzik 	}
3671c6fd2807SJeff Garzik 
3672c6fd2807SJeff Garzik 	/* set up device control */
36730d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
36740d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3675c6fd2807SJeff Garzik 
3676c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3677c6fd2807SJeff Garzik }
3678c6fd2807SJeff Garzik 
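/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * The four standard callbacks above are normally wired together by a
 * LLDD's ->error_handler, e.g. via the SFF helper (my_error_handler
 * is a hypothetical name):
 */
#if 0
static void my_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   sata_std_hardreset, ata_std_postreset);
}
#endif
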
3679c6fd2807SJeff Garzik /**
3680c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3681c6fd2807SJeff Garzik  *	@dev: device to compare against
3682c6fd2807SJeff Garzik  *	@new_class: class of the new device
3683c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3684c6fd2807SJeff Garzik  *
3685c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3686c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3687c6fd2807SJeff Garzik  *	@new_id.
3688c6fd2807SJeff Garzik  *
3689c6fd2807SJeff Garzik  *	LOCKING:
3690c6fd2807SJeff Garzik  *	None.
3691c6fd2807SJeff Garzik  *
3692c6fd2807SJeff Garzik  *	RETURNS:
3693c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3694c6fd2807SJeff Garzik  */
3695c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3696c6fd2807SJeff Garzik 			       const u16 *new_id)
3697c6fd2807SJeff Garzik {
3698c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3699a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3700a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3701c6fd2807SJeff Garzik 
3702c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3703c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3704c6fd2807SJeff Garzik 			       dev->class, new_class);
3705c6fd2807SJeff Garzik 		return 0;
3706c6fd2807SJeff Garzik 	}
3707c6fd2807SJeff Garzik 
3708a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3709a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3710a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3711a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3712c6fd2807SJeff Garzik 
3713c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3714c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3715c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3716c6fd2807SJeff Garzik 		return 0;
3717c6fd2807SJeff Garzik 	}
3718c6fd2807SJeff Garzik 
3719c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3720c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3721c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3722c6fd2807SJeff Garzik 		return 0;
3723c6fd2807SJeff Garzik 	}
3724c6fd2807SJeff Garzik 
3725c6fd2807SJeff Garzik 	return 1;
3726c6fd2807SJeff Garzik }
3727c6fd2807SJeff Garzik 
3728c6fd2807SJeff Garzik /**
3729fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
37303fae450cSHenrik Kretzschmar  *	@dev: target ATA device
3731bff04647STejun Heo  *	@readid_flags: read ID flags
3732c6fd2807SJeff Garzik  *
3733c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
3734c6fd2807SJeff Garzik  *	the port.
3735c6fd2807SJeff Garzik  *
3736c6fd2807SJeff Garzik  *	LOCKING:
3737c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3738c6fd2807SJeff Garzik  *
3739c6fd2807SJeff Garzik  *	RETURNS:
3740c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3741c6fd2807SJeff Garzik  */
3742fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3743c6fd2807SJeff Garzik {
3744c6fd2807SJeff Garzik 	unsigned int class = dev->class;
37459af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
3746c6fd2807SJeff Garzik 	int rc;
3747c6fd2807SJeff Garzik 
3748c6fd2807SJeff Garzik 	/* read ID data */
3749bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3750c6fd2807SJeff Garzik 	if (rc)
3751fe30911bSTejun Heo 		return rc;
3752c6fd2807SJeff Garzik 
3753c6fd2807SJeff Garzik 	/* is the device still there? */
3754fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
3755fe30911bSTejun Heo 		return -ENODEV;
3756c6fd2807SJeff Garzik 
3757c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3758fe30911bSTejun Heo 	return 0;
3759fe30911bSTejun Heo }
3760fe30911bSTejun Heo 
3761fe30911bSTejun Heo /**
3762fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
3763fe30911bSTejun Heo  *	@dev: device to revalidate
3764422c9daaSTejun Heo  *	@new_class: new class code
3765fe30911bSTejun Heo  *	@readid_flags: read ID flags
3766fe30911bSTejun Heo  *
3767fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3768fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
3769fe30911bSTejun Heo  *
3770fe30911bSTejun Heo  *	LOCKING:
3771fe30911bSTejun Heo  *	Kernel thread context (may sleep)
3772fe30911bSTejun Heo  *
3773fe30911bSTejun Heo  *	RETURNS:
3774fe30911bSTejun Heo  *	0 on success, negative errno otherwise
3775fe30911bSTejun Heo  */
3776422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3777422c9daaSTejun Heo 		       unsigned int readid_flags)
3778fe30911bSTejun Heo {
37796ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
3780fe30911bSTejun Heo 	int rc;
3781fe30911bSTejun Heo 
3782fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
3783fe30911bSTejun Heo 		return -ENODEV;
3784fe30911bSTejun Heo 
3785422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3786422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
3787422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3788422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3789422c9daaSTejun Heo 			       dev->class, new_class);
3790422c9daaSTejun Heo 		rc = -ENODEV;
3791422c9daaSTejun Heo 		goto fail;
3792422c9daaSTejun Heo 	}
3793422c9daaSTejun Heo 
3794fe30911bSTejun Heo 	/* re-read ID */
3795fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
3796fe30911bSTejun Heo 	if (rc)
3797fe30911bSTejun Heo 		goto fail;
3798c6fd2807SJeff Garzik 
3799c6fd2807SJeff Garzik 	/* configure device according to the new ID */
3800efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
38016ddcd3b0STejun Heo 	if (rc)
38026ddcd3b0STejun Heo 		goto fail;
38036ddcd3b0STejun Heo 
38046ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
3805b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3806b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
38076ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
38086ddcd3b0STejun Heo 			       "%llu != %llu\n",
38096ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
38106ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
38118270bec4STejun Heo 
38128270bec4STejun Heo 		/* restore original n_sectors */
38138270bec4STejun Heo 		dev->n_sectors = n_sectors;
38148270bec4STejun Heo 
38156ddcd3b0STejun Heo 		rc = -ENODEV;
38166ddcd3b0STejun Heo 		goto fail;
38176ddcd3b0STejun Heo 	}
38186ddcd3b0STejun Heo 
3819c6fd2807SJeff Garzik 	return 0;
3820c6fd2807SJeff Garzik 
3821c6fd2807SJeff Garzik  fail:
3822c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3823c6fd2807SJeff Garzik 	return rc;
3824c6fd2807SJeff Garzik }
3825c6fd2807SJeff Garzik 
38266919a0a6SAlan Cox struct ata_blacklist_entry {
38276919a0a6SAlan Cox 	const char *model_num;
38286919a0a6SAlan Cox 	const char *model_rev;
38296919a0a6SAlan Cox 	unsigned long horkage;
38306919a0a6SAlan Cox };
38316919a0a6SAlan Cox 
38326919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
38336919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
38346919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
38356919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
38366919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
38376919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
38386919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
38396919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
38406919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
38416919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
38426919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
38436919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
38446919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
38456919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
38466919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
38476919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
38486919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
38496919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
38506919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
38516919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
38526919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
38536919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
38546919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
38556919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
38566919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
38576919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
38586919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
38596919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
38606919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
38616919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
38626919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
386339f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
38645acd50f6STejun Heo 	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
386539ce7128STejun Heo 	{ "IOMEGA  ZIP 250       ATAPI       Floppy",
386639ce7128STejun Heo 				NULL,		ATA_HORKAGE_NODMA },
38676919a0a6SAlan Cox 
386818d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
386940a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
387018d6e9d5SAlbert Lee 
38716919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
38726919a0a6SAlan Cox 
38736919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
38746919a0a6SAlan Cox 	/* NCQ is slow */
38756919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },
387609125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
387709125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
38787acfaf30SPaul Rolland 	/* NCQ is broken */
3879539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
38800e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
38812f8d90abSPrarit Bhargava 	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
38822f8d90abSPrarit Bhargava 	  ATA_HORKAGE_NONCQ },
3883539cc7c7SJeff Garzik 
388436e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
388536e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
388636e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
388736e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
388836e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3889bd9c5a39STejun Heo 	/* Drives which do spurious command completion */
3890bd9c5a39STejun Heo 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
38912f8fcebbSTejun Heo 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
3892e14cbfa6STejun Heo 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
38932f8fcebbSTejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3894a520f261STejun Heo 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
38953fb6589cSTejun Heo 	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
38960e3dbc01SAlan Cox 	{ "ST3160812AS",	"3.AD",		ATA_HORKAGE_NONCQ, },
38975d6aca8dSTejun Heo 	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
38986919a0a6SAlan Cox 
389916c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
390016c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
390116c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
390216c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
390316c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
39046919a0a6SAlan Cox 
39056919a0a6SAlan Cox 	/* End Marker */
39066919a0a6SAlan Cox 	{ }
3907c6fd2807SJeff Garzik };
3908c6fd2807SJeff Garzik 
3909539cc7c7SJeff Garzik static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3910539cc7c7SJeff Garzik {
3911539cc7c7SJeff Garzik 	const char *p;
3912539cc7c7SJeff Garzik 	int len;
3913539cc7c7SJeff Garzik 
3914539cc7c7SJeff Garzik 	/*
3915539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
3916539cc7c7SJeff Garzik 	 */
3917539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
3918539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
3919539cc7c7SJeff Garzik 		len = p - patt;
3920539cc7c7SJeff Garzik 	else
3921539cc7c7SJeff Garzik 		len = strlen(name);
3922539cc7c7SJeff Garzik 
3923539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
3924539cc7c7SJeff Garzik }
3925539cc7c7SJeff Garzik 
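/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * strn_pattern_cmp() returns 0 on a match and honours a single
 * trailing wildcard, which is how table entries such as
 * { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ } work.  The model/rev
 * strings below are made up for illustration:
 */
#if 0
strn_pattern_cmp("Maxtor *", "Maxtor 6L250S0", '*');	/* 0: prefix matches */
strn_pattern_cmp("BANC*", "BANC1G10", '*');		/* 0: prefix matches */
strn_pattern_cmp("ST3160812AS", "ST3250820AS", '*');	/* non-zero: no match */
#endif
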
392675683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3927c6fd2807SJeff Garzik {
39288bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
39298bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
39306919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3931c6fd2807SJeff Garzik 
39328bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
39338bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3934c6fd2807SJeff Garzik 
39356919a0a6SAlan Cox 	while (ad->model_num) {
3936539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
39376919a0a6SAlan Cox 			if (ad->model_rev == NULL)
39386919a0a6SAlan Cox 				return ad->horkage;
3939539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
39406919a0a6SAlan Cox 				return ad->horkage;
3941c6fd2807SJeff Garzik 		}
39426919a0a6SAlan Cox 		ad++;
3943c6fd2807SJeff Garzik 	}
3944c6fd2807SJeff Garzik 	return 0;
3945c6fd2807SJeff Garzik }
3946c6fd2807SJeff Garzik 
39476919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
39486919a0a6SAlan Cox {
39496919a0a6SAlan Cox 	/* We don't support polling DMA.
39506919a0a6SAlan Cox 	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with
39516919a0a6SAlan Cox 	 * CDB-intr if the LLDD handles interrupts only in HSM_ST_LAST state.
39526919a0a6SAlan Cox 	 */
39539af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
39546919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
39556919a0a6SAlan Cox 		return 1;
395675683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
39576919a0a6SAlan Cox }
39586919a0a6SAlan Cox 
3959c6fd2807SJeff Garzik /**
3960c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
3961c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
3962c6fd2807SJeff Garzik  *
3963c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
3964c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
3965c6fd2807SJeff Garzik  *	known limits including host controller limits, device
3966c6fd2807SJeff Garzik  *	blacklist, etc...
3967c6fd2807SJeff Garzik  *
3968c6fd2807SJeff Garzik  *	LOCKING:
3969c6fd2807SJeff Garzik  *	None.
3970c6fd2807SJeff Garzik  */
3971c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
3972c6fd2807SJeff Garzik {
39739af5c9c9STejun Heo 	struct ata_link *link = dev->link;
39749af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
3975cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
3976c6fd2807SJeff Garzik 	unsigned long xfer_mask;
3977c6fd2807SJeff Garzik 
3978c6fd2807SJeff Garzik 	/* controller modes available */
3979c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
3980c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
3981c6fd2807SJeff Garzik 
39828343f889SRobert Hancock 	/* drive modes available */
3983c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3984c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
3985c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
3986c6fd2807SJeff Garzik 
3987b352e57dSAlan Cox 	/*
3988b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
3989b352e57dSAlan Cox 	 *	cable
3990b352e57dSAlan Cox 	 */
3991b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
3992b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
3993b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3994b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
3995b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3996b352e57dSAlan Cox 	}
3997b352e57dSAlan Cox 
3998c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
3999c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4000c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4001c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4002c6fd2807SJeff Garzik 	}
4003c6fd2807SJeff Garzik 
400414d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
400514d66ab7SPetr Vandrovec             host->simplex_claimed && host->simplex_claimed != ap) {
4006c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4007c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4008c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4009c6fd2807SJeff Garzik 	}
4010c6fd2807SJeff Garzik 
4011e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4012e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4013e424675fSJeff Garzik 
4014c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4015a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4016c6fd2807SJeff Garzik 
40178343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
40188343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
40198343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
40208343f889SRobert Hancock 	 * solely limited by the cable.
40218343f889SRobert Hancock 	 * Unknown or 80-wire cables reported on the host side are
40228343f889SRobert Hancock 	 * checked on the drive side as well.  Cases where we know a
40238343f889SRobert Hancock 	 * 40-wire cable is safely used at 80-wire speeds are not checked here.
40248343f889SRobert Hancock 	 */
40258343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
40268343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
40278343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
40288343f889SRobert Hancock 		    (ata_drive_40wire(dev->id) &&
40298343f889SRobert Hancock 		     (ap->cbl == ATA_CBL_PATA_UNK ||
40308343f889SRobert Hancock 		      ap->cbl == ATA_CBL_PATA80))) {
40318343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
40328343f889SRobert Hancock 				"limited to UDMA/33 due to 40-wire cable\n");
40338343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
40348343f889SRobert Hancock 		}
40358343f889SRobert Hancock 
4036c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4037c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4038c6fd2807SJeff Garzik }
4039c6fd2807SJeff Garzik 
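/*
 * Editor's note: illustrative sketch only, not part of the driver.
 * The packed xfer_mask used above keeps one bit per mode in separate
 * PIO/MWDMA/UDMA fields, so 0xF8 << ATA_SHIFT_UDMA selects bits 3-7
 * of the UDMA field, i.e. UDMA3 (UDMA/44) and above.  Clearing them
 * is what caps a 40-wire configuration at UDMA2 (UDMA/33):
 */
#if 0
/* conventional "one bit per mode" bitmaps, as LLDDs pass them in */
unsigned long mask = ata_pack_xfermask(0x1f /* PIO0-4 */,
				       0x07 /* MWDMA0-2 */,
				       0x7f /* UDMA0-6 */);

mask &= ~(0xF8 << ATA_SHIFT_UDMA);	/* drop UDMA3..UDMA7 (UDMA/44+) */
/* the UDMA field now allows UDMA0-2 only, i.e. at most UDMA/33 */
#endif
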
4040c6fd2807SJeff Garzik /**
4041c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4042c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4043c6fd2807SJeff Garzik  *
4044c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4045c6fd2807SJeff Garzik  *	on the port to which it is attached.
4046c6fd2807SJeff Garzik  *
4047c6fd2807SJeff Garzik  *	LOCKING:
4048c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4049c6fd2807SJeff Garzik  *
4050c6fd2807SJeff Garzik  *	RETURNS:
4051c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4052c6fd2807SJeff Garzik  */
4053c6fd2807SJeff Garzik 
4054c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4055c6fd2807SJeff Garzik {
4056c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4057c6fd2807SJeff Garzik 	unsigned int err_mask;
4058c6fd2807SJeff Garzik 
4059c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4060c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4061c6fd2807SJeff Garzik 
4062464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4063464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4064464cf177STejun Heo 	 */
4065c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4066c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4067c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4068464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4069c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4070c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
4071c6fd2807SJeff Garzik 
4072c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4073c6fd2807SJeff Garzik 
4074c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4075c6fd2807SJeff Garzik 	return err_mask;
4076c6fd2807SJeff Garzik }
4077c6fd2807SJeff Garzik 
4078c6fd2807SJeff Garzik /**
40799f45cbd3SKristen Carlson Accardi  *	ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
40809f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
40819f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
40829f45cbd3SKristen Carlson Accardi  *
40839f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev
40849f45cbd3SKristen Carlson Accardi  *	on the port to which it is attached, with the sector count set
40859f45cbd3SKristen Carlson Accardi  *	to indicate the Asynchronous Notification feature.
40869f45cbd3SKristen Carlson Accardi  *
40879f45cbd3SKristen Carlson Accardi  *	LOCKING:
40889f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
40899f45cbd3SKristen Carlson Accardi  *
40909f45cbd3SKristen Carlson Accardi  *	RETURNS:
40919f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
40929f45cbd3SKristen Carlson Accardi  */
40939f45cbd3SKristen Carlson Accardi static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
40949f45cbd3SKristen Carlson Accardi {
40959f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
40969f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
40979f45cbd3SKristen Carlson Accardi 
40989f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
40999f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
41009f45cbd3SKristen Carlson Accardi 
41019f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
41029f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
41039f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
41049f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
41059f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
41069f45cbd3SKristen Carlson Accardi 	tf.nsect = SATA_AN;
41079f45cbd3SKristen Carlson Accardi 
41089f45cbd3SKristen Carlson Accardi 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
41099f45cbd3SKristen Carlson Accardi 
41109f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
41119f45cbd3SKristen Carlson Accardi 	return err_mask;
41129f45cbd3SKristen Carlson Accardi }
41139f45cbd3SKristen Carlson Accardi 
41149f45cbd3SKristen Carlson Accardi /**
4115c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4116c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4117c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4118c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4119c6fd2807SJeff Garzik  *
4120c6fd2807SJeff Garzik  *	LOCKING:
4121c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4122c6fd2807SJeff Garzik  *
4123c6fd2807SJeff Garzik  *	RETURNS:
4124c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4125c6fd2807SJeff Garzik  */
4126c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4127c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4128c6fd2807SJeff Garzik {
4129c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4130c6fd2807SJeff Garzik 	unsigned int err_mask;
4131c6fd2807SJeff Garzik 
4132c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4133c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4134c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4135c6fd2807SJeff Garzik 
4136c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4137c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
4138c6fd2807SJeff Garzik 
4139c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4140c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4141c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4142c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4143c6fd2807SJeff Garzik 	tf.nsect = sectors;
4144c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4145c6fd2807SJeff Garzik 
4146c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
414718b2466cSAlan Cox 	/* A clean abort indicates an original or just-out-of-spec drive
414818b2466cSAlan Cox 	   and we should continue, since the setup is issued based on the
414918b2466cSAlan Cox 	   drive's reported working geometry */
415018b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
415118b2466cSAlan Cox 		err_mask = 0;
4152c6fd2807SJeff Garzik 
4153c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4154c6fd2807SJeff Garzik 	return err_mask;
4155c6fd2807SJeff Garzik }
4156c6fd2807SJeff Garzik 
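/*
 * Editor's note: illustrative worked example, not part of the driver.
 * For the classic 16-head / 63-sectors-per-track translation the
 * taskfile built above ends up as
 *
 *	ata_dev_init_params(dev, 16, 63);
 *	  -> tf.nsect = 63
 *	  -> low nibble of tf.device = (16 - 1) & 0x0f = 0x0f (max head)
 */
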
4157c6fd2807SJeff Garzik /**
4158c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4159c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4160c6fd2807SJeff Garzik  *
4161c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4162c6fd2807SJeff Garzik  *
4163c6fd2807SJeff Garzik  *	LOCKING:
4164cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4165c6fd2807SJeff Garzik  */
416670e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4167c6fd2807SJeff Garzik {
4168c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4169c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4170c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4171c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4172c6fd2807SJeff Garzik 
4173c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4174c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4175c6fd2807SJeff Garzik 
4176c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4177c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4178c6fd2807SJeff Garzik 
4179c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4180c6fd2807SJeff Garzik 
4181c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
4182c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4183c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4184c6fd2807SJeff Garzik 	 */
4185c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4186c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4187c6fd2807SJeff Garzik 
4188c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4189c6fd2807SJeff Garzik 		if (qc->n_elem)
4190c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4191c6fd2807SJeff Garzik 		/* restore last sg */
4192c6fd2807SJeff Garzik 		sg[qc->orig_n_elem - 1].length += qc->pad_len;
4193c6fd2807SJeff Garzik 		if (pad_buf) {
4194c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
4195c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4196c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4197c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4198c6fd2807SJeff Garzik 		}
4199c6fd2807SJeff Garzik 	} else {
4200c6fd2807SJeff Garzik 		if (qc->n_elem)
4201c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4202c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4203c6fd2807SJeff Garzik 				dir);
4204c6fd2807SJeff Garzik 		/* restore sg */
4205c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4206c6fd2807SJeff Garzik 		if (pad_buf)
4207c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4208c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4209c6fd2807SJeff Garzik 	}
4210c6fd2807SJeff Garzik 
4211c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4212c6fd2807SJeff Garzik 	qc->__sg = NULL;
4213c6fd2807SJeff Garzik }
4214c6fd2807SJeff Garzik 
4215c6fd2807SJeff Garzik /**
4216c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4217c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4218c6fd2807SJeff Garzik  *
4219c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4220c6fd2807SJeff Garzik  *	associated with the current disk command.
4221c6fd2807SJeff Garzik  *
4222c6fd2807SJeff Garzik  *	LOCKING:
4223cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4224c6fd2807SJeff Garzik  *
4225c6fd2807SJeff Garzik  */
4226c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4227c6fd2807SJeff Garzik {
4228c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4229c6fd2807SJeff Garzik 	struct scatterlist *sg;
4230c6fd2807SJeff Garzik 	unsigned int idx;
4231c6fd2807SJeff Garzik 
4232c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4233c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4234c6fd2807SJeff Garzik 
4235c6fd2807SJeff Garzik 	idx = 0;
4236c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4237c6fd2807SJeff Garzik 		u32 addr, offset;
4238c6fd2807SJeff Garzik 		u32 sg_len, len;
4239c6fd2807SJeff Garzik 
4240c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4241c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4242c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4243c6fd2807SJeff Garzik 		 */
4244c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4245c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4246c6fd2807SJeff Garzik 
4247c6fd2807SJeff Garzik 		while (sg_len) {
4248c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4249c6fd2807SJeff Garzik 			len = sg_len;
4250c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4251c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4252c6fd2807SJeff Garzik 
4253c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4254c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4255c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4256c6fd2807SJeff Garzik 
4257c6fd2807SJeff Garzik 			idx++;
4258c6fd2807SJeff Garzik 			sg_len -= len;
4259c6fd2807SJeff Garzik 			addr += len;
4260c6fd2807SJeff Garzik 		}
4261c6fd2807SJeff Garzik 	}
4262c6fd2807SJeff Garzik 
4263c6fd2807SJeff Garzik 	if (idx)
4264c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4265c6fd2807SJeff Garzik }
4266b9a4197eSTejun Heo 
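/*
 * Editor's note: illustrative worked example, not part of the driver.
 * For a single S/G element at bus address 0x1fff0 with length 0x2000
 * the loop above splits it so no PRD entry crosses a 64K boundary:
 *
 *	pass 1: offset = 0xfff0, len = 0x10000 - 0xfff0 = 0x10
 *		-> PRD[0] = { addr 0x1fff0, len 0x10 }
 *	pass 2: offset = 0x0,    len = 0x1ff0
 *		-> PRD[1] = { addr 0x20000, len 0x1ff0 }
 *
 * and ATA_PRD_EOT is then set on the last entry.
 */
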
4267c6fd2807SJeff Garzik /**
4268d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4269d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4270d26fc955SAlan Cox  *
4271d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4272d26fc955SAlan Cox  *	associated with the current disk command.  Perform the fill
4273d26fc955SAlan Cox  *	so that we never write a full 64K-length record, for
4274d26fc955SAlan Cox  *	controllers that don't follow the spec.
4275d26fc955SAlan Cox  *
4276d26fc955SAlan Cox  *	LOCKING:
4277d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4278d26fc955SAlan Cox  *
4279d26fc955SAlan Cox  */
4280d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4281d26fc955SAlan Cox {
4282d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4283d26fc955SAlan Cox 	struct scatterlist *sg;
4284d26fc955SAlan Cox 	unsigned int idx;
4285d26fc955SAlan Cox 
4286d26fc955SAlan Cox 	WARN_ON(qc->__sg == NULL);
4287d26fc955SAlan Cox 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4288d26fc955SAlan Cox 
4289d26fc955SAlan Cox 	idx = 0;
4290d26fc955SAlan Cox 	ata_for_each_sg(sg, qc) {
4291d26fc955SAlan Cox 		u32 addr, offset;
4292d26fc955SAlan Cox 		u32 sg_len, len, blen;
4293d26fc955SAlan Cox 
4294d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4295d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4296d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4297d26fc955SAlan Cox 		 */
4298d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4299d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4300d26fc955SAlan Cox 
4301d26fc955SAlan Cox 		while (sg_len) {
4302d26fc955SAlan Cox 			offset = addr & 0xffff;
4303d26fc955SAlan Cox 			len = sg_len;
4304d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4305d26fc955SAlan Cox 				len = 0x10000 - offset;
4306d26fc955SAlan Cox 
4307d26fc955SAlan Cox 			blen = len & 0xffff;
4308d26fc955SAlan Cox 			ap->prd[idx].addr = cpu_to_le32(addr);
4309d26fc955SAlan Cox 			if (blen == 0) {
4310d26fc955SAlan Cox 				/* Some PATA chipsets like the CS5530 can't
4311d26fc955SAlan Cox 				   cope with 0x0000 meaning 64K as the spec says */
4312d26fc955SAlan Cox 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4313d26fc955SAlan Cox 				blen = 0x8000;
4314d26fc955SAlan Cox 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4315d26fc955SAlan Cox 			}
4316d26fc955SAlan Cox 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4317d26fc955SAlan Cox 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4318d26fc955SAlan Cox 
4319d26fc955SAlan Cox 			idx++;
4320d26fc955SAlan Cox 			sg_len -= len;
4321d26fc955SAlan Cox 			addr += len;
4322d26fc955SAlan Cox 		}
4323d26fc955SAlan Cox 	}
4324d26fc955SAlan Cox 
4325d26fc955SAlan Cox 	if (idx)
4326d26fc955SAlan Cox 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4327d26fc955SAlan Cox }
4328d26fc955SAlan Cox 
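/*
 * Editor's note: illustrative worked example, not part of the driver.
 * The "dumb" variant differs only for a full 64K chunk: len = 0x10000
 * gives blen = len & 0xffff = 0, which CS5530-class chipsets would
 * misread as zero instead of 64K.  The loop therefore emits
 *
 *	PRD[i]   = { addr,          len 0x8000 }
 *	PRD[i+1] = { addr + 0x8000, len 0x8000 }
 *
 * two 32K entries in place of one 0x0000-length ("64K") entry.
 */
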
4329d26fc955SAlan Cox /**
4330c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4331c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4332c6fd2807SJeff Garzik  *
4333c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4334c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4335c6fd2807SJeff Garzik  *	supplied PACKET command.
4336c6fd2807SJeff Garzik  *
4337c6fd2807SJeff Garzik  *	LOCKING:
4338cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4339c6fd2807SJeff Garzik  *
4340c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4341c6fd2807SJeff Garzik  *               nonzero otherwise
4342c6fd2807SJeff Garzik  */
4343c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4344c6fd2807SJeff Garzik {
4345c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4346c6fd2807SJeff Garzik 
4347b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4348b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4349b9a4197eSTejun Heo 	 */
4350b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
43516f23a31dSAlbert Lee 		return 1;
43526f23a31dSAlbert Lee 
4353c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4354b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4355c6fd2807SJeff Garzik 
4356b9a4197eSTejun Heo 	return 0;
4357c6fd2807SJeff Garzik }
4358b9a4197eSTejun Heo 
4359c6fd2807SJeff Garzik /**
436031cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
436131cc23b3STejun Heo  *	@qc: ATA command in question
436231cc23b3STejun Heo  *
436331cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
436431cc23b3STejun Heo  *	not.  As the upper layer only knows the queue depth, we are
436531cc23b3STejun Heo  *	responsible for maintaining exclusion.  This function checks
436631cc23b3STejun Heo  *	whether a new command @qc can be issued.
436731cc23b3STejun Heo  *
436831cc23b3STejun Heo  *	LOCKING:
436931cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
437031cc23b3STejun Heo  *
437131cc23b3STejun Heo  *	RETURNS:
437231cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
437331cc23b3STejun Heo  */
437431cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
437531cc23b3STejun Heo {
437631cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
437731cc23b3STejun Heo 
437831cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
437931cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
438031cc23b3STejun Heo 			return 0;
438131cc23b3STejun Heo 	} else {
438231cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
438331cc23b3STejun Heo 			return 0;
438431cc23b3STejun Heo 	}
438531cc23b3STejun Heo 
438631cc23b3STejun Heo 	return ATA_DEFER_LINK;
438731cc23b3STejun Heo }
438831cc23b3STejun Heo 
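/*
 * Editor's note: illustrative summary, not part of the driver.
 * The exclusion rule above, spelled out:
 *
 *   new command \ in flight | idle  | non-NCQ active | NCQ active
 *   ------------------------+-------+----------------+-----------
 *   NCQ                     | issue | defer          | issue
 *   non-NCQ                 | issue | defer          | defer
 *
 * i.e. NCQ commands may share the link only with other NCQ commands;
 * everything else must wait for the link to go idle.
 */
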
438931cc23b3STejun Heo /**
4390c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4391c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4392c6fd2807SJeff Garzik  *
4393c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4394c6fd2807SJeff Garzik  *
4395c6fd2807SJeff Garzik  *	LOCKING:
4396cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4397c6fd2807SJeff Garzik  */
4398c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4399c6fd2807SJeff Garzik {
4400c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4401c6fd2807SJeff Garzik 		return;
4402c6fd2807SJeff Garzik 
4403c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4404c6fd2807SJeff Garzik }
4405c6fd2807SJeff Garzik 
4406d26fc955SAlan Cox /**
4407d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4408d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4409d26fc955SAlan Cox  *
4410d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4411d26fc955SAlan Cox  *	Prepare ATA taskfile for submission using the dumb PRD fill.
4412d26fc955SAlan Cox  *	LOCKING:
4413d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4414d26fc955SAlan Cox  */
4415d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4416d26fc955SAlan Cox {
4417d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4418d26fc955SAlan Cox 		return;
4419d26fc955SAlan Cox 
4420d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4421d26fc955SAlan Cox }
4422d26fc955SAlan Cox 
4423c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4424c6fd2807SJeff Garzik 
4425c6fd2807SJeff Garzik /**
4426c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4427c6fd2807SJeff Garzik  *	@qc: Command to be associated
4428c6fd2807SJeff Garzik  *	@buf: Memory buffer
4429c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4430c6fd2807SJeff Garzik  *
4431c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4432c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4433c6fd2807SJeff Garzik  *
4434c6fd2807SJeff Garzik  *	LOCKING:
4435cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4436c6fd2807SJeff Garzik  */
4437c6fd2807SJeff Garzik 
4438c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4439c6fd2807SJeff Garzik {
4440c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4441c6fd2807SJeff Garzik 
4442c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4443c6fd2807SJeff Garzik 	qc->n_elem = 1;
4444c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4445c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4446c6fd2807SJeff Garzik 	qc->nbytes = buflen;
4447c6fd2807SJeff Garzik 
444861c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4449c6fd2807SJeff Garzik }
4450c6fd2807SJeff Garzik 
4451c6fd2807SJeff Garzik /**
4452c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4453c6fd2807SJeff Garzik  *	@qc: Command to be associated
4454c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4455c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4456c6fd2807SJeff Garzik  *
4457c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4458c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4459c6fd2807SJeff Garzik  *	elements.
4460c6fd2807SJeff Garzik  *
4461c6fd2807SJeff Garzik  *	LOCKING:
4462cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4463c6fd2807SJeff Garzik  */
4464c6fd2807SJeff Garzik 
4465c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4466c6fd2807SJeff Garzik 		 unsigned int n_elem)
4467c6fd2807SJeff Garzik {
4468c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4469c6fd2807SJeff Garzik 	qc->__sg = sg;
4470c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4471c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
4472c6fd2807SJeff Garzik }
4473c6fd2807SJeff Garzik 
4474c6fd2807SJeff Garzik /**
4475c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4476c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4477c6fd2807SJeff Garzik  *
4478c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4479c6fd2807SJeff Garzik  *
4480c6fd2807SJeff Garzik  *	LOCKING:
4481cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4482c6fd2807SJeff Garzik  *
4483c6fd2807SJeff Garzik  *	RETURNS:
4484c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4485c6fd2807SJeff Garzik  */
4486c6fd2807SJeff Garzik 
4487c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4488c6fd2807SJeff Garzik {
4489c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4490c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4491c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4492c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4493c6fd2807SJeff Garzik 	int trim_sg = 0;
4494c6fd2807SJeff Garzik 
4495c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4496c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4497c6fd2807SJeff Garzik 	if (qc->pad_len) {
4498c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4499c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4500c6fd2807SJeff Garzik 
4501c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4502c6fd2807SJeff Garzik 
4503c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4504c6fd2807SJeff Garzik 
4505c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4506c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4507c6fd2807SJeff Garzik 			       qc->pad_len);
4508c6fd2807SJeff Garzik 
4509c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4510c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4511c6fd2807SJeff Garzik 		/* trim sg */
4512c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4513c6fd2807SJeff Garzik 		if (sg->length == 0)
4514c6fd2807SJeff Garzik 			trim_sg = 1;
4515c6fd2807SJeff Garzik 
4516c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4517c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4518c6fd2807SJeff Garzik 	}
4519c6fd2807SJeff Garzik 
4520c6fd2807SJeff Garzik 	if (trim_sg) {
4521c6fd2807SJeff Garzik 		qc->n_elem--;
4522c6fd2807SJeff Garzik 		goto skip_map;
4523c6fd2807SJeff Garzik 	}
4524c6fd2807SJeff Garzik 
4525c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4526c6fd2807SJeff Garzik 				     sg->length, dir);
4527c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4528c6fd2807SJeff Garzik 		/* restore sg */
4529c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4530c6fd2807SJeff Garzik 		return -1;
4531c6fd2807SJeff Garzik 	}
4532c6fd2807SJeff Garzik 
4533c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4534c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4535c6fd2807SJeff Garzik 
4536c6fd2807SJeff Garzik skip_map:
4537c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4538c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4539c6fd2807SJeff Garzik 
4540c6fd2807SJeff Garzik 	return 0;
4541c6fd2807SJeff Garzik }
4542c6fd2807SJeff Garzik 
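/*
 * Editor's note: illustrative worked example, not part of the driver.
 * For a 510-byte ATAPI transfer the padding above gives
 * qc->pad_len = 510 & 3 = 2: the main S/G entry is trimmed to 508
 * bytes and the trailing 2 bytes go through the per-tag pad buffer
 * (padded out to ATA_DMA_PAD_SZ), so the controller only ever sees
 * lengths that end on a 32-bit boundary.  On a read, ata_sg_clean()
 * copies those bytes back out of the pad buffer afterwards.
 */
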
4543c6fd2807SJeff Garzik /**
4544c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4545c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4546c6fd2807SJeff Garzik  *
4547c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4548c6fd2807SJeff Garzik  *
4549c6fd2807SJeff Garzik  *	LOCKING:
4550cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4551c6fd2807SJeff Garzik  *
4552c6fd2807SJeff Garzik  *	RETURNS:
4553c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4554c6fd2807SJeff Garzik  *
4555c6fd2807SJeff Garzik  */
4556c6fd2807SJeff Garzik 
4557c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4558c6fd2807SJeff Garzik {
4559c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4560c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4561c6fd2807SJeff Garzik 	struct scatterlist *lsg = &sg[qc->n_elem - 1];
4562c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4563c6fd2807SJeff Garzik 
456444877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4565c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4566c6fd2807SJeff Garzik 
4567c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4568c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4569c6fd2807SJeff Garzik 	if (qc->pad_len) {
4570c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4571c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4572c6fd2807SJeff Garzik 		unsigned int offset;
4573c6fd2807SJeff Garzik 
4574c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4575c6fd2807SJeff Garzik 
4576c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4577c6fd2807SJeff Garzik 
4578c6fd2807SJeff Garzik 		/*
4579c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4580c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4581c6fd2807SJeff Garzik 		 */
4582c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4583c6fd2807SJeff Garzik 		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4584c6fd2807SJeff Garzik 		psg->offset = offset_in_page(offset);
4585c6fd2807SJeff Garzik 
4586c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4587c6fd2807SJeff Garzik 			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4588c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4589c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4590c6fd2807SJeff Garzik 		}
4591c6fd2807SJeff Garzik 
4592c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4593c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4594c6fd2807SJeff Garzik 		/* trim last sg */
4595c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4596c6fd2807SJeff Garzik 		if (lsg->length == 0)
4597c6fd2807SJeff Garzik 			trim_sg = 1;
4598c6fd2807SJeff Garzik 
4599c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4600c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4601c6fd2807SJeff Garzik 	}
4602c6fd2807SJeff Garzik 
4603c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4604c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4605c6fd2807SJeff Garzik 		pre_n_elem--;
4606c6fd2807SJeff Garzik 
4607c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4608c6fd2807SJeff Garzik 		n_elem = 0;
4609c6fd2807SJeff Garzik 		goto skip_map;
4610c6fd2807SJeff Garzik 	}
4611c6fd2807SJeff Garzik 
4612c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4613c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4614c6fd2807SJeff Garzik 	if (n_elem < 1) {
4615c6fd2807SJeff Garzik 		/* restore last sg */
4616c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4617c6fd2807SJeff Garzik 		return -1;
4618c6fd2807SJeff Garzik 	}
4619c6fd2807SJeff Garzik 
4620c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4621c6fd2807SJeff Garzik 
4622c6fd2807SJeff Garzik skip_map:
4623c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4624c6fd2807SJeff Garzik 
4625c6fd2807SJeff Garzik 	return 0;
4626c6fd2807SJeff Garzik }
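
/*
 * Editor's sketch (not part of the original source): the padding math
 * above in concrete numbers.  If the last scatterlist entry is 513
 * bytes, pad_len = 513 & 3 = 1; the entry is trimmed to 512 bytes and
 * the odd byte moves into the zero-filled per-tag pad buffer, so the
 * DMA transfer still ends on a 32-bit boundary:
 *
 *	qc->pad_len      = lsg->length & 3;	// 513 & 3 == 1
 *	lsg->length     -= qc->pad_len;		// 513 - 1 == 512
 *	sg_dma_len(psg)  = ATA_DMA_PAD_SZ;	// pad entry rounds it up
 */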
4627c6fd2807SJeff Garzik 
4628c6fd2807SJeff Garzik /**
4629c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4630c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4631c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4632c6fd2807SJeff Garzik  *
4633c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4634c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4635c6fd2807SJeff Garzik  *	vice-versa.
4636c6fd2807SJeff Garzik  *
4637c6fd2807SJeff Garzik  *	LOCKING:
4638c6fd2807SJeff Garzik  *	Inherited from caller.
4639c6fd2807SJeff Garzik  */
4640c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4641c6fd2807SJeff Garzik {
4642c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4643c6fd2807SJeff Garzik 	unsigned int i;
4644c6fd2807SJeff Garzik 
4645c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4646c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4647c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4648c6fd2807SJeff Garzik }
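
/*
 * Editor's sketch (illustrative, not from the original file): a typical
 * caller swaps a freshly read IDENTIFY DEVICE page so its u16 words are
 * in native byte order before the ata_id_*() helpers parse it.  "id" is
 * a hypothetical local buffer of ATA_ID_WORDS (256) 16-bit words:
 *
 *	u16 id[ATA_ID_WORDS];
 *	// ... 512 bytes of IDENTIFY data read into id by PIO ...
 *	swap_buf_le16(id, ATA_ID_WORDS);	// no-op on little-endian CPUs
 */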
4649c6fd2807SJeff Garzik 
4650c6fd2807SJeff Garzik /**
46510d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4652c6fd2807SJeff Garzik  *	@adev: device to target
4653c6fd2807SJeff Garzik  *	@buf: data buffer
4654c6fd2807SJeff Garzik  *	@buflen: buffer length
4655c6fd2807SJeff Garzik  *	@write_data: non-zero to write to the device, zero to read from it
4656c6fd2807SJeff Garzik  *
4657c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4658c6fd2807SJeff Garzik  *
4659c6fd2807SJeff Garzik  *	LOCKING:
4660c6fd2807SJeff Garzik  *	Inherited from caller.
4661c6fd2807SJeff Garzik  */
46620d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4663c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4664c6fd2807SJeff Garzik {
46659af5c9c9STejun Heo 	struct ata_port *ap = adev->link->ap;
4666c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4667c6fd2807SJeff Garzik 
4668c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4669c6fd2807SJeff Garzik 	if (write_data)
46700d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4671c6fd2807SJeff Garzik 	else
46720d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4673c6fd2807SJeff Garzik 
4674c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4675c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4676c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4677c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4678c6fd2807SJeff Garzik 
4679c6fd2807SJeff Garzik 		if (write_data) {
4680c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
46810d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4682c6fd2807SJeff Garzik 		} else {
46830d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4684c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4685c6fd2807SJeff Garzik 		}
4686c6fd2807SJeff Garzik 	}
4687c6fd2807SJeff Garzik }
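
/*
 * Editor's note (worked example, not original text): for an odd-length
 * buffer the bulk moves 16 bits at a time and the final byte goes
 * through a one-word bounce buffer.  With buflen = 7:
 *
 *	words = 7 >> 1;		// 3 words == 6 bytes via io{read,write}16_rep
 *	// last byte: one 16-bit access; on read only the low byte is kept,
 *	// on write the high byte goes out as zero
 */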
4688c6fd2807SJeff Garzik 
4689c6fd2807SJeff Garzik /**
46900d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4691c6fd2807SJeff Garzik  *	@adev: device to target
4692c6fd2807SJeff Garzik  *	@buf: data buffer
4693c6fd2807SJeff Garzik  *	@buflen: buffer length
4694c6fd2807SJeff Garzik  *	@write_data: non-zero to write to the device, zero to read from it
4695c6fd2807SJeff Garzik  *
4696c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4697c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4698c6fd2807SJeff Garzik  *
4699c6fd2807SJeff Garzik  *	LOCKING:
4700c6fd2807SJeff Garzik  *	Inherited from caller.
4701c6fd2807SJeff Garzik  */
47020d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4703c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
4704c6fd2807SJeff Garzik {
4705c6fd2807SJeff Garzik 	unsigned long flags;
4706c6fd2807SJeff Garzik 	local_irq_save(flags);
47070d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
4708c6fd2807SJeff Garzik 	local_irq_restore(flags);
4709c6fd2807SJeff Garzik }
4710c6fd2807SJeff Garzik 
4711c6fd2807SJeff Garzik 
4712c6fd2807SJeff Garzik /**
47135a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
4714c6fd2807SJeff Garzik  *	@qc: Command on going
4715c6fd2807SJeff Garzik  *
47165a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4717c6fd2807SJeff Garzik  *
4718c6fd2807SJeff Garzik  *	LOCKING:
4719c6fd2807SJeff Garzik  *	Inherited from caller.
4720c6fd2807SJeff Garzik  */
4721c6fd2807SJeff Garzik 
4722c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
4723c6fd2807SJeff Garzik {
4724c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4725c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4726c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4727c6fd2807SJeff Garzik 	struct page *page;
4728c6fd2807SJeff Garzik 	unsigned int offset;
4729c6fd2807SJeff Garzik 	unsigned char *buf;
4730c6fd2807SJeff Garzik 
47315a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4732c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4733c6fd2807SJeff Garzik 
4734c6fd2807SJeff Garzik 	page = sg[qc->cursg].page;
4735726f0785STejun Heo 	offset = sg[qc->cursg].offset + qc->cursg_ofs;
4736c6fd2807SJeff Garzik 
4737c6fd2807SJeff Garzik 	/* get the current page and offset */
4738c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4739c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4740c6fd2807SJeff Garzik 
4741c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4742c6fd2807SJeff Garzik 
4743c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4744c6fd2807SJeff Garzik 		unsigned long flags;
4745c6fd2807SJeff Garzik 
4746c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
4747c6fd2807SJeff Garzik 		local_irq_save(flags);
4748c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4749c6fd2807SJeff Garzik 
4750c6fd2807SJeff Garzik 		/* do the actual data transfer */
47515a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4752c6fd2807SJeff Garzik 
4753c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4754c6fd2807SJeff Garzik 		local_irq_restore(flags);
4755c6fd2807SJeff Garzik 	} else {
4756c6fd2807SJeff Garzik 		buf = page_address(page);
47575a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4758c6fd2807SJeff Garzik 	}
4759c6fd2807SJeff Garzik 
47605a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
47615a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
4762c6fd2807SJeff Garzik 
4763726f0785STejun Heo 	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4764c6fd2807SJeff Garzik 		qc->cursg++;
4765c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4766c6fd2807SJeff Garzik 	}
4767c6fd2807SJeff Garzik }
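
/*
 * Editor's sketch (example numbers only): the bookkeeping above with
 * sect_size 512 and a 1024-byte scatterlist entry.  Two calls advance
 * cursg_ofs 0 -> 512 -> 1024; at the end of the second call cursg_ofs
 * equals the entry length, so the cursor moves to the next entry:
 *
 *	qc->curbytes  += qc->sect_size;		// 512 per call
 *	qc->cursg_ofs += qc->sect_size;		// hits 1024 -> cursg++, ofs = 0
 */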
4768c6fd2807SJeff Garzik 
4769c6fd2807SJeff Garzik /**
47705a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
4771c6fd2807SJeff Garzik  *	@qc: Command on going
4772c6fd2807SJeff Garzik  *
47735a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
4774c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
4775c6fd2807SJeff Garzik  *
4776c6fd2807SJeff Garzik  *	LOCKING:
4777c6fd2807SJeff Garzik  *	Inherited from caller.
4778c6fd2807SJeff Garzik  */
4779c6fd2807SJeff Garzik 
4780c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
4781c6fd2807SJeff Garzik {
4782c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
4783c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
4784c6fd2807SJeff Garzik 		unsigned int nsect;
4785c6fd2807SJeff Garzik 
4786c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
4787c6fd2807SJeff Garzik 
47885a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4789726f0785STejun Heo 			    qc->dev->multi_count);
4790c6fd2807SJeff Garzik 		while (nsect--)
4791c6fd2807SJeff Garzik 			ata_pio_sector(qc);
4792c6fd2807SJeff Garzik 	} else
4793c6fd2807SJeff Garzik 		ata_pio_sector(qc);
47944cc980b3SAlbert Lee 
47954cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
4796c6fd2807SJeff Garzik }
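
/*
 * Editor's note (worked example): for READ/WRITE MULTIPLE one DRQ block
 * is up to multi_count sectors, capped by what is left of the command.
 * With 16384 bytes remaining, sect_size 512 and multi_count 8:
 *
 *	nsect = min(16384 / 512, 8);	// min(32, 8) == 8 sectors this DRQ block
 */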
4797c6fd2807SJeff Garzik 
4798c6fd2807SJeff Garzik /**
4799c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
4800c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
4801c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
4802c6fd2807SJeff Garzik  *
4803c6fd2807SJeff Garzik  *	When device has indicated its readiness to accept
4804c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
4805c6fd2807SJeff Garzik  *
4806c6fd2807SJeff Garzik  *	LOCKING:
4807c6fd2807SJeff Garzik  *	Inherited from caller.
4808c6fd2807SJeff Garzik  */
4809c6fd2807SJeff Garzik 
4810c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4811c6fd2807SJeff Garzik {
4812c6fd2807SJeff Garzik 	/* send SCSI cdb */
4813c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
4814c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
4815c6fd2807SJeff Garzik 
4816c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4817c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4818c6fd2807SJeff Garzik 
4819c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
4820c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
4821c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
4822c6fd2807SJeff Garzik 		break;
4823c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
4824c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4825c6fd2807SJeff Garzik 		break;
4826c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
4827c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4828c6fd2807SJeff Garzik 		/* initiate bmdma */
4829c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
4830c6fd2807SJeff Garzik 		break;
4831c6fd2807SJeff Garzik 	}
4832c6fd2807SJeff Garzik }
4833c6fd2807SJeff Garzik 
4834c6fd2807SJeff Garzik /**
4835c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4836c6fd2807SJeff Garzik  *	@qc: Command on going
4837c6fd2807SJeff Garzik  *	@bytes: number of bytes
4838c6fd2807SJeff Garzik  *
4839c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4840c6fd2807SJeff Garzik  *
4841c6fd2807SJeff Garzik  *	LOCKING:
4842c6fd2807SJeff Garzik  *	Inherited from caller.
4843c6fd2807SJeff Garzik  *
4844c6fd2807SJeff Garzik  */
4845c6fd2807SJeff Garzik 
4846c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4847c6fd2807SJeff Garzik {
4848c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4849c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4850c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4851c6fd2807SJeff Garzik 	struct page *page;
4852c6fd2807SJeff Garzik 	unsigned char *buf;
4853c6fd2807SJeff Garzik 	unsigned int offset, count;
4854c6fd2807SJeff Garzik 
4855c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
4856c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4857c6fd2807SJeff Garzik 
4858c6fd2807SJeff Garzik next_sg:
4859c6fd2807SJeff Garzik 	if (unlikely(qc->cursg >= qc->n_elem)) {
4860c6fd2807SJeff Garzik 		/*
4861c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
4862c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
4863c6fd2807SJeff Garzik 		 * and to fulfill the length specified in the byte count register,
4864c6fd2807SJeff Garzik 		 *    - for the read case, discard the trailing data from the device
4865c6fd2807SJeff Garzik 		 *    - for the write case, pad with zero data to the device
4866c6fd2807SJeff Garzik 		 */
4867c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
4868c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
4869c6fd2807SJeff Garzik 		unsigned int i;
4870c6fd2807SJeff Garzik 
4871c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
4872c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
4873c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
4874c6fd2807SJeff Garzik 
4875c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
4876c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4877c6fd2807SJeff Garzik 
4878c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4879c6fd2807SJeff Garzik 		return;
4880c6fd2807SJeff Garzik 	}
4881c6fd2807SJeff Garzik 
4882c6fd2807SJeff Garzik 	sg = &qc->__sg[qc->cursg];
4883c6fd2807SJeff Garzik 
4884c6fd2807SJeff Garzik 	page = sg->page;
4885c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
4886c6fd2807SJeff Garzik 
4887c6fd2807SJeff Garzik 	/* get the current page and offset */
4888c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4889c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4890c6fd2807SJeff Garzik 
4891c6fd2807SJeff Garzik 	/* don't overrun current sg */
4892c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
4893c6fd2807SJeff Garzik 
4894c6fd2807SJeff Garzik 	/* don't cross page boundaries */
4895c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
4896c6fd2807SJeff Garzik 
4897c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4898c6fd2807SJeff Garzik 
4899c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4900c6fd2807SJeff Garzik 		unsigned long flags;
4901c6fd2807SJeff Garzik 
4902c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
4903c6fd2807SJeff Garzik 		local_irq_save(flags);
4904c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4905c6fd2807SJeff Garzik 
4906c6fd2807SJeff Garzik 		/* do the actual data transfer */
4907c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4908c6fd2807SJeff Garzik 
4909c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4910c6fd2807SJeff Garzik 		local_irq_restore(flags);
4911c6fd2807SJeff Garzik 	} else {
4912c6fd2807SJeff Garzik 		buf = page_address(page);
4913c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
4914c6fd2807SJeff Garzik 	}
4915c6fd2807SJeff Garzik 
4916c6fd2807SJeff Garzik 	bytes -= count;
4917c6fd2807SJeff Garzik 	qc->curbytes += count;
4918c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
4919c6fd2807SJeff Garzik 
4920c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
4921c6fd2807SJeff Garzik 		qc->cursg++;
4922c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4923c6fd2807SJeff Garzik 	}
4924c6fd2807SJeff Garzik 
4925c6fd2807SJeff Garzik 	if (bytes)
4926c6fd2807SJeff Garzik 		goto next_sg;
4927c6fd2807SJeff Garzik }
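
/*
 * Editor's note (sketch): when the device's byte count outruns qc->sg,
 * the overflow path at next_sg above satisfies the excess with a zeroed
 * one-word bounce buffer.  E.g. 10 surplus bytes become:
 *
 *	words = 10 >> 1;	// 5 dummy 16-bit transfers: data discarded
 *				// on read, zeroes sent on write
 */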
4928c6fd2807SJeff Garzik 
4929c6fd2807SJeff Garzik /**
4930c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
4931c6fd2807SJeff Garzik  *	@qc: Command on going
4932c6fd2807SJeff Garzik  *
4933c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4934c6fd2807SJeff Garzik  *
4935c6fd2807SJeff Garzik  *	LOCKING:
4936c6fd2807SJeff Garzik  *	Inherited from caller.
4937c6fd2807SJeff Garzik  */
4938c6fd2807SJeff Garzik 
4939c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4940c6fd2807SJeff Garzik {
4941c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4942c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
4943c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
4944c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4945c6fd2807SJeff Garzik 
4946c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
4947c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
4948c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
4949c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
4950c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
4951c6fd2807SJeff Garzik 	 */
4952c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
4953c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
4954c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
4955c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
4956c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
4957c6fd2807SJeff Garzik 
4958c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
4959c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
4960c6fd2807SJeff Garzik 		goto err_out;
4961c6fd2807SJeff Garzik 
4962c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
4963c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4964c6fd2807SJeff Garzik 	if (do_write != i_write)
4965c6fd2807SJeff Garzik 		goto err_out;
4966c6fd2807SJeff Garzik 
496744877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4968c6fd2807SJeff Garzik 
4969c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
49704cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
4971c6fd2807SJeff Garzik 
4972c6fd2807SJeff Garzik 	return;
4973c6fd2807SJeff Garzik 
4974c6fd2807SJeff Garzik err_out:
4975c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4976c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
4977c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
4978c6fd2807SJeff Garzik }
4979c6fd2807SJeff Garzik 
4980c6fd2807SJeff Garzik /**
4981c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4982c6fd2807SJeff Garzik  *	@ap: the target ata_port
4983c6fd2807SJeff Garzik  *	@qc: qc on going
4984c6fd2807SJeff Garzik  *
4985c6fd2807SJeff Garzik  *	RETURNS:
4986c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
4987c6fd2807SJeff Garzik  */
4988c6fd2807SJeff Garzik 
4989c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4990c6fd2807SJeff Garzik {
4991c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
4992c6fd2807SJeff Garzik 		return 1;
4993c6fd2807SJeff Garzik 
4994c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
4995c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
4996c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
4997c6fd2807SJeff Garzik 		    return 1;
4998c6fd2807SJeff Garzik 
4999c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
5000c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5001c6fd2807SJeff Garzik 			return 1;
5002c6fd2807SJeff Garzik 	}
5003c6fd2807SJeff Garzik 
5004c6fd2807SJeff Garzik 	return 0;
5005c6fd2807SJeff Garzik }
5006c6fd2807SJeff Garzik 
5007c6fd2807SJeff Garzik /**
5008c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5009c6fd2807SJeff Garzik  *	@qc: Command to complete
5010c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5011c6fd2807SJeff Garzik  *
5012c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5013c6fd2807SJeff Garzik  *
5014c6fd2807SJeff Garzik  *	LOCKING:
5015cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5016c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5017c6fd2807SJeff Garzik  */
5018c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5019c6fd2807SJeff Garzik {
5020c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5021c6fd2807SJeff Garzik 	unsigned long flags;
5022c6fd2807SJeff Garzik 
5023c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5024c6fd2807SJeff Garzik 		if (in_wq) {
5025c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5026c6fd2807SJeff Garzik 
5027cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5028cca3974eSJeff Garzik 			 * released.
5029c6fd2807SJeff Garzik 			 */
5030c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5031c6fd2807SJeff Garzik 			if (qc) {
5032c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
503383625006SAkira Iguchi 					ap->ops->irq_on(ap);
5034c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5035c6fd2807SJeff Garzik 				} else
5036c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5037c6fd2807SJeff Garzik 			}
5038c6fd2807SJeff Garzik 
5039c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5040c6fd2807SJeff Garzik 		} else {
5041c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5042c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5043c6fd2807SJeff Garzik 			else
5044c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5045c6fd2807SJeff Garzik 		}
5046c6fd2807SJeff Garzik 	} else {
5047c6fd2807SJeff Garzik 		if (in_wq) {
5048c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
504983625006SAkira Iguchi 			ap->ops->irq_on(ap);
5050c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5051c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5052c6fd2807SJeff Garzik 		} else
5053c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5054c6fd2807SJeff Garzik 	}
5055c6fd2807SJeff Garzik }
5056c6fd2807SJeff Garzik 
5057c6fd2807SJeff Garzik /**
5058c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5059c6fd2807SJeff Garzik  *	@ap: the target ata_port
5060c6fd2807SJeff Garzik  *	@qc: qc on going
5061c6fd2807SJeff Garzik  *	@status: current device status
5062c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5063c6fd2807SJeff Garzik  *
5064c6fd2807SJeff Garzik  *	RETURNS:
5065c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5066c6fd2807SJeff Garzik  */
5067c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5068c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5069c6fd2807SJeff Garzik {
5070c6fd2807SJeff Garzik 	unsigned long flags = 0;
5071c6fd2807SJeff Garzik 	int poll_next;
5072c6fd2807SJeff Garzik 
5073c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5074c6fd2807SJeff Garzik 
5075c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5076c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5077c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5078c6fd2807SJeff Garzik 	 */
5079c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5080c6fd2807SJeff Garzik 
5081c6fd2807SJeff Garzik fsm_start:
5082c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
508344877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5084c6fd2807SJeff Garzik 
5085c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5086c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5087c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5088c6fd2807SJeff Garzik 
5089c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5090c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5091c6fd2807SJeff Garzik 		 * takes over after sending the data.
5092c6fd2807SJeff Garzik 		 */
5093c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5094c6fd2807SJeff Garzik 
5095c6fd2807SJeff Garzik 		/* check device status */
5096c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5097c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5098c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5099c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5100c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5101c6fd2807SJeff Garzik 			else
5102c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5103c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5104c6fd2807SJeff Garzik 
5105c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5106c6fd2807SJeff Garzik 			goto fsm_start;
5107c6fd2807SJeff Garzik 		}
5108c6fd2807SJeff Garzik 
5109c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5110c6fd2807SJeff Garzik 		 * when it finds something wrong.
5111c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5112c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5113c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5114c6fd2807SJeff Garzik 		 */
5115c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
511644877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
511744877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
5118c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
5119c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5120c6fd2807SJeff Garzik 			goto fsm_start;
5121c6fd2807SJeff Garzik 		}
5122c6fd2807SJeff Garzik 
5123c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5124c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5125c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5126c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5127c6fd2807SJeff Garzik 		 */
5128c6fd2807SJeff Garzik 		if (in_wq)
5129c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5130c6fd2807SJeff Garzik 
5131c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5132c6fd2807SJeff Garzik 			/* PIO data out protocol.
5133c6fd2807SJeff Garzik 			 * send first data block.
5134c6fd2807SJeff Garzik 			 */
5135c6fd2807SJeff Garzik 
5136c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5137c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5138c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5139c6fd2807SJeff Garzik 			 */
5140c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5141c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5142c6fd2807SJeff Garzik 		} else
5143c6fd2807SJeff Garzik 			/* send CDB */
5144c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5145c6fd2807SJeff Garzik 
5146c6fd2807SJeff Garzik 		if (in_wq)
5147c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5148c6fd2807SJeff Garzik 
5149c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5150c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5151c6fd2807SJeff Garzik 		 */
5152c6fd2807SJeff Garzik 		break;
5153c6fd2807SJeff Garzik 
5154c6fd2807SJeff Garzik 	case HSM_ST:
5155c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
5156c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
5157c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5158c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5159c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5160c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5161c6fd2807SJeff Garzik 				 */
5162c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5163c6fd2807SJeff Garzik 				goto fsm_start;
5164c6fd2807SJeff Garzik 			}
5165c6fd2807SJeff Garzik 
5166c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5167c6fd2807SJeff Garzik 			 * when it finds something wrong.
5168c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5169c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5170c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5171c6fd2807SJeff Garzik 			 */
5172c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
517344877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
517444877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
517544877b4eSTejun Heo 						status);
5176c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5177c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5178c6fd2807SJeff Garzik 				goto fsm_start;
5179c6fd2807SJeff Garzik 			}
5180c6fd2807SJeff Garzik 
5181c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5182c6fd2807SJeff Garzik 
5183c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5184c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5185c6fd2807SJeff Garzik 				goto fsm_start;
5186c6fd2807SJeff Garzik 
5187c6fd2807SJeff Garzik 		} else {
5188c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5189c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5190c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5191c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5192c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5193c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5194c6fd2807SJeff Garzik 				else
519555a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
519655a8e2c8STejun Heo 					 * Phantom devices also trigger this
519755a8e2c8STejun Heo 					 * condition.  Mark hint.
519855a8e2c8STejun Heo 					 */
519955a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
520055a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5201c6fd2807SJeff Garzik 
5202c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5203c6fd2807SJeff Garzik 				goto fsm_start;
5204c6fd2807SJeff Garzik 			}
5205c6fd2807SJeff Garzik 
5206c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5207c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
5208c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5209c6fd2807SJeff Garzik 			 * block of junk data before changing the
5210c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5211c6fd2807SJeff Garzik 			 *
5212c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5213c6fd2807SJeff Garzik 			 * sense since the data block has been
5214c6fd2807SJeff Garzik 			 * transferred to the device.
5215c6fd2807SJeff Garzik 			 */
5216c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5217c6fd2807SJeff Garzik 				/* data might be corrupted */
5218c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5219c6fd2807SJeff Garzik 
5220c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5221c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5222c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5223c6fd2807SJeff Garzik 				}
5224c6fd2807SJeff Garzik 
5225c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5226c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5227c6fd2807SJeff Garzik 
5228c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5229c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5230c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5231c6fd2807SJeff Garzik 				 */
5232c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5233c6fd2807SJeff Garzik 				goto fsm_start;
5234c6fd2807SJeff Garzik 			}
5235c6fd2807SJeff Garzik 
5236c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5237c6fd2807SJeff Garzik 
5238c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5239c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5240c6fd2807SJeff Garzik 				/* all data read */
5241c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5242c6fd2807SJeff Garzik 				goto fsm_start;
5243c6fd2807SJeff Garzik 			}
5244c6fd2807SJeff Garzik 		}
5245c6fd2807SJeff Garzik 
5246c6fd2807SJeff Garzik 		poll_next = 1;
5247c6fd2807SJeff Garzik 		break;
5248c6fd2807SJeff Garzik 
5249c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5250c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5251c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5252c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5253c6fd2807SJeff Garzik 			goto fsm_start;
5254c6fd2807SJeff Garzik 		}
5255c6fd2807SJeff Garzik 
5256c6fd2807SJeff Garzik 		/* no more data to transfer */
5257c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
525844877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5259c6fd2807SJeff Garzik 
5260c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5261c6fd2807SJeff Garzik 
5262c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5263c6fd2807SJeff Garzik 
5264c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5265c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5266c6fd2807SJeff Garzik 
5267c6fd2807SJeff Garzik 		poll_next = 0;
5268c6fd2807SJeff Garzik 		break;
5269c6fd2807SJeff Garzik 
5270c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5271c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5272c6fd2807SJeff Garzik 		 * know what's wrong and recover
5273c6fd2807SJeff Garzik 		 */
5274c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5275c6fd2807SJeff Garzik 
5276c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5277c6fd2807SJeff Garzik 
5278c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5279c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5280c6fd2807SJeff Garzik 
5281c6fd2807SJeff Garzik 		poll_next = 0;
5282c6fd2807SJeff Garzik 		break;
5283c6fd2807SJeff Garzik 	default:
5284c6fd2807SJeff Garzik 		poll_next = 0;
5285c6fd2807SJeff Garzik 		BUG();
5286c6fd2807SJeff Garzik 	}
5287c6fd2807SJeff Garzik 
5288c6fd2807SJeff Garzik 	return poll_next;
5289c6fd2807SJeff Garzik }
5290c6fd2807SJeff Garzik 
529165f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5292c6fd2807SJeff Garzik {
529365f27f38SDavid Howells 	struct ata_port *ap =
529465f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
529565f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5296c6fd2807SJeff Garzik 	u8 status;
5297c6fd2807SJeff Garzik 	int poll_next;
5298c6fd2807SJeff Garzik 
5299c6fd2807SJeff Garzik fsm_start:
5300c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5301c6fd2807SJeff Garzik 
5302c6fd2807SJeff Garzik 	/*
5303c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5304c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5305c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5306c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5307c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5308c6fd2807SJeff Garzik 	 */
5309c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5310c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5311c6fd2807SJeff Garzik 		msleep(2);
5312c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5313c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5314c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5315c6fd2807SJeff Garzik 			return;
5316c6fd2807SJeff Garzik 		}
5317c6fd2807SJeff Garzik 	}
5318c6fd2807SJeff Garzik 
5319c6fd2807SJeff Garzik 	/* move the HSM */
5320c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5321c6fd2807SJeff Garzik 
5322c6fd2807SJeff Garzik 	/* another command or interrupt handler
5323c6fd2807SJeff Garzik 	 * may be running at this point.
5324c6fd2807SJeff Garzik 	 */
5325c6fd2807SJeff Garzik 	if (poll_next)
5326c6fd2807SJeff Garzik 		goto fsm_start;
5327c6fd2807SJeff Garzik }
5328c6fd2807SJeff Garzik 
5329c6fd2807SJeff Garzik /**
5330c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5331c6fd2807SJeff Garzik  *	@ap: Port from which a free command tag is allocated
5333c6fd2807SJeff Garzik  *
5334c6fd2807SJeff Garzik  *	LOCKING:
5335c6fd2807SJeff Garzik  *	None.
5336c6fd2807SJeff Garzik  */
5337c6fd2807SJeff Garzik 
5338c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5339c6fd2807SJeff Garzik {
5340c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5341c6fd2807SJeff Garzik 	unsigned int i;
5342c6fd2807SJeff Garzik 
5343c6fd2807SJeff Garzik 	/* no command while frozen */
5344c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5345c6fd2807SJeff Garzik 		return NULL;
5346c6fd2807SJeff Garzik 
5347c6fd2807SJeff Garzik 	/* the last tag is reserved for the internal command. */
5348c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5349c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5350c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5351c6fd2807SJeff Garzik 			break;
5352c6fd2807SJeff Garzik 		}
5353c6fd2807SJeff Garzik 
5354c6fd2807SJeff Garzik 	if (qc)
5355c6fd2807SJeff Garzik 		qc->tag = i;
5356c6fd2807SJeff Garzik 
5357c6fd2807SJeff Garzik 	return qc;
5358c6fd2807SJeff Garzik }
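
/*
 * Editor's sketch (illustrative): qc_allocated is a one-bit-per-tag
 * bitmap and the highest tag stays reserved for the internal command.
 * On a port whose bitmap currently reads 0x7 (tags 0-2 busy), the loop
 * above hands out tag 3:
 *
 *	// test_and_set_bit() returns the old bit: 1 for tags 0-2, 0 for tag 3
 *	qc = __ata_qc_from_tag(ap, 3);
 *	qc->tag = 3;
 */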
5359c6fd2807SJeff Garzik 
5360c6fd2807SJeff Garzik /**
5361c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5362c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5363c6fd2807SJeff Garzik  *
5364c6fd2807SJeff Garzik  *	LOCKING:
5365c6fd2807SJeff Garzik  *	None.
5366c6fd2807SJeff Garzik  */
5367c6fd2807SJeff Garzik 
5368c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5369c6fd2807SJeff Garzik {
53709af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5371c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5372c6fd2807SJeff Garzik 
5373c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5374c6fd2807SJeff Garzik 	if (qc) {
5375c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5376c6fd2807SJeff Garzik 		qc->ap = ap;
5377c6fd2807SJeff Garzik 		qc->dev = dev;
5378c6fd2807SJeff Garzik 
5379c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5380c6fd2807SJeff Garzik 	}
5381c6fd2807SJeff Garzik 
5382c6fd2807SJeff Garzik 	return qc;
5383c6fd2807SJeff Garzik }
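
/*
 * Editor's sketch (hedged; a hypothetical caller, not a new libata
 * path): the usual lifecycle is allocate, fill in the taskfile and
 * completion hook, then hand the qc to ata_qc_issue().  ata_qc_free()
 * is only for a qc that never gets issued:
 *
 *	struct ata_queued_cmd *qc = ata_qc_new_init(dev);
 *	if (!qc)
 *		return;				// no free tag or port frozen
 *	// ... set up qc->tf, qc->flags, qc->complete_fn, map the data ...
 *	ata_qc_issue(qc);			// completion releases the tag
 */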
5384c6fd2807SJeff Garzik 
5385c6fd2807SJeff Garzik /**
5386c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5387c6fd2807SJeff Garzik  *	@qc: Command to complete
5388c6fd2807SJeff Garzik  *
5389c6fd2807SJeff Garzik  *	Designed to free an unused ata_queued_cmd object
5390c6fd2807SJeff Garzik  *	in case something prevents using it.
5391c6fd2807SJeff Garzik  *
5392c6fd2807SJeff Garzik  *	LOCKING:
5393cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5394c6fd2807SJeff Garzik  */
5395c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5396c6fd2807SJeff Garzik {
5397c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5398c6fd2807SJeff Garzik 	unsigned int tag;
5399c6fd2807SJeff Garzik 
5400c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5401c6fd2807SJeff Garzik 
5402c6fd2807SJeff Garzik 	qc->flags = 0;
5403c6fd2807SJeff Garzik 	tag = qc->tag;
5404c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5405c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5406c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5407c6fd2807SJeff Garzik 	}
5408c6fd2807SJeff Garzik }
5409c6fd2807SJeff Garzik 
5410c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5411c6fd2807SJeff Garzik {
5412c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
54139af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5414c6fd2807SJeff Garzik 
5415c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5416c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5417c6fd2807SJeff Garzik 
5418c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5419c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5420c6fd2807SJeff Garzik 
5421c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5422da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
54239af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5424da917d69STejun Heo 		if (!link->sactive)
5425da917d69STejun Heo 			ap->nr_active_links--;
5426da917d69STejun Heo 	} else {
54279af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5428da917d69STejun Heo 		ap->nr_active_links--;
5429da917d69STejun Heo 	}
5430da917d69STejun Heo 
5431da917d69STejun Heo 	/* clear exclusive status */
5432da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5433da917d69STejun Heo 		     ap->excl_link == link))
5434da917d69STejun Heo 		ap->excl_link = NULL;
5435c6fd2807SJeff Garzik 
5436c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5437c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5438c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5439c6fd2807SJeff Garzik 	 */
5440c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5441c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5442c6fd2807SJeff Garzik 
5443c6fd2807SJeff Garzik 	/* call completion callback */
5444c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5445c6fd2807SJeff Garzik }
5446c6fd2807SJeff Garzik 
544739599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
544839599a53STejun Heo {
544939599a53STejun Heo 	struct ata_port *ap = qc->ap;
545039599a53STejun Heo 
545139599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
54524742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
545339599a53STejun Heo }
545439599a53STejun Heo 
5455c6fd2807SJeff Garzik /**
5456c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5457c6fd2807SJeff Garzik  *	@qc: Command to complete
5459c6fd2807SJeff Garzik  *
5460c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5461c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5462c6fd2807SJeff Garzik  *
5463c6fd2807SJeff Garzik  *	LOCKING:
5464cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5465c6fd2807SJeff Garzik  */
5466c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5467c6fd2807SJeff Garzik {
5468c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5469c6fd2807SJeff Garzik 
5470c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5471c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5472c6fd2807SJeff Garzik 	 *
5473c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5474c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5475c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5476c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5477c6fd2807SJeff Garzik 	 *
5478c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5479c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5480c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5481c6fd2807SJeff Garzik 	 * taken care of.
5482c6fd2807SJeff Garzik 	 */
5483c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5484c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5485c6fd2807SJeff Garzik 
5486c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5487c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5488c6fd2807SJeff Garzik 
5489c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5490c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5491c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
549239599a53STejun Heo 				fill_result_tf(qc);
5493c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5494c6fd2807SJeff Garzik 				return;
5495c6fd2807SJeff Garzik 			}
5496c6fd2807SJeff Garzik 		}
5497c6fd2807SJeff Garzik 
5498c6fd2807SJeff Garzik 		/* read result TF if requested */
5499c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
550039599a53STejun Heo 			fill_result_tf(qc);
5501c6fd2807SJeff Garzik 
5502c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5503c6fd2807SJeff Garzik 	} else {
5504c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5505c6fd2807SJeff Garzik 			return;
5506c6fd2807SJeff Garzik 
5507c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5508c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
550939599a53STejun Heo 			fill_result_tf(qc);
5510c6fd2807SJeff Garzik 
5511c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5512c6fd2807SJeff Garzik 	}
5513c6fd2807SJeff Garzik }
5514c6fd2807SJeff Garzik 
5515c6fd2807SJeff Garzik /**
5516c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5517c6fd2807SJeff Garzik  *	@ap: port in question
5518c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5519c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5520c6fd2807SJeff Garzik  *
5521c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5522c6fd2807SJeff Garzik  *	called from low-level driver's interrupt routine to complete
5523c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5524c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5525c6fd2807SJeff Garzik  *
5526c6fd2807SJeff Garzik  *	LOCKING:
5527cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5528c6fd2807SJeff Garzik  *
5529c6fd2807SJeff Garzik  *	RETURNS:
5530c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5531c6fd2807SJeff Garzik  */
5532c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5533c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5534c6fd2807SJeff Garzik {
5535c6fd2807SJeff Garzik 	int nr_done = 0;
5536c6fd2807SJeff Garzik 	u32 done_mask;
5537c6fd2807SJeff Garzik 	int i;
5538c6fd2807SJeff Garzik 
5539c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5540c6fd2807SJeff Garzik 
5541c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5542c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5543c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5544c6fd2807SJeff Garzik 		return -EINVAL;
5545c6fd2807SJeff Garzik 	}
5546c6fd2807SJeff Garzik 
5547c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5548c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5549c6fd2807SJeff Garzik 
5550c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5551c6fd2807SJeff Garzik 			continue;
5552c6fd2807SJeff Garzik 
5553c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5554c6fd2807SJeff Garzik 			if (finish_qc)
5555c6fd2807SJeff Garzik 				finish_qc(qc);
5556c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5557c6fd2807SJeff Garzik 			nr_done++;
5558c6fd2807SJeff Garzik 		}
5559c6fd2807SJeff Garzik 	}
5560c6fd2807SJeff Garzik 
5561c6fd2807SJeff Garzik 	return nr_done;
5562c6fd2807SJeff Garzik }
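
/*
 * Editor's note (worked example): with ap->qc_active == 0x0f and a new
 * qc_active of 0x05, done_mask == 0x0f ^ 0x05 == 0x0a, so tags 1 and 3
 * get completed.  done_mask & qc_active == 0 here, which is exactly the
 * sanity check above: a completed tag must never reappear as active.
 */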
5563c6fd2807SJeff Garzik 
5564c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5565c6fd2807SJeff Garzik {
5566c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5567c6fd2807SJeff Garzik 
5568c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5569c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5570c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5571c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5572c6fd2807SJeff Garzik 		return 1;
5573c6fd2807SJeff Garzik 
5574c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5575c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5576c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5577c6fd2807SJeff Garzik 			return 1;
5578c6fd2807SJeff Garzik 
5579c6fd2807SJeff Garzik 		/* fall through */
5580c6fd2807SJeff Garzik 
5581c6fd2807SJeff Garzik 	default:
5582c6fd2807SJeff Garzik 		return 0;
5583c6fd2807SJeff Garzik 	}
5584c6fd2807SJeff Garzik 
5585c6fd2807SJeff Garzik 	/* never reached */
5586c6fd2807SJeff Garzik }
5587c6fd2807SJeff Garzik 
5588c6fd2807SJeff Garzik /**
5589c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5590c6fd2807SJeff Garzik  *	@qc: command to issue to device
5591c6fd2807SJeff Garzik  *
5592c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5593c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5594c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5595c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5596c6fd2807SJeff Garzik  *
5597c6fd2807SJeff Garzik  *	LOCKING:
5598cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5599c6fd2807SJeff Garzik  */
5600c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5601c6fd2807SJeff Garzik {
5602c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
56039af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5604c6fd2807SJeff Garzik 
5605c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5606c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5607c6fd2807SJeff Garzik 	 * request ATAPI sense.
5608c6fd2807SJeff Garzik 	 */
56099af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5610c6fd2807SJeff Garzik 
5611c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
56129af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
5613da917d69STejun Heo 
5614da917d69STejun Heo 		if (!link->sactive)
5615da917d69STejun Heo 			ap->nr_active_links++;
56169af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
5617c6fd2807SJeff Garzik 	} else {
56189af5c9c9STejun Heo 		WARN_ON(link->sactive);
5619da917d69STejun Heo 
5620da917d69STejun Heo 		ap->nr_active_links++;
56219af5c9c9STejun Heo 		link->active_tag = qc->tag;
5622c6fd2807SJeff Garzik 	}
5623c6fd2807SJeff Garzik 
5624c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5625c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5626c6fd2807SJeff Garzik 
5627c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5628c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5629c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5630c6fd2807SJeff Garzik 				goto sg_err;
5631c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5632c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5633c6fd2807SJeff Garzik 				goto sg_err;
5634c6fd2807SJeff Garzik 		}
5635c6fd2807SJeff Garzik 	} else {
5636c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5637c6fd2807SJeff Garzik 	}
5638c6fd2807SJeff Garzik 
5639c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5640c6fd2807SJeff Garzik 
5641c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5642c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5643c6fd2807SJeff Garzik 		goto err;
5644c6fd2807SJeff Garzik 	return;
5645c6fd2807SJeff Garzik 
5646c6fd2807SJeff Garzik sg_err:
5647c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5648c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5649c6fd2807SJeff Garzik err:
5650c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5651c6fd2807SJeff Garzik }
5652c6fd2807SJeff Garzik 
5653c6fd2807SJeff Garzik /**
5654c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5655c6fd2807SJeff Garzik  *	@qc: command to issue to device
5656c6fd2807SJeff Garzik  *
5657c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5658c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5659c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5660c6fd2807SJeff Garzik  *	is slightly different.
5661c6fd2807SJeff Garzik  *
5662c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5663c6fd2807SJeff Garzik  *
5664c6fd2807SJeff Garzik  *	LOCKING:
5665cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5666c6fd2807SJeff Garzik  *
5667c6fd2807SJeff Garzik  *	RETURNS:
5668c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
5669c6fd2807SJeff Garzik  */
5670c6fd2807SJeff Garzik 
5671c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5672c6fd2807SJeff Garzik {
5673c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5674c6fd2807SJeff Garzik 
5675c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
5676c6fd2807SJeff Garzik 	 * interrupt-driven PIO and the ATAPI CDB interrupt.
5677c6fd2807SJeff Garzik 	 */
5678c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5679c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
5680c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
5681e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
5682c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
5683c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
5684c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
5685c6fd2807SJeff Garzik 			break;
5686c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
5687c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5688c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
5689c6fd2807SJeff Garzik 				BUG();
5690c6fd2807SJeff Garzik 			break;
5691c6fd2807SJeff Garzik 		default:
5692c6fd2807SJeff Garzik 			break;
5693c6fd2807SJeff Garzik 		}
5694c6fd2807SJeff Garzik 	}
5695c6fd2807SJeff Garzik 
5696c6fd2807SJeff Garzik 	/* select the device */
5697c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5698c6fd2807SJeff Garzik 
5699c6fd2807SJeff Garzik 	/* start the command */
5700c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5701c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
5702c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5703c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5704c6fd2807SJeff Garzik 
5705c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5706c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5707c6fd2807SJeff Garzik 
5708c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5709c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5710c6fd2807SJeff Garzik 
5711c6fd2807SJeff Garzik 		break;
5712c6fd2807SJeff Garzik 
5713c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5714c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5715c6fd2807SJeff Garzik 
5716c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5717c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5718c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5719c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5720c6fd2807SJeff Garzik 		break;
5721c6fd2807SJeff Garzik 
5722c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5723c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5724c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5725c6fd2807SJeff Garzik 
5726c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5727c6fd2807SJeff Garzik 
5728c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5729c6fd2807SJeff Garzik 			/* PIO data out protocol */
5730c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
5731c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5732c6fd2807SJeff Garzik 
5733c6fd2807SJeff Garzik 			/* always send first data block using
5734c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
5735c6fd2807SJeff Garzik 			 */
5736c6fd2807SJeff Garzik 		} else {
5737c6fd2807SJeff Garzik 			/* PIO data in protocol */
5738c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5739c6fd2807SJeff Garzik 
5740c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5741c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5742c6fd2807SJeff Garzik 
5743c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
5744c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
5745c6fd2807SJeff Garzik 			 */
5746c6fd2807SJeff Garzik 		}
5747c6fd2807SJeff Garzik 
5748c6fd2807SJeff Garzik 		break;
5749c6fd2807SJeff Garzik 
5750c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5751c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5752c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5753c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5754c6fd2807SJeff Garzik 
5755c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5756c6fd2807SJeff Garzik 
5757c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5758c6fd2807SJeff Garzik 
5759c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5760c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5761c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5762c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5763c6fd2807SJeff Garzik 		break;
5764c6fd2807SJeff Garzik 
5765c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5766c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5767c6fd2807SJeff Garzik 
5768c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5769c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5770c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5771c6fd2807SJeff Garzik 
5772c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5773c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5774c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5775c6fd2807SJeff Garzik 		break;
5776c6fd2807SJeff Garzik 
5777c6fd2807SJeff Garzik 	default:
5778c6fd2807SJeff Garzik 		WARN_ON(1);
5779c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
5780c6fd2807SJeff Garzik 	}
5781c6fd2807SJeff Garzik 
5782c6fd2807SJeff Garzik 	return 0;
5783c6fd2807SJeff Garzik }
5784c6fd2807SJeff Garzik 
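/*
 * Usage sketch: how a conventional BMDMA LLD might plug
 * ata_qc_issue_prot() into its ata_port_operations, as the "May be used
 * as the qc_issue() entry" note above suggests.  The struct name is a
 * placeholder and only a few standard SFF/BMDMA helpers are listed; the
 * full callback set depends on the libata version in use.
 */
static const struct ata_port_operations example_bmdma_ops = {
	.tf_load	= ata_tf_load,		/* taskfile load helper */
	.bmdma_setup	= ata_bmdma_setup,	/* generic BMDMA helpers */
	.bmdma_start	= ata_bmdma_start,
	.bmdma_stop	= ata_bmdma_stop,
	.bmdma_status	= ata_bmdma_status,
	.qc_issue	= ata_qc_issue_prot,	/* this function */
	.port_start	= ata_port_start,
};
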
5785c6fd2807SJeff Garzik /**
5786c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
5787c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
5788c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
5789c6fd2807SJeff Garzik  *
5790c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
5791c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
5792c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
5793c6fd2807SJeff Garzik  *
5794c6fd2807SJeff Garzik  *	LOCKING:
5795cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5796c6fd2807SJeff Garzik  *
5797c6fd2807SJeff Garzik  *	RETURNS:
5798c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
5799c6fd2807SJeff Garzik  */
5800c6fd2807SJeff Garzik 
5801c6fd2807SJeff Garzik inline unsigned int ata_host_intr (struct ata_port *ap,
5802c6fd2807SJeff Garzik 				   struct ata_queued_cmd *qc)
5803c6fd2807SJeff Garzik {
58049af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
5805c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
5806c6fd2807SJeff Garzik 
5807c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
580844877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5809c6fd2807SJeff Garzik 
5810c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
5811c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5812c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5813c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
5814c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
5815c6fd2807SJeff Garzik 		 */
5816c6fd2807SJeff Garzik 
5817c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5818c6fd2807SJeff Garzik 		 * The flag is set only for ATAPI devices, so there is
5819c6fd2807SJeff Garzik 		 * no need to check is_atapi_taskfile(&qc->tf) again.
5820c6fd2807SJeff Garzik 		 */
5821c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5822c6fd2807SJeff Garzik 			goto idle_irq;
5823c6fd2807SJeff Garzik 		break;
5824c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5825c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
5826c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5827c6fd2807SJeff Garzik 			/* check status of DMA engine */
5828c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
582944877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
583044877b4eSTejun Heo 				ap->print_id, host_stat);
5831c6fd2807SJeff Garzik 
5832c6fd2807SJeff Garzik 			/* if it's not our irq... */
5833c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
5834c6fd2807SJeff Garzik 				goto idle_irq;
5835c6fd2807SJeff Garzik 
5836c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
5837c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
5838c6fd2807SJeff Garzik 
5839c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
5840c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
5841c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
5842c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5843c6fd2807SJeff Garzik 			}
5844c6fd2807SJeff Garzik 		}
5845c6fd2807SJeff Garzik 		break;
5846c6fd2807SJeff Garzik 	case HSM_ST:
5847c6fd2807SJeff Garzik 		break;
5848c6fd2807SJeff Garzik 	default:
5849c6fd2807SJeff Garzik 		goto idle_irq;
5850c6fd2807SJeff Garzik 	}
5851c6fd2807SJeff Garzik 
5852c6fd2807SJeff Garzik 	/* check altstatus */
5853c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
5854c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
5855c6fd2807SJeff Garzik 		goto idle_irq;
5856c6fd2807SJeff Garzik 
5857c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
5858c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
5859c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
5860c6fd2807SJeff Garzik 		goto idle_irq;
5861c6fd2807SJeff Garzik 
5862c6fd2807SJeff Garzik 	/* ack bmdma irq events */
5863c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
5864c6fd2807SJeff Garzik 
5865c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
5866ea54763fSTejun Heo 
5867ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5868ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5869ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5870ea54763fSTejun Heo 
5871c6fd2807SJeff Garzik 	return 1;	/* irq handled */
5872c6fd2807SJeff Garzik 
5873c6fd2807SJeff Garzik idle_irq:
5874c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
5875c6fd2807SJeff Garzik 
5876c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
5877c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
58786d32d30fSJeff Garzik 		ata_chk_status(ap);
58796d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
5880c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5881c6fd2807SJeff Garzik 		return 1;
5882c6fd2807SJeff Garzik 	}
5883c6fd2807SJeff Garzik #endif
5884c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
5885c6fd2807SJeff Garzik }
5886c6fd2807SJeff Garzik 
5887c6fd2807SJeff Garzik /**
5888c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
5889c6fd2807SJeff Garzik  *	@irq: irq line (unused)
5890cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
5891c6fd2807SJeff Garzik  *
5892c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
5893c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
5894c6fd2807SJeff Garzik  *
5895c6fd2807SJeff Garzik  *	LOCKING:
5896cca3974eSJeff Garzik  *	Obtains host lock during operation.
5897c6fd2807SJeff Garzik  *
5898c6fd2807SJeff Garzik  *	RETURNS:
5899c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
5900c6fd2807SJeff Garzik  */
5901c6fd2807SJeff Garzik 
59027d12e780SDavid Howells irqreturn_t ata_interrupt (int irq, void *dev_instance)
5903c6fd2807SJeff Garzik {
5904cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
5905c6fd2807SJeff Garzik 	unsigned int i;
5906c6fd2807SJeff Garzik 	unsigned int handled = 0;
5907c6fd2807SJeff Garzik 	unsigned long flags;
5908c6fd2807SJeff Garzik 
5909c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5910cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
5911c6fd2807SJeff Garzik 
5912cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
5913c6fd2807SJeff Garzik 		struct ata_port *ap;
5914c6fd2807SJeff Garzik 
5915cca3974eSJeff Garzik 		ap = host->ports[i];
5916c6fd2807SJeff Garzik 		if (ap &&
5917c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
5918c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
5919c6fd2807SJeff Garzik 
59209af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
5921c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5922c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
5923c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
5924c6fd2807SJeff Garzik 		}
5925c6fd2807SJeff Garzik 	}
5926c6fd2807SJeff Garzik 
5927cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
5928c6fd2807SJeff Garzik 
5929c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
5930c6fd2807SJeff Garzik }
5931c6fd2807SJeff Garzik 
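/*
 * Usage sketch: ata_interrupt() is what most PCI IDE LLDs hand to
 * ata_host_activate() (defined later in this file).  IRQF_SHARED is
 * appropriate because the handler returns IRQ_NONE for interrupts it
 * did not consume.  "example_sht" is a placeholder template.
 */
static int example_activate(struct ata_host *host, struct pci_dev *pdev,
			    struct scsi_host_template *example_sht)
{
	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, example_sht);
}
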
5932c6fd2807SJeff Garzik /**
5933c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
5934936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
5935c6fd2807SJeff Garzik  *
5936936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
5937c6fd2807SJeff Garzik  *
5938c6fd2807SJeff Garzik  *	LOCKING:
5939c6fd2807SJeff Garzik  *	None.
5940c6fd2807SJeff Garzik  *
5941c6fd2807SJeff Garzik  *	RETURNS:
5942c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
5943c6fd2807SJeff Garzik  */
5944936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
5945c6fd2807SJeff Garzik {
5946936fd732STejun Heo 	struct ata_port *ap = link->ap;
5947936fd732STejun Heo 
5948a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5949c6fd2807SJeff Garzik }
5950c6fd2807SJeff Garzik 
5951c6fd2807SJeff Garzik /**
5952c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
5953936fd732STejun Heo  *	@link: ATA link to read SCR for
5954c6fd2807SJeff Garzik  *	@reg: SCR to read
5955c6fd2807SJeff Garzik  *	@val: Place to store read value
5956c6fd2807SJeff Garzik  *
5957936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
5958c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5959c6fd2807SJeff Garzik  *	and the port implements ->scr_read.
5960c6fd2807SJeff Garzik  *
5961c6fd2807SJeff Garzik  *	LOCKING:
5962c6fd2807SJeff Garzik  *	None.
5963c6fd2807SJeff Garzik  *
5964c6fd2807SJeff Garzik  *	RETURNS:
5965c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5966c6fd2807SJeff Garzik  */
5967936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5968c6fd2807SJeff Garzik {
5969936fd732STejun Heo 	struct ata_port *ap = link->ap;
5970936fd732STejun Heo 
5971936fd732STejun Heo 	if (sata_scr_valid(link))
5972da3dbb17STejun Heo 		return ap->ops->scr_read(ap, reg, val);
5973c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5974c6fd2807SJeff Garzik }
5975c6fd2807SJeff Garzik 
5976c6fd2807SJeff Garzik /**
5977c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
5978936fd732STejun Heo  *	@link: ATA link to write SCR for
5979c6fd2807SJeff Garzik  *	@reg: SCR to write
5980c6fd2807SJeff Garzik  *	@val: value to write
5981c6fd2807SJeff Garzik  *
5982936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
5983c6fd2807SJeff Garzik  *	guaranteed to succeed if the cable type of the port is SATA
5984c6fd2807SJeff Garzik  *	and the port implements ->scr_write.
5985c6fd2807SJeff Garzik  *
5986c6fd2807SJeff Garzik  *	LOCKING:
5987c6fd2807SJeff Garzik  *	None.
5988c6fd2807SJeff Garzik  *
5989c6fd2807SJeff Garzik  *	RETURNS:
5990c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
5991c6fd2807SJeff Garzik  */
5992936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
5993c6fd2807SJeff Garzik {
5994936fd732STejun Heo 	struct ata_port *ap = link->ap;
5995936fd732STejun Heo 
5996936fd732STejun Heo 	if (sata_scr_valid(link))
5997da3dbb17STejun Heo 		return ap->ops->scr_write(ap, reg, val);
5998c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
5999c6fd2807SJeff Garzik }
6000c6fd2807SJeff Garzik 
6001c6fd2807SJeff Garzik /**
6002c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6003936fd732STejun Heo  *	@link: ATA link to write SCR for
6004c6fd2807SJeff Garzik  *	@reg: SCR to write
6005c6fd2807SJeff Garzik  *	@val: value to write
6006c6fd2807SJeff Garzik  *
6007c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that it
6008c6fd2807SJeff Garzik  *	flushes the write by reading the register back afterwards.
6009c6fd2807SJeff Garzik  *
6010c6fd2807SJeff Garzik  *	LOCKING:
6011c6fd2807SJeff Garzik  *	None.
6012c6fd2807SJeff Garzik  *
6013c6fd2807SJeff Garzik  *	RETURNS:
6014c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6015c6fd2807SJeff Garzik  */
6016936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6017c6fd2807SJeff Garzik {
6018936fd732STejun Heo 	struct ata_port *ap = link->ap;
6019da3dbb17STejun Heo 	int rc;
6020da3dbb17STejun Heo 
6021936fd732STejun Heo 	if (sata_scr_valid(link)) {
6022da3dbb17STejun Heo 		rc = ap->ops->scr_write(ap, reg, val);
6023da3dbb17STejun Heo 		if (rc == 0)
6024da3dbb17STejun Heo 			rc = ap->ops->scr_read(ap, reg, &val);
6025da3dbb17STejun Heo 		return rc;
6026c6fd2807SJeff Garzik 	}
6027c6fd2807SJeff Garzik 	return -EOPNOTSUPP;
6028c6fd2807SJeff Garzik }
6029c6fd2807SJeff Garzik 
6030c6fd2807SJeff Garzik /**
6031936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6032936fd732STejun Heo  *	@link: ATA link to test
6033c6fd2807SJeff Garzik  *
6034936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6035936fd732STejun Heo  *	0 if online status of @link cannot be obtained, so
6036936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6037c6fd2807SJeff Garzik  *
6038c6fd2807SJeff Garzik  *	LOCKING:
6039c6fd2807SJeff Garzik  *	None.
6040c6fd2807SJeff Garzik  *
6041c6fd2807SJeff Garzik  *	RETURNS:
6042c6fd2807SJeff Garzik  *	1 if the link's online status is available and the link is online.
6043c6fd2807SJeff Garzik  */
6044936fd732STejun Heo int ata_link_online(struct ata_link *link)
6045c6fd2807SJeff Garzik {
6046c6fd2807SJeff Garzik 	u32 sstatus;
6047c6fd2807SJeff Garzik 
6048936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6049936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6050c6fd2807SJeff Garzik 		return 1;
6051c6fd2807SJeff Garzik 	return 0;
6052c6fd2807SJeff Garzik }
6053c6fd2807SJeff Garzik 
6054c6fd2807SJeff Garzik /**
6055936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6056936fd732STejun Heo  *	@link: ATA link to test
6057c6fd2807SJeff Garzik  *
6058936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6059936fd732STejun Heo  *	returns 0 if offline status of @link cannot be obtained, so
6060936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6061c6fd2807SJeff Garzik  *
6062c6fd2807SJeff Garzik  *	LOCKING:
6063c6fd2807SJeff Garzik  *	None.
6064c6fd2807SJeff Garzik  *
6065c6fd2807SJeff Garzik  *	RETURNS:
6066c6fd2807SJeff Garzik  *	1 if the link's offline status is available and the link is offline.
6067c6fd2807SJeff Garzik  */
6068936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6069c6fd2807SJeff Garzik {
6070c6fd2807SJeff Garzik 	u32 sstatus;
6071c6fd2807SJeff Garzik 
6072936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6073936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6074c6fd2807SJeff Garzik 		return 1;
6075c6fd2807SJeff Garzik 	return 0;
6076c6fd2807SJeff Garzik }
6077c6fd2807SJeff Garzik 
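/*
 * Usage sketch: reading SStatus through the SCR helpers above and
 * decoding its DET and SPD fields.  The message wording is illustrative
 * only; sata_scr_valid() guards against PATA ports and hosts without
 * ->scr_read.
 */
static void example_report_link(struct ata_link *link)
{
	u32 sstatus;

	if (!sata_scr_valid(link))
		return;		/* PATA or no SCR access */

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		ata_port_printk(link->ap, KERN_INFO,
				"SStatus 0x%x (DET 0x%x SPD 0x%x), link %s\n",
				sstatus, sstatus & 0xf, (sstatus >> 4) & 0xf,
				ata_link_online(link) ? "online" : "offline");
}
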
6078c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6079c6fd2807SJeff Garzik {
6080c6fd2807SJeff Garzik 	unsigned int err_mask;
6081c6fd2807SJeff Garzik 	u8 cmd;
6082c6fd2807SJeff Garzik 
6083c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6084c6fd2807SJeff Garzik 		return 0;
6085c6fd2807SJeff Garzik 
60866fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6087c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6088c6fd2807SJeff Garzik 	else
6089c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6090c6fd2807SJeff Garzik 
60914f34337bSAlan Cox 	/* This is wrong. On a failed flush we get back the LBA of the lost
60924f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
60934f34337bSAlan Cox 	   a further flush command to continue the writeback until it
60944f34337bSAlan Cox 	   does not error */
6095c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6096c6fd2807SJeff Garzik 	if (err_mask) {
6097c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6098c6fd2807SJeff Garzik 		return -EIO;
6099c6fd2807SJeff Garzik 	}
6100c6fd2807SJeff Garzik 
6101c6fd2807SJeff Garzik 	return 0;
6102c6fd2807SJeff Garzik }
6103c6fd2807SJeff Garzik 
61046ffa01d8STejun Heo #ifdef CONFIG_PM
6105cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6106cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6107cca3974eSJeff Garzik 			       int wait)
6108c6fd2807SJeff Garzik {
6109c6fd2807SJeff Garzik 	unsigned long flags;
6110c6fd2807SJeff Garzik 	int i, rc;
6111c6fd2807SJeff Garzik 
6112cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6113cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6114e3667ebfSTejun Heo 		struct ata_link *link;
6115c6fd2807SJeff Garzik 
6116c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6117c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6118c6fd2807SJeff Garzik 		 */
6119c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6120c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6121c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6122c6fd2807SJeff Garzik 		}
6123c6fd2807SJeff Garzik 
6124c6fd2807SJeff Garzik 		/* request PM ops to EH */
6125c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6126c6fd2807SJeff Garzik 
6127c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6128c6fd2807SJeff Garzik 		if (wait) {
6129c6fd2807SJeff Garzik 			rc = 0;
6130c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6131c6fd2807SJeff Garzik 		}
6132c6fd2807SJeff Garzik 
6133c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6134e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6135e3667ebfSTejun Heo 			link->eh_info.action |= action;
6136e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6137e3667ebfSTejun Heo 		}
6138c6fd2807SJeff Garzik 
6139c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6140c6fd2807SJeff Garzik 
6141c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6142c6fd2807SJeff Garzik 
6143c6fd2807SJeff Garzik 		/* wait and check result */
6144c6fd2807SJeff Garzik 		if (wait) {
6145c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6146c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6147c6fd2807SJeff Garzik 			if (rc)
6148c6fd2807SJeff Garzik 				return rc;
6149c6fd2807SJeff Garzik 		}
6150c6fd2807SJeff Garzik 	}
6151c6fd2807SJeff Garzik 
6152c6fd2807SJeff Garzik 	return 0;
6153c6fd2807SJeff Garzik }
6154c6fd2807SJeff Garzik 
6155c6fd2807SJeff Garzik /**
6156cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6157cca3974eSJeff Garzik  *	@host: host to suspend
6158c6fd2807SJeff Garzik  *	@mesg: PM message
6159c6fd2807SJeff Garzik  *
6160cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6161c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6162c6fd2807SJeff Garzik  *	to finish.
6163c6fd2807SJeff Garzik  *
6164c6fd2807SJeff Garzik  *	LOCKING:
6165c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6166c6fd2807SJeff Garzik  *
6167c6fd2807SJeff Garzik  *	RETURNS:
6168c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6169c6fd2807SJeff Garzik  */
6170cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6171c6fd2807SJeff Garzik {
61729666f400STejun Heo 	int rc;
6173c6fd2807SJeff Garzik 
6174cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
61759666f400STejun Heo 	if (rc == 0)
6176cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
6177c6fd2807SJeff Garzik 	return rc;
6178c6fd2807SJeff Garzik }
6179c6fd2807SJeff Garzik 
6180c6fd2807SJeff Garzik /**
6181cca3974eSJeff Garzik  *	ata_host_resume - resume host
6182cca3974eSJeff Garzik  *	@host: host to resume
6183c6fd2807SJeff Garzik  *
6184cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6185c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6186c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
6187c6fd2807SJeff Garzik  *
6188c6fd2807SJeff Garzik  *	LOCKING:
6189c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6190c6fd2807SJeff Garzik  */
6191cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6192c6fd2807SJeff Garzik {
6193cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6194c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6195cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6196c6fd2807SJeff Garzik }
61976ffa01d8STejun Heo #endif
6198c6fd2807SJeff Garzik 
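/*
 * Usage sketch (assumes CONFIG_PM and <linux/platform_device.h>): a
 * non-PCI LLD typically forwards its bus suspend/resume callbacks to
 * ata_host_suspend()/ata_host_resume(), much like the PCI helpers near
 * the end of this file.  The "example_" names are placeholders.
 */
static int example_platform_suspend(struct platform_device *pdev,
				    pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return ata_host_suspend(host, mesg);
}

static int example_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_host_resume(host);
	return 0;
}
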
6199c6fd2807SJeff Garzik /**
6200c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6201c6fd2807SJeff Garzik  *	@ap: Port to initialize
6202c6fd2807SJeff Garzik  *
6203c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6204c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
6205c6fd2807SJeff Garzik  *
6206c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6207c6fd2807SJeff Garzik  *
6208c6fd2807SJeff Garzik  *	LOCKING:
6209c6fd2807SJeff Garzik  *	Inherited from caller.
6210c6fd2807SJeff Garzik  */
6211c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6212c6fd2807SJeff Garzik {
6213c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6214c6fd2807SJeff Garzik 	int rc;
6215c6fd2807SJeff Garzik 
6216f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6217f0d36efdSTejun Heo 				      GFP_KERNEL);
6218c6fd2807SJeff Garzik 	if (!ap->prd)
6219c6fd2807SJeff Garzik 		return -ENOMEM;
6220c6fd2807SJeff Garzik 
6221c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
6222f0d36efdSTejun Heo 	if (rc)
6223c6fd2807SJeff Garzik 		return rc;
6224c6fd2807SJeff Garzik 
6225f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6226f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
6227c6fd2807SJeff Garzik 	return 0;
6228c6fd2807SJeff Garzik }
6229c6fd2807SJeff Garzik 
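/*
 * Usage sketch: an LLD that needs per-port private state usually calls
 * ata_port_start() first and then hangs its own data off
 * ap->private_data.  struct example_port_priv is hypothetical.
 */
struct example_port_priv {
	void __iomem *ctl_block;	/* controller-specific state */
};

static int example_port_start(struct ata_port *ap)
{
	struct example_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* allocates the PRD table */
	if (rc)
		return rc;

	pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	ap->private_data = pp;
	return 0;
}
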
6230c6fd2807SJeff Garzik /**
6231c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6232c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6233c6fd2807SJeff Garzik  *
6234c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6235c6fd2807SJeff Garzik  *
6236c6fd2807SJeff Garzik  *	LOCKING:
6237c6fd2807SJeff Garzik  *	Inherited from caller.
6238c6fd2807SJeff Garzik  */
6239c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6240c6fd2807SJeff Garzik {
62419af5c9c9STejun Heo 	struct ata_link *link = dev->link;
62429af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6243c6fd2807SJeff Garzik 	unsigned long flags;
6244c6fd2807SJeff Garzik 
6245c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
62469af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
62479af5c9c9STejun Heo 	link->sata_spd = 0;
6248c6fd2807SJeff Garzik 
6249c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6250c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6251cca3974eSJeff Garzik 	 * host lock.
6252c6fd2807SJeff Garzik 	 */
6253c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6254c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
62553dcc323fSTejun Heo 	dev->horkage = 0;
6256c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6257c6fd2807SJeff Garzik 
6258c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6259c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6260c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6261c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6262c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6263c6fd2807SJeff Garzik }
6264c6fd2807SJeff Garzik 
6265c6fd2807SJeff Garzik /**
62664fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
62674fb37a25STejun Heo  *	@ap: ATA port link is attached to
62684fb37a25STejun Heo  *	@link: Link structure to initialize
62698989805dSTejun Heo  *	@pmp: Port multiplier port number
62704fb37a25STejun Heo  *
62714fb37a25STejun Heo  *	Initialize @link.
62724fb37a25STejun Heo  *
62734fb37a25STejun Heo  *	LOCKING:
62744fb37a25STejun Heo  *	Kernel thread context (may sleep)
62754fb37a25STejun Heo  */
6276fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
62774fb37a25STejun Heo {
62784fb37a25STejun Heo 	int i;
62794fb37a25STejun Heo 
62804fb37a25STejun Heo 	/* clear everything except for devices */
62814fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
62824fb37a25STejun Heo 
62834fb37a25STejun Heo 	link->ap = ap;
62848989805dSTejun Heo 	link->pmp = pmp;
62854fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
62864fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
62874fb37a25STejun Heo 
62884fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
62894fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
62904fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
62914fb37a25STejun Heo 
62924fb37a25STejun Heo 		dev->link = link;
62934fb37a25STejun Heo 		dev->devno = dev - link->device;
62944fb37a25STejun Heo 		ata_dev_init(dev);
62954fb37a25STejun Heo 	}
62964fb37a25STejun Heo }
62974fb37a25STejun Heo 
62984fb37a25STejun Heo /**
62994fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
63004fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
63014fb37a25STejun Heo  *
63024fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
63034fb37a25STejun Heo  *	configured value.
63044fb37a25STejun Heo  *
63054fb37a25STejun Heo  *	LOCKING:
63064fb37a25STejun Heo  *	Kernel thread context (may sleep).
63074fb37a25STejun Heo  *
63084fb37a25STejun Heo  *	RETURNS:
63094fb37a25STejun Heo  *	0 on success, -errno on failure.
63104fb37a25STejun Heo  */
6311fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
63124fb37a25STejun Heo {
63134fb37a25STejun Heo 	u32 scontrol, spd;
63144fb37a25STejun Heo 	int rc;
63154fb37a25STejun Heo 
63164fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
63174fb37a25STejun Heo 	if (rc)
63184fb37a25STejun Heo 		return rc;
63194fb37a25STejun Heo 
63204fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
63214fb37a25STejun Heo 	if (spd)
63224fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
63234fb37a25STejun Heo 
63244fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
63254fb37a25STejun Heo 
63264fb37a25STejun Heo 	return 0;
63274fb37a25STejun Heo }
63284fb37a25STejun Heo 
63294fb37a25STejun Heo /**
6330f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6331f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6332c6fd2807SJeff Garzik  *
6333f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6334f3187195STejun Heo  *
6335f3187195STejun Heo  *	RETURNS:
6336f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6337c6fd2807SJeff Garzik  *
6338c6fd2807SJeff Garzik  *	LOCKING:
6339f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6340c6fd2807SJeff Garzik  */
6341f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6342c6fd2807SJeff Garzik {
6343f3187195STejun Heo 	struct ata_port *ap;
6344c6fd2807SJeff Garzik 
6345f3187195STejun Heo 	DPRINTK("ENTER\n");
6346f3187195STejun Heo 
6347f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6348f3187195STejun Heo 	if (!ap)
6349f3187195STejun Heo 		return NULL;
6350f3187195STejun Heo 
6351f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6352cca3974eSJeff Garzik 	ap->lock = &host->lock;
6353c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6354f3187195STejun Heo 	ap->print_id = -1;
6355c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6356cca3974eSJeff Garzik 	ap->host = host;
6357f3187195STejun Heo 	ap->dev = host->dev;
6358c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6359c6fd2807SJeff Garzik 
6360c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6361c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6362c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6363c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6364c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6365c6fd2807SJeff Garzik #else
6366c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6367c6fd2807SJeff Garzik #endif
6368c6fd2807SJeff Garzik 
636965f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
637065f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
637165f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6372c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6373c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
63745ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
63755ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
63765ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6377c6fd2807SJeff Garzik 
6378c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6379c6fd2807SJeff Garzik 
63808989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6381c6fd2807SJeff Garzik 
6382c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6383c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6384c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6385c6fd2807SJeff Garzik #endif
6386c6fd2807SJeff Garzik 	return ap;
6387c6fd2807SJeff Garzik }
6388c6fd2807SJeff Garzik 
6389f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6390f0d36efdSTejun Heo {
6391f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6392f0d36efdSTejun Heo 	int i;
6393f0d36efdSTejun Heo 
6394f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6395f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6396f0d36efdSTejun Heo 
6397ecef7253STejun Heo 		if (!ap)
6398ecef7253STejun Heo 			continue;
6399ecef7253STejun Heo 
6400ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6401f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6402f0d36efdSTejun Heo 	}
6403f0d36efdSTejun Heo 
6404ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6405f0d36efdSTejun Heo 		host->ops->host_stop(host);
64061aa56ccaSTejun Heo 
64071aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
64081aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
64091aa506e4STejun Heo 
64104911487aSTejun Heo 		if (!ap)
64114911487aSTejun Heo 			continue;
64124911487aSTejun Heo 
64134911487aSTejun Heo 		if (ap->scsi_host)
64141aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
64151aa506e4STejun Heo 
64164911487aSTejun Heo 		kfree(ap);
64171aa506e4STejun Heo 		host->ports[i] = NULL;
64181aa506e4STejun Heo 	}
64191aa506e4STejun Heo 
64201aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6421f0d36efdSTejun Heo }
6422f0d36efdSTejun Heo 
6423c6fd2807SJeff Garzik /**
6424f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6425f3187195STejun Heo  *	@dev: generic device this host is associated with
6426f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6427f3187195STejun Heo  *
6428f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6429f3187195STejun Heo  *	this function to allocate a host, initializes it fully and then
6430f3187195STejun Heo  *	attaches it using ata_host_register().
6431f3187195STejun Heo  *
6432f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6433f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6434f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6435f3187195STejun Heo  *	ports will be automatically freed on registration.
6436f3187195STejun Heo  *
6437f3187195STejun Heo  *	RETURNS:
6438f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6439f3187195STejun Heo  *
6440f3187195STejun Heo  *	LOCKING:
6441f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6442f3187195STejun Heo  */
6443f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6444f3187195STejun Heo {
6445f3187195STejun Heo 	struct ata_host *host;
6446f3187195STejun Heo 	size_t sz;
6447f3187195STejun Heo 	int i;
6448f3187195STejun Heo 
6449f3187195STejun Heo 	DPRINTK("ENTER\n");
6450f3187195STejun Heo 
6451f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6452f3187195STejun Heo 		return NULL;
6453f3187195STejun Heo 
6454f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6455f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6456f3187195STejun Heo 	/* allocate with devres so ata_host_release() runs on teardown */
6457f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6458f3187195STejun Heo 	if (!host)
6459f3187195STejun Heo 		goto err_out;
6460f3187195STejun Heo 
6461f3187195STejun Heo 	devres_add(dev, host);
6462f3187195STejun Heo 	dev_set_drvdata(dev, host);
6463f3187195STejun Heo 
6464f3187195STejun Heo 	spin_lock_init(&host->lock);
6465f3187195STejun Heo 	host->dev = dev;
6466f3187195STejun Heo 	host->n_ports = max_ports;
6467f3187195STejun Heo 
6468f3187195STejun Heo 	/* allocate ports bound to this host */
6469f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6470f3187195STejun Heo 		struct ata_port *ap;
6471f3187195STejun Heo 
6472f3187195STejun Heo 		ap = ata_port_alloc(host);
6473f3187195STejun Heo 		if (!ap)
6474f3187195STejun Heo 			goto err_out;
6475f3187195STejun Heo 
6476f3187195STejun Heo 		ap->port_no = i;
6477f3187195STejun Heo 		host->ports[i] = ap;
6478f3187195STejun Heo 	}
6479f3187195STejun Heo 
6480f3187195STejun Heo 	devres_remove_group(dev, NULL);
6481f3187195STejun Heo 	return host;
6482f3187195STejun Heo 
6483f3187195STejun Heo  err_out:
6484f3187195STejun Heo 	devres_release_group(dev, NULL);
6485f3187195STejun Heo 	return NULL;
6486f3187195STejun Heo }
6487f3187195STejun Heo 
6488f3187195STejun Heo /**
6489f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6490f5cda257STejun Heo  *	@dev: generic device this host is associated with
6491f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6492f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6493f5cda257STejun Heo  *
6494f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6495f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6496f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6497f5cda257STejun Heo  *
6498f5cda257STejun Heo  *	RETURNS:
6499f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6500f5cda257STejun Heo  *
6501f5cda257STejun Heo  *	LOCKING:
6502f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6503f5cda257STejun Heo  */
6504f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6505f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6506f5cda257STejun Heo 				      int n_ports)
6507f5cda257STejun Heo {
6508f5cda257STejun Heo 	const struct ata_port_info *pi;
6509f5cda257STejun Heo 	struct ata_host *host;
6510f5cda257STejun Heo 	int i, j;
6511f5cda257STejun Heo 
6512f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6513f5cda257STejun Heo 	if (!host)
6514f5cda257STejun Heo 		return NULL;
6515f5cda257STejun Heo 
6516f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6517f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6518f5cda257STejun Heo 
6519f5cda257STejun Heo 		if (ppi[j])
6520f5cda257STejun Heo 			pi = ppi[j++];
6521f5cda257STejun Heo 
6522f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6523f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6524f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6525f5cda257STejun Heo 		ap->flags |= pi->flags;
65260c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6527f5cda257STejun Heo 		ap->ops = pi->port_ops;
6528f5cda257STejun Heo 
6529f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6530f5cda257STejun Heo 			host->ops = pi->port_ops;
6531f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6532f5cda257STejun Heo 			host->private_data = pi->private_data;
6533f5cda257STejun Heo 	}
6534f5cda257STejun Heo 
6535f5cda257STejun Heo 	return host;
6536f5cda257STejun Heo }
6537f5cda257STejun Heo 
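/*
 * Usage sketch: the common ata_host_alloc_pinfo() pattern for a
 * controller whose ports all share one ata_port_info.  The mask values
 * and the nearly empty example_port_ops are illustrative; only fields
 * consumed by the loop above are filled in.
 */
static const struct ata_port_operations example_port_ops = {
	.port_start	= ata_port_start,	/* LLD callbacks go here */
};

static const struct ata_port_info example_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= 0x1f,		/* PIO 0-4 */
	.mwdma_mask	= 0x07,		/* MWDMA 0-2 */
	.udma_mask	= 0x7f,		/* UDMA 0-6 */
	.port_ops	= &example_port_ops,
};

static struct ata_host *example_alloc_host(struct device *dev, int n_ports)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };

	return ata_host_alloc_pinfo(dev, ppi, n_ports);
}
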
6538f5cda257STejun Heo /**
6539ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6540ecef7253STejun Heo  *	@host: ATA host to start ports for
6541ecef7253STejun Heo  *
6542ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6543ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6544ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6545f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6546f3187195STejun Heo  *	first non-dummy port ops.
6547ecef7253STejun Heo  *
6548ecef7253STejun Heo  *	LOCKING:
6549ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6550ecef7253STejun Heo  *
6551ecef7253STejun Heo  *	RETURNS:
6552ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6553ecef7253STejun Heo  */
6554ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6555ecef7253STejun Heo {
6556ecef7253STejun Heo 	int i, rc;
6557ecef7253STejun Heo 
6558ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6559ecef7253STejun Heo 		return 0;
6560ecef7253STejun Heo 
6561ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6562ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6563ecef7253STejun Heo 
6564f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6565f3187195STejun Heo 			host->ops = ap->ops;
6566f3187195STejun Heo 
6567ecef7253STejun Heo 		if (ap->ops->port_start) {
6568ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6569ecef7253STejun Heo 			if (rc) {
6570ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
6571ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
6572ecef7253STejun Heo 				goto err_out;
6573ecef7253STejun Heo 			}
6574ecef7253STejun Heo 		}
6575ecef7253STejun Heo 
6576ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6577ecef7253STejun Heo 	}
6578ecef7253STejun Heo 
6579ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6580ecef7253STejun Heo 	return 0;
6581ecef7253STejun Heo 
6582ecef7253STejun Heo  err_out:
6583ecef7253STejun Heo 	while (--i >= 0) {
6584ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6585ecef7253STejun Heo 
6586ecef7253STejun Heo 		if (ap->ops->port_stop)
6587ecef7253STejun Heo 			ap->ops->port_stop(ap);
6588ecef7253STejun Heo 	}
6589ecef7253STejun Heo 	return rc;
6590ecef7253STejun Heo }
6591ecef7253STejun Heo 
6592ecef7253STejun Heo /**
6593cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
6594cca3974eSJeff Garzik  *	@host:	host to initialize
6595cca3974eSJeff Garzik  *	@dev:	device host is attached to
6596cca3974eSJeff Garzik  *	@flags:	host flags
6597c6fd2807SJeff Garzik  *	@ops:	port_ops
6598c6fd2807SJeff Garzik  *
6599c6fd2807SJeff Garzik  *	LOCKING:
6600c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6601c6fd2807SJeff Garzik  *
6602c6fd2807SJeff Garzik  */
6603f3187195STejun Heo /* KILLME - the only user left is ipr */
6604cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6605cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6606c6fd2807SJeff Garzik {
6607cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6608cca3974eSJeff Garzik 	host->dev = dev;
6609cca3974eSJeff Garzik 	host->flags = flags;
6610cca3974eSJeff Garzik 	host->ops = ops;
6611c6fd2807SJeff Garzik }
6612c6fd2807SJeff Garzik 
6613c6fd2807SJeff Garzik /**
6614f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6615f3187195STejun Heo  *	@host: ATA host to register
6616f3187195STejun Heo  *	@sht: template for SCSI host
6617c6fd2807SJeff Garzik  *
6618f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
6619f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
6620f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
6621f3187195STejun Heo  *	probes attached devices.
6622c6fd2807SJeff Garzik  *
6623c6fd2807SJeff Garzik  *	LOCKING:
6624f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6625c6fd2807SJeff Garzik  *
6626c6fd2807SJeff Garzik  *	RETURNS:
6627f3187195STejun Heo  *	0 on success, -errno otherwise.
6628c6fd2807SJeff Garzik  */
6629f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6630c6fd2807SJeff Garzik {
6631f3187195STejun Heo 	int i, rc;
6632c6fd2807SJeff Garzik 
6633f3187195STejun Heo 	/* host must have been started */
6634f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
6635f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
6636f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
6637f3187195STejun Heo 		WARN_ON(1);
6638f3187195STejun Heo 		return -EINVAL;
663902f076aaSAlan Cox 	}
6640f0d36efdSTejun Heo 
6641f3187195STejun Heo 	/* Blow away unused ports.  This happens when the LLD can't
6642f3187195STejun Heo 	 * determine the exact number of ports to allocate at
6643f3187195STejun Heo 	 * allocation time.
6644f3187195STejun Heo 	 */
6645f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
6646f3187195STejun Heo 		kfree(host->ports[i]);
6647f0d36efdSTejun Heo 
6648f3187195STejun Heo 	/* give ports names and add SCSI hosts */
6649f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
6650f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
6651c6fd2807SJeff Garzik 
6652f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
6653ecef7253STejun Heo 	if (rc)
6654f3187195STejun Heo 		return rc;
6655ecef7253STejun Heo 
6656fafbae87STejun Heo 	/* associate with ACPI nodes */
6657fafbae87STejun Heo 	ata_acpi_associate(host);
6658fafbae87STejun Heo 
6659f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
6660cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6661cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6662f3187195STejun Heo 		unsigned long xfer_mask;
6663f3187195STejun Heo 
6664f3187195STejun Heo 		/* set SATA cable type if still unset */
6665f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6666f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
6667c6fd2807SJeff Garzik 
6668c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
66694fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
6670c6fd2807SJeff Garzik 
6671cbcdd875STejun Heo 		/* print per-port info to dmesg */
6672f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6673f3187195STejun Heo 					      ap->udma_mask);
6674f3187195STejun Heo 
6675f3187195STejun Heo 		if (!ata_port_is_dummy(ap))
6676cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
6677cbcdd875STejun Heo 					"%cATA max %s %s\n",
6678a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6679f3187195STejun Heo 					ata_mode_string(xfer_mask),
6680cbcdd875STejun Heo 					ap->link.eh_info.desc);
6681f3187195STejun Heo 		else
6682f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6683c6fd2807SJeff Garzik 	}
6684c6fd2807SJeff Garzik 
6685f3187195STejun Heo 	/* perform each probe synchronously */
6686f3187195STejun Heo 	DPRINTK("probe begin\n");
6687f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6688f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
6689f3187195STejun Heo 		int rc;
6690f3187195STejun Heo 
6691f3187195STejun Heo 		/* probe */
6692c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
66939af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
6694c6fd2807SJeff Garzik 			unsigned long flags;
6695c6fd2807SJeff Garzik 
6696c6fd2807SJeff Garzik 			ata_port_probe(ap);
6697c6fd2807SJeff Garzik 
6698c6fd2807SJeff Garzik 			/* kick EH for boot probing */
6699c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
6700c6fd2807SJeff Garzik 
6701f58229f8STejun Heo 			ehi->probe_mask =
6702f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
6703c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
6704c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6705c6fd2807SJeff Garzik 
6706f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6707c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
6708c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
6709c6fd2807SJeff Garzik 
6710c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
6711c6fd2807SJeff Garzik 
6712c6fd2807SJeff Garzik 			/* wait for EH to finish */
6713c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6714c6fd2807SJeff Garzik 		} else {
671544877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6716c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
671744877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6718c6fd2807SJeff Garzik 
6719c6fd2807SJeff Garzik 			if (rc) {
6720c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
6721c6fd2807SJeff Garzik 				 * Current libata behavior will
6722c6fd2807SJeff Garzik 				 * tear down everything when
6723c6fd2807SJeff Garzik 				 * the module is removed
6724c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
6725c6fd2807SJeff Garzik 				 */
6726c6fd2807SJeff Garzik 			}
6727c6fd2807SJeff Garzik 		}
6728c6fd2807SJeff Garzik 	}
6729c6fd2807SJeff Garzik 
6730c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
6731c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
6732cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6733cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6734c6fd2807SJeff Garzik 
67351ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
6736c6fd2807SJeff Garzik 	}
6737c6fd2807SJeff Garzik 
6738f3187195STejun Heo 	return 0;
6739f3187195STejun Heo }
6740f3187195STejun Heo 
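/*
 * Usage sketch: drivers with non-trivial interrupt wiring (for example
 * one IRQ per port) perform the start/request_irq/register sequence by
 * hand instead of using ata_host_activate() below.  Names prefixed with
 * "example_" are placeholders.
 */
static int example_manual_attach(struct ata_host *host, int irq,
				 irq_handler_t handler,
				 struct scsi_host_template *sht)
{
	int rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, handler, IRQF_SHARED,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	/* request any additional per-port IRQs here */

	return ata_host_register(host, sht);
}
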
6741f3187195STejun Heo /**
6742f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
6743f5cda257STejun Heo  *	@host: target ATA host
6744f5cda257STejun Heo  *	@irq: IRQ to request
6745f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
6746f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
6747f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
6748f5cda257STejun Heo  *
6749f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
6750f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
6751f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
6752f5cda257STejun Heo  *	arguments and performs the three steps in one go.
6753f5cda257STejun Heo  *
6754f5cda257STejun Heo  *	LOCKING:
6755f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6756f5cda257STejun Heo  *
6757f5cda257STejun Heo  *	RETURNS:
6758f5cda257STejun Heo  *	0 on success, -errno otherwise.
6759f5cda257STejun Heo  */
6760f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
6761f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
6762f5cda257STejun Heo 		      struct scsi_host_template *sht)
6763f5cda257STejun Heo {
6764cbcdd875STejun Heo 	int i, rc;
6765f5cda257STejun Heo 
6766f5cda257STejun Heo 	rc = ata_host_start(host);
6767f5cda257STejun Heo 	if (rc)
6768f5cda257STejun Heo 		return rc;
6769f5cda257STejun Heo 
6770f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6771f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
6772f5cda257STejun Heo 	if (rc)
6773f5cda257STejun Heo 		return rc;
6774f5cda257STejun Heo 
6775cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
6776cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
67774031826bSTejun Heo 
6778f5cda257STejun Heo 	rc = ata_host_register(host, sht);
6779f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
6780f5cda257STejun Heo 	if (rc)
6781f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
6782f5cda257STejun Heo 
6783f5cda257STejun Heo 	return rc;
6784f5cda257STejun Heo }
6785f5cda257STejun Heo 
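/*
 * Usage sketch: a minimal PCI probe built on the helpers above.  BAR
 * iomapping and ioaddr setup are controller specific and only hinted
 * at; "example_" identifiers are placeholders.
 */
static int example_pci_probe(struct pci_dev *pdev,
			     const struct ata_port_info *pi,
			     struct scsi_host_template *sht)
{
	const struct ata_port_info *ppi[] = { pi, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* ... iomap BARs and fill each port's ->ioaddr here ... */

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, sht);
}
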
6786f5cda257STejun Heo /**
6787c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
6788c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
6789c6fd2807SJeff Garzik  *
6790c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
6791c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
6792c6fd2807SJeff Garzik  *	be quiescent on return from this function.
6793c6fd2807SJeff Garzik  *
6794c6fd2807SJeff Garzik  *	LOCKING:
6795c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6796c6fd2807SJeff Garzik  */
6797c6fd2807SJeff Garzik void ata_port_detach(struct ata_port *ap)
6798c6fd2807SJeff Garzik {
6799c6fd2807SJeff Garzik 	unsigned long flags;
680041bda9c9STejun Heo 	struct ata_link *link;
6801f58229f8STejun Heo 	struct ata_device *dev;
6802c6fd2807SJeff Garzik 
6803c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
6804c6fd2807SJeff Garzik 		goto skip_eh;
6805c6fd2807SJeff Garzik 
6806c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
6807c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6808c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
6809c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6810c6fd2807SJeff Garzik 
6811c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6812c6fd2807SJeff Garzik 
6813c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
6814c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
6815c6fd2807SJeff Garzik 	 */
6816c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6817c6fd2807SJeff Garzik 
681841bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
681941bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
6820f58229f8STejun Heo 			ata_dev_disable(dev);
682141bda9c9STejun Heo 	}
6822c6fd2807SJeff Garzik 
6823c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6824c6fd2807SJeff Garzik 
6825c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6826c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
6827c6fd2807SJeff Garzik 	 * target.
6828c6fd2807SJeff Garzik 	 */
6829c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6830c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
6831c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6832c6fd2807SJeff Garzik 
6833c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
683445a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
6835c6fd2807SJeff Garzik 
6836c6fd2807SJeff Garzik  skip_eh:
6837c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
6838cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
6839c6fd2807SJeff Garzik }
6840c6fd2807SJeff Garzik 
6841c6fd2807SJeff Garzik /**
68420529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
68430529c159STejun Heo  *	@host: Host to detach
68440529c159STejun Heo  *
68450529c159STejun Heo  *	Detach all ports of @host.
68460529c159STejun Heo  *
68470529c159STejun Heo  *	LOCKING:
68480529c159STejun Heo  *	Kernel thread context (may sleep).
68490529c159STejun Heo  */
68500529c159STejun Heo void ata_host_detach(struct ata_host *host)
68510529c159STejun Heo {
68520529c159STejun Heo 	int i;
68530529c159STejun Heo 
68540529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
68550529c159STejun Heo 		ata_port_detach(host->ports[i]);
68560529c159STejun Heo }
68570529c159STejun Heo 
6858c6fd2807SJeff Garzik /**
6859c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
6860c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
6861c6fd2807SJeff Garzik  *
6862c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
6863c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6864c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
6865c6fd2807SJeff Garzik  *	relative to cmd_addr.
6866c6fd2807SJeff Garzik  *
6867c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6868c6fd2807SJeff Garzik  */
6869c6fd2807SJeff Garzik 
6870c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
6871c6fd2807SJeff Garzik {
6872c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6873c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6874c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6875c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6876c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6877c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6878c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6879c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6880c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6881c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6882c6fd2807SJeff Garzik }
6883c6fd2807SJeff Garzik 
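/*
 * Usage sketch: filling a port's ioaddr from iomapped regions and
 * letting ata_std_ports() derive the remaining taskfile offsets.  The
 * cmd/ctl/bmdma cookies are placeholders obtained by the LLD.
 */
static void example_setup_ioaddr(struct ata_port *ap, void __iomem *cmd,
				 void __iomem *ctl, void __iomem *bmdma)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = cmd;
	ioaddr->ctl_addr = ctl;
	ioaddr->altstatus_addr = ctl;	/* same register as ctl */
	ioaddr->bmdma_addr = bmdma;
	ata_std_ports(ioaddr);		/* data/error/.../command */
}
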
6884c6fd2807SJeff Garzik 
6885c6fd2807SJeff Garzik #ifdef CONFIG_PCI
6886c6fd2807SJeff Garzik 
6887c6fd2807SJeff Garzik /**
6888c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
6889c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
6890c6fd2807SJeff Garzik  *
6891b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
6892b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
6893b878ca5dSTejun Heo  *	release is handled via devres.
6894c6fd2807SJeff Garzik  *
6895c6fd2807SJeff Garzik  *	LOCKING:
6896c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
6897c6fd2807SJeff Garzik  */
6898c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
6899c6fd2807SJeff Garzik {
6900c6fd2807SJeff Garzik 	struct device *dev = pci_dev_to_dev(pdev);
6901cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
6902c6fd2807SJeff Garzik 
6903f0d36efdSTejun Heo 	ata_host_detach(host);
6904c6fd2807SJeff Garzik }
6905c6fd2807SJeff Garzik 
6906c6fd2807SJeff Garzik /* move to PCI subsystem */
6907c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6908c6fd2807SJeff Garzik {
6909c6fd2807SJeff Garzik 	unsigned long tmp = 0;
6910c6fd2807SJeff Garzik 
6911c6fd2807SJeff Garzik 	switch (bits->width) {
6912c6fd2807SJeff Garzik 	case 1: {
6913c6fd2807SJeff Garzik 		u8 tmp8 = 0;
6914c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
6915c6fd2807SJeff Garzik 		tmp = tmp8;
6916c6fd2807SJeff Garzik 		break;
6917c6fd2807SJeff Garzik 	}
6918c6fd2807SJeff Garzik 	case 2: {
6919c6fd2807SJeff Garzik 		u16 tmp16 = 0;
6920c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
6921c6fd2807SJeff Garzik 		tmp = tmp16;
6922c6fd2807SJeff Garzik 		break;
6923c6fd2807SJeff Garzik 	}
6924c6fd2807SJeff Garzik 	case 4: {
6925c6fd2807SJeff Garzik 		u32 tmp32 = 0;
6926c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
6927c6fd2807SJeff Garzik 		tmp = tmp32;
6928c6fd2807SJeff Garzik 		break;
6929c6fd2807SJeff Garzik 	}
6930c6fd2807SJeff Garzik 
6931c6fd2807SJeff Garzik 	default:
6932c6fd2807SJeff Garzik 		return -EINVAL;
6933c6fd2807SJeff Garzik 	}
6934c6fd2807SJeff Garzik 
6935c6fd2807SJeff Garzik 	tmp &= bits->mask;
6936c6fd2807SJeff Garzik 
6937c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
6938c6fd2807SJeff Garzik }
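/*
 * Editorial sketch: the usual caller is a probe routine checking whether a
 * legacy channel is enabled before registering a port.  The config offset
 * and mask below follow an Intel PIIX-style layout and are assumptions for
 * illustration only.
 */
static int foo_primary_channel_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits foo_enable_bits =
		{ 0x41, 1, 0x80, 0x80 };	/* reg, width, mask, val */

	return pci_test_config_bits(pdev, &foo_enable_bits);
}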
6939c6fd2807SJeff Garzik 
69406ffa01d8STejun Heo #ifdef CONFIG_PM
6941c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6942c6fd2807SJeff Garzik {
6943c6fd2807SJeff Garzik 	pci_save_state(pdev);
6944c6fd2807SJeff Garzik 	pci_disable_device(pdev);
69454c90d971STejun Heo 
69464c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
6947c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
6948c6fd2807SJeff Garzik }
6949c6fd2807SJeff Garzik 
6950553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
6951c6fd2807SJeff Garzik {
6952553c4aa6STejun Heo 	int rc;
6953553c4aa6STejun Heo 
6954c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
6955c6fd2807SJeff Garzik 	pci_restore_state(pdev);
6956553c4aa6STejun Heo 
6957f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
6958553c4aa6STejun Heo 	if (rc) {
6959553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
6960553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
6961553c4aa6STejun Heo 		return rc;
6962553c4aa6STejun Heo 	}
6963553c4aa6STejun Heo 
6964c6fd2807SJeff Garzik 	pci_set_master(pdev);
6965553c4aa6STejun Heo 	return 0;
6966c6fd2807SJeff Garzik }
6967c6fd2807SJeff Garzik 
6968c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6969c6fd2807SJeff Garzik {
6970cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6971c6fd2807SJeff Garzik 	int rc = 0;
6972c6fd2807SJeff Garzik 
6973cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
6974c6fd2807SJeff Garzik 	if (rc)
6975c6fd2807SJeff Garzik 		return rc;
6976c6fd2807SJeff Garzik 
6977c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
6978c6fd2807SJeff Garzik 
6979c6fd2807SJeff Garzik 	return 0;
6980c6fd2807SJeff Garzik }
6981c6fd2807SJeff Garzik 
6982c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
6983c6fd2807SJeff Garzik {
6984cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
6985553c4aa6STejun Heo 	int rc;
6986c6fd2807SJeff Garzik 
6987553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
6988553c4aa6STejun Heo 	if (rc == 0)
6989cca3974eSJeff Garzik 		ata_host_resume(host);
6990553c4aa6STejun Heo 	return rc;
6991c6fd2807SJeff Garzik }
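/*
 * Editorial sketch: the do_ variants above exist so that a driver can slot
 * controller-specific work between quiescing the host and powering down the
 * PCI device.  foo_* is hypothetical and the chip-specific step is only a
 * placeholder comment.
 */
static int foo_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	/* chip-specific quiescing would go here */

	ata_pci_device_do_suspend(pdev, mesg);
	return 0;
}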
69926ffa01d8STejun Heo #endif /* CONFIG_PM */
69936ffa01d8STejun Heo 
6994c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
6995c6fd2807SJeff Garzik 
6996c6fd2807SJeff Garzik 
6997c6fd2807SJeff Garzik static int __init ata_init(void)
6998c6fd2807SJeff Garzik {
6999c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
7000c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7001c6fd2807SJeff Garzik 	if (!ata_wq)
7002c6fd2807SJeff Garzik 		return -ENOMEM;
7003c6fd2807SJeff Garzik 
7004c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7005c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7006c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7007c6fd2807SJeff Garzik 		return -ENOMEM;
7008c6fd2807SJeff Garzik 	}
7009c6fd2807SJeff Garzik 
7010c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7011c6fd2807SJeff Garzik 	return 0;
7012c6fd2807SJeff Garzik }
7013c6fd2807SJeff Garzik 
7014c6fd2807SJeff Garzik static void __exit ata_exit(void)
7015c6fd2807SJeff Garzik {
7016c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7017c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7018c6fd2807SJeff Garzik }
7019c6fd2807SJeff Garzik 
7020a4625085SBrian King subsys_initcall(ata_init);
7021c6fd2807SJeff Garzik module_exit(ata_exit);
7022c6fd2807SJeff Garzik 
7023c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7024c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7025c6fd2807SJeff Garzik 
7026c6fd2807SJeff Garzik int ata_ratelimit(void)
7027c6fd2807SJeff Garzik {
7028c6fd2807SJeff Garzik 	int rc;
7029c6fd2807SJeff Garzik 	unsigned long flags;
7030c6fd2807SJeff Garzik 
7031c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7032c6fd2807SJeff Garzik 
7033c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7034c6fd2807SJeff Garzik 		rc = 1;
7035c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7036c6fd2807SJeff Garzik 	} else
7037c6fd2807SJeff Garzik 		rc = 0;
7038c6fd2807SJeff Garzik 
7039c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7040c6fd2807SJeff Garzik 
7041c6fd2807SJeff Garzik 	return rc;
7042c6fd2807SJeff Garzik }
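/*
 * Editorial sketch: ata_ratelimit() is meant to guard noisy printks in
 * interrupt and error paths; it returns non-zero at most once per HZ/5
 * window.  foo_* and irq_stat are illustrative names only.
 */
static void foo_note_spurious_irq(struct ata_port *ap, u32 irq_stat)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt (irq_stat 0x%x)\n",
				irq_stat);
}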
7043c6fd2807SJeff Garzik 
7044c6fd2807SJeff Garzik /**
7045c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7046c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7047c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7048c6fd2807SJeff Garzik  *	@val: Wait condition
7049c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7050c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7051c6fd2807SJeff Garzik  *
7052c6fd2807SJeff Garzik  *	Waiting for some bits of a register to change is a common
7053c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads the 32-bit LE
7054c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition:
7055c6fd2807SJeff Garzik  *
7056c6fd2807SJeff Garzik  *	(*@reg & @mask) != @val
7057c6fd2807SJeff Garzik  *
7058c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7059c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7060c6fd2807SJeff Garzik  *
7061c6fd2807SJeff Garzik  *	LOCKING:
7062c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7063c6fd2807SJeff Garzik  *
7064c6fd2807SJeff Garzik  *	RETURNS:
7065c6fd2807SJeff Garzik  *	The final register value.
7066c6fd2807SJeff Garzik  */
7067c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7068c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7069c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7070c6fd2807SJeff Garzik {
7071c6fd2807SJeff Garzik 	unsigned long timeout;
7072c6fd2807SJeff Garzik 	u32 tmp;
7073c6fd2807SJeff Garzik 
7074c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7075c6fd2807SJeff Garzik 
7076c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7077c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7078c6fd2807SJeff Garzik 	 * eat away the timeout.
7079c6fd2807SJeff Garzik 	 */
7080c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7081c6fd2807SJeff Garzik 
7082c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7083c6fd2807SJeff Garzik 		msleep(interval_msec);
7084c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7085c6fd2807SJeff Garzik 	}
7086c6fd2807SJeff Garzik 
7087c6fd2807SJeff Garzik 	return tmp;
7088c6fd2807SJeff Garzik }
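/*
 * Editorial sketch: a common use is polling a self-clearing reset bit in a
 * 32-bit MMIO control register.  The bit position, polling interval and
 * timeout below are assumptions for illustration.
 */
static int foo_wait_reset_clear(void __iomem *ctl_reg)
{
	u32 tmp;

	/* poll every 10ms, give up after 1s; loops while bit 0 reads as 1 */
	tmp = ata_wait_register(ctl_reg, 0x1, 0x1, 10, 1000);
	if (tmp & 0x1)
		return -EIO;	/* still set: timed out */
	return 0;
}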
7089c6fd2807SJeff Garzik 
7090c6fd2807SJeff Garzik /*
7091c6fd2807SJeff Garzik  * Dummy port_ops
7092c6fd2807SJeff Garzik  */
7093c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7094c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7095c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7096c6fd2807SJeff Garzik 
7097c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7098c6fd2807SJeff Garzik {
7099c6fd2807SJeff Garzik 	return ATA_DRDY;
7100c6fd2807SJeff Garzik }
7101c6fd2807SJeff Garzik 
7102c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7103c6fd2807SJeff Garzik {
7104c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7105c6fd2807SJeff Garzik }
7106c6fd2807SJeff Garzik 
7107c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7108c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7109c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7110c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7111c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7112c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7113c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7114c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7115c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7116c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7117c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7118c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7119c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7120c6fd2807SJeff Garzik };
7121c6fd2807SJeff Garzik 
712221b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
712321b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
712421b0ad4fSTejun Heo };
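/*
 * Editorial sketch: a controller that must expose two ports but only
 * implements one can point the unused slot at ata_dummy_port_info so the
 * core treats it as a dummy port.  foo_alloc_host() is hypothetical.
 */
static struct ata_host *foo_alloc_host(struct device *dev,
				       const struct ata_port_info *real_pi)
{
	const struct ata_port_info *ppi[] = { real_pi, &ata_dummy_port_info };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}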
712521b0ad4fSTejun Heo 
7126c6fd2807SJeff Garzik /*
7127c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7128c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7129c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7130c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7131c6fd2807SJeff Garzik  */
7132c6fd2807SJeff Garzik 
7133c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7134c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7135c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7136c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
713721b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7138c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7139c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7140cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7141f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7142f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7143ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7144f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7145f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
71460529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7147c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7148c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
7149c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7150c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7151c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7152c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7153c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7154c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7155c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7156c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
715743727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7158c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7159c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7160c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7161c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7162c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7163c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7164d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7165c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
716604351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
71670d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
71680d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
716931cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7170c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7171d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7172c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7173c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7174c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7175c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7176c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7177c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7178c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7179c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7180c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7181c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7182c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7183c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
718410305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7185c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7186936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7187936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7188c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
7189c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
7190c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7191c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7192c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7193cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7194c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7195c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7196c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7197c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7198c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7199c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7200c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7201c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
7202d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7203c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
7204c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7205c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7206c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7207c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7208c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7209c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7210c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7211c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7212c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7213c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7214936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7215936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
72166ffa01d8STejun Heo #ifdef CONFIG_PM
7217cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7218cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
72196ffa01d8STejun Heo #endif /* CONFIG_PM */
7220c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7221c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
722210305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7223c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7224c6fd2807SJeff Garzik 
7225c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7226c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7227c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7228c6fd2807SJeff Garzik 
7229c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7230c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7231d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
72321626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7233d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7234c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7235c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
72366ffa01d8STejun Heo #ifdef CONFIG_PM
7237c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7238c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7239c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7240c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
72416ffa01d8STejun Heo #endif /* CONFIG_PM */
7242c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7243c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7244c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7245c6fd2807SJeff Garzik 
7246b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7247b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7248b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7249cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7250cbcdd875STejun Heo #ifdef CONFIG_PCI
7251cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7252cbcdd875STejun Heo #endif /* CONFIG_PCI */
7253c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
7254c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7255dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7256c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7257c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
72587d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7259c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7260c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7261c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7262c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7263c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
726483625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7265a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7266be0d18dfSAlan Cox 
7267be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7268be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7269be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7270be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7271