xref: /openbmc/linux/drivers/ata/libata-core.c (revision 88ff6eaf)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36c6fd2807SJeff Garzik #include <linux/module.h>
37c6fd2807SJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <linux/init.h>
39c6fd2807SJeff Garzik #include <linux/list.h>
40c6fd2807SJeff Garzik #include <linux/mm.h>
41c6fd2807SJeff Garzik #include <linux/highmem.h>
42c6fd2807SJeff Garzik #include <linux/spinlock.h>
43c6fd2807SJeff Garzik #include <linux/blkdev.h>
44c6fd2807SJeff Garzik #include <linux/delay.h>
45c6fd2807SJeff Garzik #include <linux/timer.h>
46c6fd2807SJeff Garzik #include <linux/interrupt.h>
47c6fd2807SJeff Garzik #include <linux/completion.h>
48c6fd2807SJeff Garzik #include <linux/suspend.h>
49c6fd2807SJeff Garzik #include <linux/workqueue.h>
50c6fd2807SJeff Garzik #include <linux/jiffies.h>
51c6fd2807SJeff Garzik #include <linux/scatterlist.h>
522dcb407eSJeff Garzik #include <linux/io.h>
53c6fd2807SJeff Garzik #include <scsi/scsi.h>
54c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
55c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
56c6fd2807SJeff Garzik #include <linux/libata.h>
57c6fd2807SJeff Garzik #include <asm/semaphore.h>
58c6fd2807SJeff Garzik #include <asm/byteorder.h>
59c6fd2807SJeff Garzik 
60c6fd2807SJeff Garzik #include "libata.h"
61c6fd2807SJeff Garzik 
62fda0efc5SJeff Garzik 
63c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
64c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
65c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
67c6fd2807SJeff Garzik 
68c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
69c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
70c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev,
72218f3d30SJeff Garzik 					u8 enable, u8 feature);
73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
7475683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75c6fd2807SJeff Garzik 
76f3187195STejun Heo unsigned int ata_print_id = 1;
77c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
78c6fd2807SJeff Garzik 
79c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
80c6fd2807SJeff Garzik 
81c6fd2807SJeff Garzik int atapi_enabled = 1;
82c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
83c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84c6fd2807SJeff Garzik 
85c6fd2807SJeff Garzik int atapi_dmadir = 0;
86c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
87c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88c6fd2807SJeff Garzik 
89baf4fdfaSMark Lord int atapi_passthru16 = 1;
90baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444);
91baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92baf4fdfaSMark Lord 
93c6fd2807SJeff Garzik int libata_fua = 0;
94c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
95c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96c6fd2807SJeff Garzik 
972dcb407eSJeff Garzik static int ata_ignore_hpa;
981e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
991e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
1001e999736SAlan Cox 
101b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
102b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444);
103b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
104b3a70601SAlan Cox 
105c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
106c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
107c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
108c6fd2807SJeff Garzik 
1096ebe9d86SJeff Garzik int libata_noacpi = 0;
110d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
1116ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11211ef697bSKristen Carlson Accardi 
113c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
114c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
115c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
116c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
117c6fd2807SJeff Garzik 
118c6fd2807SJeff Garzik 
119c6fd2807SJeff Garzik /**
120c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
121c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
122c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
1239977126cSTejun Heo  *	@is_cmd: This FIS is for a command
1249977126cSTejun Heo  *	@fis: Buffer into which data will be output
125c6fd2807SJeff Garzik  *
126c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
127c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
128c6fd2807SJeff Garzik  *
129c6fd2807SJeff Garzik  *	LOCKING:
130c6fd2807SJeff Garzik  *	Inherited from caller.
131c6fd2807SJeff Garzik  */
1329977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
133c6fd2807SJeff Garzik {
134c6fd2807SJeff Garzik 	fis[0] = 0x27;			/* Register - Host to Device FIS */
1359977126cSTejun Heo 	fis[1] = pmp & 0xf;		/* Port multiplier number */
1369977126cSTejun Heo 	if (is_cmd)
1379977126cSTejun Heo 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
1389977126cSTejun Heo 
139c6fd2807SJeff Garzik 	fis[2] = tf->command;
140c6fd2807SJeff Garzik 	fis[3] = tf->feature;
141c6fd2807SJeff Garzik 
142c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
143c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
144c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
145c6fd2807SJeff Garzik 	fis[7] = tf->device;
146c6fd2807SJeff Garzik 
147c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
148c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
149c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
150c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
151c6fd2807SJeff Garzik 
152c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
153c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
154c6fd2807SJeff Garzik 	fis[14] = 0;
155c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
156c6fd2807SJeff Garzik 
157c6fd2807SJeff Garzik 	fis[16] = 0;
158c6fd2807SJeff Garzik 	fis[17] = 0;
159c6fd2807SJeff Garzik 	fis[18] = 0;
160c6fd2807SJeff Garzik 	fis[19] = 0;
161c6fd2807SJeff Garzik }
162c6fd2807SJeff Garzik 
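/*
 * Illustrative sketch (not part of the driver): a SATA host driver
 * typically calls ata_tf_to_fis() to serialize a queued command's
 * taskfile into its per-command FIS buffer.  "qc" and "cmd_tbl" below
 * are assumed names for the example, not libata symbols; pmp 0 and
 * is_cmd 1 are example arguments.
 *
 *	u8 cmd_tbl[20];
 *
 *	ata_tf_to_fis(&qc->tf, 0, 1, cmd_tbl);
 *
 * Afterwards cmd_tbl[0] is 0x27 and bit 7 of cmd_tbl[1] is set,
 * marking the buffer as a Register - Host to Device command FIS.
 */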
163c6fd2807SJeff Garzik /**
164c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
165c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
166c6fd2807SJeff Garzik  *	@tf: Taskfile to output
167c6fd2807SJeff Garzik  *
168c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
169c6fd2807SJeff Garzik  *
170c6fd2807SJeff Garzik  *	LOCKING:
171c6fd2807SJeff Garzik  *	Inherited from caller.
172c6fd2807SJeff Garzik  */
173c6fd2807SJeff Garzik 
174c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
175c6fd2807SJeff Garzik {
176c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
177c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
178c6fd2807SJeff Garzik 
179c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
180c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
181c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
182c6fd2807SJeff Garzik 	tf->device	= fis[7];
183c6fd2807SJeff Garzik 
184c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
185c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
186c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
187c6fd2807SJeff Garzik 
188c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
189c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
190c6fd2807SJeff Garzik }
191c6fd2807SJeff Garzik 
192c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
193c6fd2807SJeff Garzik 	/* pio multi */
194c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
195c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
196c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
197c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
198c6fd2807SJeff Garzik 	0,
199c6fd2807SJeff Garzik 	0,
200c6fd2807SJeff Garzik 	0,
201c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
202c6fd2807SJeff Garzik 	/* pio */
203c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
204c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
205c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
206c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	0,
209c6fd2807SJeff Garzik 	0,
210c6fd2807SJeff Garzik 	0,
211c6fd2807SJeff Garzik 	/* dma */
212c6fd2807SJeff Garzik 	ATA_CMD_READ,
213c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
214c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
215c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
216c6fd2807SJeff Garzik 	0,
217c6fd2807SJeff Garzik 	0,
218c6fd2807SJeff Garzik 	0,
219c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
220c6fd2807SJeff Garzik };
221c6fd2807SJeff Garzik 
222c6fd2807SJeff Garzik /**
223c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
224bd056d7eSTejun Heo  *	@tf: command to examine and configure
225bd056d7eSTejun Heo  *	@dev: device tf belongs to
226c6fd2807SJeff Garzik  *
227c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
228c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
229c6fd2807SJeff Garzik  *
230c6fd2807SJeff Garzik  *	LOCKING:
231c6fd2807SJeff Garzik  *	caller.
232c6fd2807SJeff Garzik  */
233bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
234c6fd2807SJeff Garzik {
235c6fd2807SJeff Garzik 	u8 cmd;
236c6fd2807SJeff Garzik 
237c6fd2807SJeff Garzik 	int index, fua, lba48, write;
238c6fd2807SJeff Garzik 
239c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
240c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
241c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
242c6fd2807SJeff Garzik 
243c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
244c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
245c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
2469af5c9c9STejun Heo 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
247c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
248c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
249c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
250c6fd2807SJeff Garzik 	} else {
251c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
252c6fd2807SJeff Garzik 		index = 16;
253c6fd2807SJeff Garzik 	}
254c6fd2807SJeff Garzik 
255c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
256c6fd2807SJeff Garzik 	if (cmd) {
257c6fd2807SJeff Garzik 		tf->command = cmd;
258c6fd2807SJeff Garzik 		return 0;
259c6fd2807SJeff Garzik 	}
260c6fd2807SJeff Garzik 	return -1;
261c6fd2807SJeff Garzik }
262c6fd2807SJeff Garzik 
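/*
 * Worked example (illustration only): the command is looked up as
 * ata_rw_cmds[index + fua + lba48 + write], where index is 0 for PIO
 * multi, 8 for plain PIO and 16 for DMA.  A DMA write with LBA48 and
 * FUA therefore selects entry 16 + 4 + 2 + 1 = 23, ATA_CMD_WRITE_FUA_EXT,
 * while a plain PIO read selects entry 8 + 0 + 0 + 0 = 8,
 * ATA_CMD_PIO_READ.  Zero entries mark invalid combinations (e.g. FUA
 * without LBA48) and make ata_rwcmd_protocol() return -1.
 */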
263c6fd2807SJeff Garzik /**
26435b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
26535b649feSTejun Heo  *	@tf: ATA taskfile of interest
26635b649feSTejun Heo  *	@dev: ATA device @tf belongs to
26735b649feSTejun Heo  *
26835b649feSTejun Heo  *	LOCKING:
26935b649feSTejun Heo  *	None.
27035b649feSTejun Heo  *
27135b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
27235b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
27335b649feSTejun Heo  *	flags select the address format to use.
27435b649feSTejun Heo  *
27535b649feSTejun Heo  *	RETURNS:
27635b649feSTejun Heo  *	Block address read from @tf.
27735b649feSTejun Heo  */
27835b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
27935b649feSTejun Heo {
28035b649feSTejun Heo 	u64 block = 0;
28135b649feSTejun Heo 
28235b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
28335b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
28435b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
28535b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
28635b649feSTejun Heo 			block |= tf->hob_lbal << 24;
28735b649feSTejun Heo 		} else
28835b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
28935b649feSTejun Heo 
29035b649feSTejun Heo 		block |= tf->lbah << 16;
29135b649feSTejun Heo 		block |= tf->lbam << 8;
29235b649feSTejun Heo 		block |= tf->lbal;
29335b649feSTejun Heo 	} else {
29435b649feSTejun Heo 		u32 cyl, head, sect;
29535b649feSTejun Heo 
29635b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
29735b649feSTejun Heo 		head = tf->device & 0xf;
29835b649feSTejun Heo 		sect = tf->lbal;
29935b649feSTejun Heo 
30035b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
30135b649feSTejun Heo 	}
30235b649feSTejun Heo 
30335b649feSTejun Heo 	return block;
30435b649feSTejun Heo }
30535b649feSTejun Heo 
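/*
 * Worked CHS example (illustration only, hypothetical geometry): for a
 * device with dev->heads == 16 and dev->sectors == 63, a taskfile
 * holding cyl 2, head 3 and sect 4 decodes to
 * block = (2 * 16 + 3) * 63 + 4 = 2209.
 */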
30635b649feSTejun Heo /**
307bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
308bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
309bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
310bd056d7eSTejun Heo  *	@block: Block address
311bd056d7eSTejun Heo  *	@n_block: Number of blocks
312bd056d7eSTejun Heo  *	@tf_flags: RW/FUA and other flags
313bd056d7eSTejun Heo  *	@tag: command tag; used when building an NCQ command
314bd056d7eSTejun Heo  *
315bd056d7eSTejun Heo  *	LOCKING:
316bd056d7eSTejun Heo  *	None.
317bd056d7eSTejun Heo  *
318bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
319bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
320bd056d7eSTejun Heo  *
321bd056d7eSTejun Heo  *	RETURNS:
323bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
324bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
325bd056d7eSTejun Heo  */
326bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
327bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
328bd056d7eSTejun Heo 		    unsigned int tag)
329bd056d7eSTejun Heo {
330bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
331bd056d7eSTejun Heo 	tf->flags |= tf_flags;
332bd056d7eSTejun Heo 
3336d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
334bd056d7eSTejun Heo 		/* yay, NCQ */
335bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
336bd056d7eSTejun Heo 			return -ERANGE;
337bd056d7eSTejun Heo 
338bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
339bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
340bd056d7eSTejun Heo 
341bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
342bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
343bd056d7eSTejun Heo 		else
344bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
345bd056d7eSTejun Heo 
346bd056d7eSTejun Heo 		tf->nsect = tag << 3;
347bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
348bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
349bd056d7eSTejun Heo 
350bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
351bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
352bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
353bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
354bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
355bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
356bd056d7eSTejun Heo 
357bd056d7eSTejun Heo 		tf->device = 1 << 6;
358bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
359bd056d7eSTejun Heo 			tf->device |= 1 << 7;
360bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
361bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
362bd056d7eSTejun Heo 
363bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
364bd056d7eSTejun Heo 			/* use LBA28 */
365bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
366bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
367bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
368bd056d7eSTejun Heo 				return -ERANGE;
369bd056d7eSTejun Heo 
370bd056d7eSTejun Heo 			/* use LBA48 */
371bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
372bd056d7eSTejun Heo 
373bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
374bd056d7eSTejun Heo 
375bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
376bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
377bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
378bd056d7eSTejun Heo 		} else
379bd056d7eSTejun Heo 			/* request too large even for LBA48 */
380bd056d7eSTejun Heo 			return -ERANGE;
381bd056d7eSTejun Heo 
382bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
383bd056d7eSTejun Heo 			return -EINVAL;
384bd056d7eSTejun Heo 
385bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
386bd056d7eSTejun Heo 
387bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
388bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
389bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
390bd056d7eSTejun Heo 
391bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
392bd056d7eSTejun Heo 	} else {
393bd056d7eSTejun Heo 		/* CHS */
394bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
395bd056d7eSTejun Heo 
396bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
397bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
398bd056d7eSTejun Heo 			return -ERANGE;
399bd056d7eSTejun Heo 
400bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
401bd056d7eSTejun Heo 			return -EINVAL;
402bd056d7eSTejun Heo 
403bd056d7eSTejun Heo 		/* Convert LBA to CHS */
404bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
405bd056d7eSTejun Heo 		cyl   = track / dev->heads;
406bd056d7eSTejun Heo 		head  = track % dev->heads;
407bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
408bd056d7eSTejun Heo 
409bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
410bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
411bd056d7eSTejun Heo 
412bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
413bd056d7eSTejun Heo 		   Cylinder: 0-65535
414bd056d7eSTejun Heo 		   Head: 0-15
415bd056d7eSTejun Heo 		   Sector: 1-255 */
416bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
417bd056d7eSTejun Heo 			return -ERANGE;
418bd056d7eSTejun Heo 
419bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
420bd056d7eSTejun Heo 		tf->lbal = sect;
421bd056d7eSTejun Heo 		tf->lbam = cyl;
422bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
423bd056d7eSTejun Heo 		tf->device |= head;
424bd056d7eSTejun Heo 	}
425bd056d7eSTejun Heo 
426bd056d7eSTejun Heo 	return 0;
427bd056d7eSTejun Heo }
428bd056d7eSTejun Heo 
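/*
 * Illustrative sketch (not part of the driver): a caller such as the
 * SCSI translation layer fills a taskfile for an 8-sector FUA write at
 * LBA 0x12345678 roughly like this ("tf", "dev" and "tag" are assumed
 * to be set up already):
 *
 *	rc = ata_build_rw_tf(&tf, dev, 0x12345678ULL, 8,
 *			     ATA_TFLAG_WRITE | ATA_TFLAG_FUA, tag);
 *
 * -ERANGE means the request does not fit the device's addressing,
 * -EINVAL means no valid command/protocol combination exists.  When
 * NCQ is used, the tag ends up in tf.nsect (tag << 3) and the sector
 * count in tf.feature/tf.hob_feature.
 */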
429bd056d7eSTejun Heo /**
430c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
431c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
432c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
433c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
434c6fd2807SJeff Garzik  *
435c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
436c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
437c6fd2807SJeff Garzik  *
438c6fd2807SJeff Garzik  *	LOCKING:
439c6fd2807SJeff Garzik  *	None.
440c6fd2807SJeff Garzik  *
441c6fd2807SJeff Garzik  *	RETURNS:
442c6fd2807SJeff Garzik  *	Packed xfer_mask.
443c6fd2807SJeff Garzik  */
444c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
445c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
446c6fd2807SJeff Garzik 				      unsigned int udma_mask)
447c6fd2807SJeff Garzik {
448c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
449c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
450c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
451c6fd2807SJeff Garzik }
452c6fd2807SJeff Garzik 
453c6fd2807SJeff Garzik /**
454c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
455c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
456c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
457c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
458c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
459c6fd2807SJeff Garzik  *
460c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
461c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
462c6fd2807SJeff Garzik  */
463c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
464c6fd2807SJeff Garzik 				unsigned int *pio_mask,
465c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
466c6fd2807SJeff Garzik 				unsigned int *udma_mask)
467c6fd2807SJeff Garzik {
468c6fd2807SJeff Garzik 	if (pio_mask)
469c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
470c6fd2807SJeff Garzik 	if (mwdma_mask)
471c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
472c6fd2807SJeff Garzik 	if (udma_mask)
473c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
474c6fd2807SJeff Garzik }
475c6fd2807SJeff Garzik 
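/*
 * Illustrative sketch (not part of the driver): the packed mask is the
 * three per-type masks shifted into one word, so packing and unpacking
 * round-trip.  The mask values are examples only.
 *
 *	unsigned int xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * pio, mwdma and udma now hold 0x1f, 0x07 and 0x3f again.
 */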
476c6fd2807SJeff Garzik static const struct ata_xfer_ent {
477c6fd2807SJeff Garzik 	int shift, bits;
478c6fd2807SJeff Garzik 	u8 base;
479c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
480c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
481c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
482c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
483c6fd2807SJeff Garzik 	{ -1, },
484c6fd2807SJeff Garzik };
485c6fd2807SJeff Garzik 
486c6fd2807SJeff Garzik /**
487c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
488c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
489c6fd2807SJeff Garzik  *
490c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
491c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
492c6fd2807SJeff Garzik  *
493c6fd2807SJeff Garzik  *	LOCKING:
494c6fd2807SJeff Garzik  *	None.
495c6fd2807SJeff Garzik  *
496c6fd2807SJeff Garzik  *	RETURNS:
497c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
498c6fd2807SJeff Garzik  */
499c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
500c6fd2807SJeff Garzik {
501c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
502c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
503c6fd2807SJeff Garzik 
504c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
505c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
506c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
507c6fd2807SJeff Garzik 	return 0;
508c6fd2807SJeff Garzik }
509c6fd2807SJeff Garzik 
510c6fd2807SJeff Garzik /**
511c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
512c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
513c6fd2807SJeff Garzik  *
514c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
515c6fd2807SJeff Garzik  *
516c6fd2807SJeff Garzik  *	LOCKING:
517c6fd2807SJeff Garzik  *	None.
518c6fd2807SJeff Garzik  *
519c6fd2807SJeff Garzik  *	RETURNS:
520c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
521c6fd2807SJeff Garzik  */
522c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
523c6fd2807SJeff Garzik {
524c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
525c6fd2807SJeff Garzik 
526c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
527c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
528c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
529c6fd2807SJeff Garzik 	return 0;
530c6fd2807SJeff Garzik }
531c6fd2807SJeff Garzik 
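/*
 * Example (illustration only): only the highest set bit matters for
 * ata_xfer_mask2mode(), and ata_xfer_mode2mask() is its inverse for a
 * single mode.
 *
 *	u8 mode = ata_xfer_mask2mode(ata_pack_xfermask(0, 0, 0x3f));
 *
 * mode is XFER_UDMA_5 because UDMA5 is the highest bit in the packed
 * mask, and ata_xfer_mode2mask(XFER_UDMA_5) returns a mask with only
 * that one bit set.
 */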
532c6fd2807SJeff Garzik /**
533c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
534c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
535c6fd2807SJeff Garzik  *
536c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
537c6fd2807SJeff Garzik  *
538c6fd2807SJeff Garzik  *	LOCKING:
539c6fd2807SJeff Garzik  *	None.
540c6fd2807SJeff Garzik  *
541c6fd2807SJeff Garzik  *	RETURNS:
542c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
543c6fd2807SJeff Garzik  */
544c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
545c6fd2807SJeff Garzik {
546c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
547c6fd2807SJeff Garzik 
548c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
549c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550c6fd2807SJeff Garzik 			return ent->shift;
551c6fd2807SJeff Garzik 	return -1;
552c6fd2807SJeff Garzik }
553c6fd2807SJeff Garzik 
554c6fd2807SJeff Garzik /**
555c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
556c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
557c6fd2807SJeff Garzik  *
558c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
559c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
560c6fd2807SJeff Garzik  *
561c6fd2807SJeff Garzik  *	LOCKING:
562c6fd2807SJeff Garzik  *	None.
563c6fd2807SJeff Garzik  *
564c6fd2807SJeff Garzik  *	RETURNS:
565c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
566c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
567c6fd2807SJeff Garzik  */
568c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
569c6fd2807SJeff Garzik {
570c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
571c6fd2807SJeff Garzik 		"PIO0",
572c6fd2807SJeff Garzik 		"PIO1",
573c6fd2807SJeff Garzik 		"PIO2",
574c6fd2807SJeff Garzik 		"PIO3",
575c6fd2807SJeff Garzik 		"PIO4",
576b352e57dSAlan Cox 		"PIO5",
577b352e57dSAlan Cox 		"PIO6",
578c6fd2807SJeff Garzik 		"MWDMA0",
579c6fd2807SJeff Garzik 		"MWDMA1",
580c6fd2807SJeff Garzik 		"MWDMA2",
581b352e57dSAlan Cox 		"MWDMA3",
582b352e57dSAlan Cox 		"MWDMA4",
583c6fd2807SJeff Garzik 		"UDMA/16",
584c6fd2807SJeff Garzik 		"UDMA/25",
585c6fd2807SJeff Garzik 		"UDMA/33",
586c6fd2807SJeff Garzik 		"UDMA/44",
587c6fd2807SJeff Garzik 		"UDMA/66",
588c6fd2807SJeff Garzik 		"UDMA/100",
589c6fd2807SJeff Garzik 		"UDMA/133",
590c6fd2807SJeff Garzik 		"UDMA7",
591c6fd2807SJeff Garzik 	};
592c6fd2807SJeff Garzik 	int highbit;
593c6fd2807SJeff Garzik 
594c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
595c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
596c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
597c6fd2807SJeff Garzik 	return "<n/a>";
598c6fd2807SJeff Garzik }
599c6fd2807SJeff Garzik 
600c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
601c6fd2807SJeff Garzik {
602c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
603c6fd2807SJeff Garzik 		"1.5 Gbps",
604c6fd2807SJeff Garzik 		"3.0 Gbps",
605c6fd2807SJeff Garzik 	};
606c6fd2807SJeff Garzik 
607c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
608c6fd2807SJeff Garzik 		return "<unknown>";
609c6fd2807SJeff Garzik 	return spd_str[spd - 1];
610c6fd2807SJeff Garzik }
611c6fd2807SJeff Garzik 
612c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
613c6fd2807SJeff Garzik {
61409d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
6159af5c9c9STejun Heo 		if (ata_msg_drv(dev->link->ap))
616c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6174ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6184ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
619c6fd2807SJeff Garzik 		dev->class++;
620c6fd2807SJeff Garzik 	}
621c6fd2807SJeff Garzik }
622c6fd2807SJeff Garzik 
623c6fd2807SJeff Garzik /**
624c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
625c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
626c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
627c6fd2807SJeff Garzik  *
6280d5ff566STejun Heo  *	This technique was originally described in
6290d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
6300d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
6310d5ff566STejun Heo  *
6320d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
6330d5ff566STejun Heo  *	and if a device is present, it will respond by
6340d5ff566STejun Heo  *	correctly storing and echoing back the
6350d5ff566STejun Heo  *	ATA shadow register contents.
636c6fd2807SJeff Garzik  *
637c6fd2807SJeff Garzik  *	LOCKING:
638c6fd2807SJeff Garzik  *	caller.
639c6fd2807SJeff Garzik  */
640c6fd2807SJeff Garzik 
6410d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
642c6fd2807SJeff Garzik {
6430d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
6440d5ff566STejun Heo 	u8 nsect, lbal;
6450d5ff566STejun Heo 
6460d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
6470d5ff566STejun Heo 
6480d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6490d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6500d5ff566STejun Heo 
6510d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
6520d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
6530d5ff566STejun Heo 
6540d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6550d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6560d5ff566STejun Heo 
6570d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
6580d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
6590d5ff566STejun Heo 
6600d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
6610d5ff566STejun Heo 		return 1;	/* we found a device */
6620d5ff566STejun Heo 
6630d5ff566STejun Heo 	return 0;		/* nothing found */
664c6fd2807SJeff Garzik }
665c6fd2807SJeff Garzik 
666c6fd2807SJeff Garzik /**
667c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
668c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
669c6fd2807SJeff Garzik  *
670c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
671c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
672c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
673c6fd2807SJeff Garzik  *
674c6fd2807SJeff Garzik  *	LOCKING:
675c6fd2807SJeff Garzik  *	None.
676c6fd2807SJeff Garzik  *
677c6fd2807SJeff Garzik  *	RETURNS:
678633273a3STejun Heo  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
679633273a3STejun Heo  *	%ATA_DEV_UNKNOWN in the event of failure.
680c6fd2807SJeff Garzik  */
681c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
682c6fd2807SJeff Garzik {
683c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
684c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
685c6fd2807SJeff Garzik 	 * so we only check those.  It's sufficient for uniqueness.
686633273a3STejun Heo 	 *
687633273a3STejun Heo 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
688633273a3STejun Heo 	 * signatures for ATA and ATAPI devices attached on SerialATA,
689633273a3STejun Heo 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
690633273a3STejun Heo 	 * spec never mentioned using different signatures
691633273a3STejun Heo 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
692633273a3STejun Heo 	 * Multiplier specification began to use 0x69/0x96 to identify
693633273a3STejun Heo 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
694633273a3STejun Heo 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
695633273a3STejun Heo 	 * 0x69/0x96 and described them as reserved for
696633273a3STejun Heo 	 * SerialATA.
697633273a3STejun Heo 	 *
698633273a3STejun Heo 	 * We follow the current spec and consider that 0x69/0x96
699633273a3STejun Heo 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
700c6fd2807SJeff Garzik 	 */
701633273a3STejun Heo 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
702c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
703c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
704c6fd2807SJeff Garzik 	}
705c6fd2807SJeff Garzik 
706633273a3STejun Heo 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
707c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
708c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
709c6fd2807SJeff Garzik 	}
710c6fd2807SJeff Garzik 
711633273a3STejun Heo 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
712633273a3STejun Heo 		DPRINTK("found PMP device by sig\n");
713633273a3STejun Heo 		return ATA_DEV_PMP;
714633273a3STejun Heo 	}
715633273a3STejun Heo 
716633273a3STejun Heo 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
7172dcb407eSJeff Garzik 		printk(KERN_INFO "ata: SEMB device ignored\n");
718633273a3STejun Heo 		return ATA_DEV_SEMB_UNSUP; /* not yet */
719633273a3STejun Heo 	}
720633273a3STejun Heo 
721c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
722c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
723c6fd2807SJeff Garzik }
724c6fd2807SJeff Garzik 
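/*
 * Example (illustration only): after a reset, the signature left in the
 * shadow registers can be classified straight from a taskfile.  A
 * taskfile with lbam == 0x14 and lbah == 0xeb classifies as
 * ATA_DEV_ATAPI; lbam == lbah == 0 classifies as ATA_DEV_ATA.
 *
 *	struct ata_taskfile tf;
 *	unsigned int class;
 *
 *	ap->ops->tf_read(ap, &tf);
 *	class = ata_dev_classify(&tf);
 */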
725c6fd2807SJeff Garzik /**
726c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
7273f19859eSTejun Heo  *	@dev: ATA device to classify
7283f19859eSTejun Heo  *	@present: device seems present
729c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
730c6fd2807SJeff Garzik  *
731c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
732c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
733c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
734c6fd2807SJeff Garzik  *	and diagnostics.
735c6fd2807SJeff Garzik  *
736c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
737c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
738c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
739c6fd2807SJeff Garzik  *
740c6fd2807SJeff Garzik  *	LOCKING:
741c6fd2807SJeff Garzik  *	caller.
742c6fd2807SJeff Garzik  *
743c6fd2807SJeff Garzik  *	RETURNS:
744c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
745c6fd2807SJeff Garzik  */
7463f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
7473f19859eSTejun Heo 				  u8 *r_err)
748c6fd2807SJeff Garzik {
7493f19859eSTejun Heo 	struct ata_port *ap = dev->link->ap;
750c6fd2807SJeff Garzik 	struct ata_taskfile tf;
751c6fd2807SJeff Garzik 	unsigned int class;
752c6fd2807SJeff Garzik 	u8 err;
753c6fd2807SJeff Garzik 
7543f19859eSTejun Heo 	ap->ops->dev_select(ap, dev->devno);
755c6fd2807SJeff Garzik 
756c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
757c6fd2807SJeff Garzik 
758c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
759c6fd2807SJeff Garzik 	err = tf.feature;
760c6fd2807SJeff Garzik 	if (r_err)
761c6fd2807SJeff Garzik 		*r_err = err;
762c6fd2807SJeff Garzik 
76393590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
7643f19859eSTejun Heo 	if (err == 0 && dev->devno == 0)
76593590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
7663f19859eSTejun Heo 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
76793590859SAlan Cox 	else if (err == 1)
768c6fd2807SJeff Garzik 		/* do nothing */ ;
7693f19859eSTejun Heo 	else if ((dev->devno == 0) && (err == 0x81))
770c6fd2807SJeff Garzik 		/* do nothing */ ;
771c6fd2807SJeff Garzik 	else
772c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
773c6fd2807SJeff Garzik 
774c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
775c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
776c6fd2807SJeff Garzik 
777d7fbee05STejun Heo 	if (class == ATA_DEV_UNKNOWN) {
778d7fbee05STejun Heo 		/* If the device failed diagnostic, it's likely to
779d7fbee05STejun Heo 		 * have reported incorrect device signature too.
780d7fbee05STejun Heo 		 * Assume ATA device if the device seems present but
781d7fbee05STejun Heo 		 * device signature is invalid with diagnostic
782d7fbee05STejun Heo 		 * failure.
783d7fbee05STejun Heo 		 */
784d7fbee05STejun Heo 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
785d7fbee05STejun Heo 			class = ATA_DEV_ATA;
786d7fbee05STejun Heo 		else
787d7fbee05STejun Heo 			class = ATA_DEV_NONE;
788d7fbee05STejun Heo 	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
789d7fbee05STejun Heo 		class = ATA_DEV_NONE;
790d7fbee05STejun Heo 
791c6fd2807SJeff Garzik 	return class;
792c6fd2807SJeff Garzik }
793c6fd2807SJeff Garzik 
794c6fd2807SJeff Garzik /**
795c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
796c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
797c6fd2807SJeff Garzik  *	@s: string into which data is output
798c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
799c6fd2807SJeff Garzik  *	@len: length of string to return.  Must be an even number.
800c6fd2807SJeff Garzik  *
801c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
802c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
803c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
804c6fd2807SJeff Garzik  *
805c6fd2807SJeff Garzik  *	LOCKING:
806c6fd2807SJeff Garzik  *	caller.
807c6fd2807SJeff Garzik  */
808c6fd2807SJeff Garzik 
809c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
810c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
811c6fd2807SJeff Garzik {
812c6fd2807SJeff Garzik 	unsigned int c;
813c6fd2807SJeff Garzik 
814c6fd2807SJeff Garzik 	while (len > 0) {
815c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
816c6fd2807SJeff Garzik 		*s = c;
817c6fd2807SJeff Garzik 		s++;
818c6fd2807SJeff Garzik 
819c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
820c6fd2807SJeff Garzik 		*s = c;
821c6fd2807SJeff Garzik 		s++;
822c6fd2807SJeff Garzik 
823c6fd2807SJeff Garzik 		ofs++;
824c6fd2807SJeff Garzik 		len -= 2;
825c6fd2807SJeff Garzik 	}
826c6fd2807SJeff Garzik }
827c6fd2807SJeff Garzik 
828c6fd2807SJeff Garzik /**
829c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
830c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
831c6fd2807SJeff Garzik  *	@s: string into which data is output
832c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
833c6fd2807SJeff Garzik  *	@len: length of string to return.  Must be an odd number.
834c6fd2807SJeff Garzik  *
835c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
836c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
837c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
838c6fd2807SJeff Garzik  *
839c6fd2807SJeff Garzik  *	LOCKING:
840c6fd2807SJeff Garzik  *	caller.
841c6fd2807SJeff Garzik  */
842c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
843c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
844c6fd2807SJeff Garzik {
845c6fd2807SJeff Garzik 	unsigned char *p;
846c6fd2807SJeff Garzik 
847c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
848c6fd2807SJeff Garzik 
849c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
850c6fd2807SJeff Garzik 
851c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
852c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
853c6fd2807SJeff Garzik 		p--;
854c6fd2807SJeff Garzik 	*p = '\0';
855c6fd2807SJeff Garzik }
856c6fd2807SJeff Garzik 
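/*
 * Illustrative sketch (not part of the driver): extracting the product
 * string from IDENTIFY data, using the ATA_ID_PROD/ATA_ID_PROD_LEN
 * constants that libata uses elsewhere for this field.
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * model is now NUL terminated with trailing spaces trimmed.
 */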
857db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id)
858db6f8759STejun Heo {
859db6f8759STejun Heo 	if (ata_id_has_lba(id)) {
860db6f8759STejun Heo 		if (ata_id_has_lba48(id))
861db6f8759STejun Heo 			return ata_id_u64(id, 100);
862db6f8759STejun Heo 		else
863db6f8759STejun Heo 			return ata_id_u32(id, 60);
864db6f8759STejun Heo 	} else {
865db6f8759STejun Heo 		if (ata_id_current_chs_valid(id))
866db6f8759STejun Heo 			return ata_id_u32(id, 57);
867db6f8759STejun Heo 		else
868db6f8759STejun Heo 			return id[1] * id[3] * id[6];
869db6f8759STejun Heo 	}
870db6f8759STejun Heo }
871db6f8759STejun Heo 
8721e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
8731e999736SAlan Cox {
8741e999736SAlan Cox 	u64 sectors = 0;
8751e999736SAlan Cox 
8761e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
8771e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
8781e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
8791e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8801e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8811e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8821e999736SAlan Cox 
8831e999736SAlan Cox 	return ++sectors;
8841e999736SAlan Cox }
8851e999736SAlan Cox 
8861e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
8871e999736SAlan Cox {
8881e999736SAlan Cox 	u64 sectors = 0;
8891e999736SAlan Cox 
8901e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
8911e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8921e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8931e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8941e999736SAlan Cox 
8951e999736SAlan Cox 	return ++sectors;
8961e999736SAlan Cox }
8971e999736SAlan Cox 
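/*
 * Note (illustration only): READ NATIVE MAX returns the highest
 * addressable LBA, so both helpers above add one to turn that address
 * into a sector count; e.g. a returned max address of 0xffff yields
 * 0x10000 sectors.
 */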
8981e999736SAlan Cox /**
899c728a914STejun Heo  *	ata_read_native_max_address - Read native max address
900c728a914STejun Heo  *	@dev: target device
901c728a914STejun Heo  *	@max_sectors: out parameter for the result native max address
9021e999736SAlan Cox  *
903c728a914STejun Heo  *	Perform an LBA48 or LBA28 native size query upon the device in
904c728a914STejun Heo  *	question.
905c728a914STejun Heo  *
906c728a914STejun Heo  *	RETURNS:
907c728a914STejun Heo  *	0 on success, -EACCES if command is aborted by the drive.
908c728a914STejun Heo  *	-EIO on other errors.
9091e999736SAlan Cox  */
910c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
9111e999736SAlan Cox {
912c728a914STejun Heo 	unsigned int err_mask;
9131e999736SAlan Cox 	struct ata_taskfile tf;
914c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
9151e999736SAlan Cox 
9161e999736SAlan Cox 	ata_tf_init(dev, &tf);
9171e999736SAlan Cox 
918c728a914STejun Heo 	/* always clear all address registers */
9191e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
920c728a914STejun Heo 
921c728a914STejun Heo 	if (lba48) {
922c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
923c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
924c728a914STejun Heo 	} else
925c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX;
926c728a914STejun Heo 
9271e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
928c728a914STejun Heo 	tf.device |= ATA_LBA;
9291e999736SAlan Cox 
9302b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
931c728a914STejun Heo 	if (err_mask) {
932c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
933c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
934c728a914STejun Heo 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
935c728a914STejun Heo 			return -EACCES;
936c728a914STejun Heo 		return -EIO;
937c728a914STejun Heo 	}
938c728a914STejun Heo 
939c728a914STejun Heo 	if (lba48)
940c728a914STejun Heo 		*max_sectors = ata_tf_to_lba48(&tf);
941c728a914STejun Heo 	else
942c728a914STejun Heo 		*max_sectors = ata_tf_to_lba(&tf);
94393328e11SAlan Cox 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
94493328e11SAlan Cox 		(*max_sectors)--;
9451e999736SAlan Cox 	return 0;
9461e999736SAlan Cox }
9471e999736SAlan Cox 
9481e999736SAlan Cox /**
949c728a914STejun Heo  *	ata_set_max_sectors - Set max sectors
950c728a914STejun Heo  *	@dev: target device
9516b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
9521e999736SAlan Cox  *
953c728a914STejun Heo  *	Set max sectors of @dev to @new_sectors.
954c728a914STejun Heo  *
955c728a914STejun Heo  *	RETURNS:
956c728a914STejun Heo  *	0 on success, -EACCES if command is aborted or denied (due to
957c728a914STejun Heo  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
958c728a914STejun Heo  *	errors.
9591e999736SAlan Cox  */
96005027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
9611e999736SAlan Cox {
962c728a914STejun Heo 	unsigned int err_mask;
9631e999736SAlan Cox 	struct ata_taskfile tf;
964c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
9651e999736SAlan Cox 
9661e999736SAlan Cox 	new_sectors--;
9671e999736SAlan Cox 
9681e999736SAlan Cox 	ata_tf_init(dev, &tf);
9691e999736SAlan Cox 
970c728a914STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
9711e999736SAlan Cox 
972c728a914STejun Heo 	if (lba48) {
973c728a914STejun Heo 		tf.command = ATA_CMD_SET_MAX_EXT;
974c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
9751e999736SAlan Cox 
9761e999736SAlan Cox 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
9771e999736SAlan Cox 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
9781e999736SAlan Cox 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
9791e582ba4STejun Heo 	} else {
9801e999736SAlan Cox 		tf.command = ATA_CMD_SET_MAX;
981c728a914STejun Heo 
9821e582ba4STejun Heo 		tf.device |= (new_sectors >> 24) & 0xf;
9831e582ba4STejun Heo 	}
9841e582ba4STejun Heo 
9851e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
986c728a914STejun Heo 	tf.device |= ATA_LBA;
9871e999736SAlan Cox 
9881e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9891e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9901e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9911e999736SAlan Cox 
9922b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
993c728a914STejun Heo 	if (err_mask) {
994c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
995c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
996c728a914STejun Heo 		if (err_mask == AC_ERR_DEV &&
997c728a914STejun Heo 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
998c728a914STejun Heo 			return -EACCES;
999c728a914STejun Heo 		return -EIO;
1000c728a914STejun Heo 	}
1001c728a914STejun Heo 
10021e999736SAlan Cox 	return 0;
10031e999736SAlan Cox }
10041e999736SAlan Cox 
10051e999736SAlan Cox /**
10061e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
10071e999736SAlan Cox  *	@dev: Device to resize
10081e999736SAlan Cox  *
10091e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
10101e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
10111e999736SAlan Cox  *	the drive has the HPA feature set enabled.
101205027adcSTejun Heo  *
101305027adcSTejun Heo  *	RETURNS:
101405027adcSTejun Heo  *	0 on success, -errno on failure.
10151e999736SAlan Cox  */
101605027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev)
10171e999736SAlan Cox {
101805027adcSTejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
101905027adcSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
102005027adcSTejun Heo 	u64 sectors = ata_id_n_sectors(dev->id);
102105027adcSTejun Heo 	u64 native_sectors;
1022c728a914STejun Heo 	int rc;
10231e999736SAlan Cox 
102405027adcSTejun Heo 	/* do we need to do it? */
102505027adcSTejun Heo 	if (dev->class != ATA_DEV_ATA ||
102605027adcSTejun Heo 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
102705027adcSTejun Heo 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1028c728a914STejun Heo 		return 0;
10291e999736SAlan Cox 
103005027adcSTejun Heo 	/* read native max address */
103105027adcSTejun Heo 	rc = ata_read_native_max_address(dev, &native_sectors);
103205027adcSTejun Heo 	if (rc) {
103305027adcSTejun Heo 		/* If HPA isn't going to be unlocked, skip HPA
103405027adcSTejun Heo 		 * resizing from the next try.
103505027adcSTejun Heo 		 */
103605027adcSTejun Heo 		if (!ata_ignore_hpa) {
103705027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
103805027adcSTejun Heo 				       "broken, will skip HPA handling\n");
103905027adcSTejun Heo 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
104005027adcSTejun Heo 
104105027adcSTejun Heo 			/* we can continue if device aborted the command */
104205027adcSTejun Heo 			if (rc == -EACCES)
104305027adcSTejun Heo 				rc = 0;
104405027adcSTejun Heo 		}
104505027adcSTejun Heo 
104605027adcSTejun Heo 		return rc;
104705027adcSTejun Heo 	}
104805027adcSTejun Heo 
104905027adcSTejun Heo 	/* nothing to do? */
105005027adcSTejun Heo 	if (native_sectors <= sectors || !ata_ignore_hpa) {
105105027adcSTejun Heo 		if (!print_info || native_sectors == sectors)
105205027adcSTejun Heo 			return 0;
105305027adcSTejun Heo 
105405027adcSTejun Heo 		if (native_sectors > sectors)
10551e999736SAlan Cox 			ata_dev_printk(dev, KERN_INFO,
105605027adcSTejun Heo 				"HPA detected: current %llu, native %llu\n",
105705027adcSTejun Heo 				(unsigned long long)sectors,
105805027adcSTejun Heo 				(unsigned long long)native_sectors);
105905027adcSTejun Heo 		else if (native_sectors < sectors)
106005027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING,
106105027adcSTejun Heo 				"native sectors (%llu) is smaller than "
106205027adcSTejun Heo 				"sectors (%llu)\n",
106305027adcSTejun Heo 				(unsigned long long)native_sectors,
106405027adcSTejun Heo 				(unsigned long long)sectors);
106505027adcSTejun Heo 		return 0;
10661e999736SAlan Cox 	}
106737301a55STejun Heo 
106805027adcSTejun Heo 	/* let's unlock HPA */
106905027adcSTejun Heo 	rc = ata_set_max_sectors(dev, native_sectors);
107005027adcSTejun Heo 	if (rc == -EACCES) {
107105027adcSTejun Heo 		/* if device aborted the command, skip HPA resizing */
107205027adcSTejun Heo 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
107305027adcSTejun Heo 			       "(%llu -> %llu), skipping HPA handling\n",
107405027adcSTejun Heo 			       (unsigned long long)sectors,
107505027adcSTejun Heo 			       (unsigned long long)native_sectors);
107605027adcSTejun Heo 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
107705027adcSTejun Heo 		return 0;
107805027adcSTejun Heo 	} else if (rc)
107905027adcSTejun Heo 		return rc;
108005027adcSTejun Heo 
108105027adcSTejun Heo 	/* re-read IDENTIFY data */
108205027adcSTejun Heo 	rc = ata_dev_reread_id(dev, 0);
108305027adcSTejun Heo 	if (rc) {
108405027adcSTejun Heo 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
108505027adcSTejun Heo 			       "data after HPA resizing\n");
108605027adcSTejun Heo 		return rc;
108705027adcSTejun Heo 	}
108805027adcSTejun Heo 
108905027adcSTejun Heo 	if (print_info) {
109005027adcSTejun Heo 		u64 new_sectors = ata_id_n_sectors(dev->id);
109105027adcSTejun Heo 		ata_dev_printk(dev, KERN_INFO,
109205027adcSTejun Heo 			"HPA unlocked: %llu -> %llu, native %llu\n",
109305027adcSTejun Heo 			(unsigned long long)sectors,
109405027adcSTejun Heo 			(unsigned long long)new_sectors,
109505027adcSTejun Heo 			(unsigned long long)native_sectors);
109605027adcSTejun Heo 	}
109705027adcSTejun Heo 
109805027adcSTejun Heo 	return 0;
10991e999736SAlan Cox }
11001e999736SAlan Cox 
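/*
 * Example of the resulting behaviour (illustration only): with the
 * default ignore_hpa=0, a disk whose BIOS clipped the native capacity
 * keeps the clipped size and only an "HPA detected" notice is printed
 * during initial configuration.  Booting with libata.ignore_hpa=1 makes
 * this function issue SET MAX for the native size and re-read IDENTIFY,
 * so the full capacity is reported.
 */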
1101c6fd2807SJeff Garzik /**
110210305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
110310305f0fSAlan  *	@dev: device to identify
1104cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
110510305f0fSAlan  *
110610305f0fSAlan  *	Set up the timing values for the device based upon the identify
110710305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
110810305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
110910305f0fSAlan  *	mode correctly when possible.
111010305f0fSAlan  *
111110305f0fSAlan  *	In addition we emit messages in the same format as the default
111210305f0fSAlan  *	ata_dev_set_mode handler, in order to keep the presentation
111310305f0fSAlan  *	consistent.
111410305f0fSAlan  */
111510305f0fSAlan 
111610305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
111710305f0fSAlan {
111810305f0fSAlan 	unsigned int mask;
111910305f0fSAlan 	u8 mode;
112010305f0fSAlan 
112110305f0fSAlan 	/* Pack the DMA modes */
112210305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
112310305f0fSAlan 	if (dev->id[53] & 0x04)
112410305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
112510305f0fSAlan 
112610305f0fSAlan 	/* Select the mode in use */
112710305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
112810305f0fSAlan 
112910305f0fSAlan 	if (mode != 0) {
113010305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
113110305f0fSAlan 		       ata_mode_string(mask));
113210305f0fSAlan 	} else {
113310305f0fSAlan 		/* SWDMA perhaps ? */
113410305f0fSAlan 		mode = unknown;
113510305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
113610305f0fSAlan 	}
113710305f0fSAlan 
113810305f0fSAlan 	/* Configure the device reporting */
113910305f0fSAlan 	dev->xfer_mode = mode;
114010305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
114110305f0fSAlan }
114210305f0fSAlan 
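/*
 * Illustrative sketch (not part of the driver): a legacy driver that
 * trusts firmware-programmed timings might report the active mode from
 * its mode-setup path, passing a caller-chosen fallback for when the
 * IDENTIFY data does not indicate which DMA mode is selected.  The
 * fallback value below is only an example.
 *
 *	ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
 */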
114310305f0fSAlan /**
1144c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1145c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1146c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1147c6fd2807SJeff Garzik  *
1148c6fd2807SJeff Garzik  *	This function performs no operation; it is a no-op placeholder.
1149c6fd2807SJeff Garzik  *
1150c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1151c6fd2807SJeff Garzik  *
1152c6fd2807SJeff Garzik  *	LOCKING:
1153c6fd2807SJeff Garzik  *	caller.
1154c6fd2807SJeff Garzik  */
1155c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1156c6fd2807SJeff Garzik {
1157c6fd2807SJeff Garzik }
1158c6fd2807SJeff Garzik 
1159c6fd2807SJeff Garzik 
1160c6fd2807SJeff Garzik /**
1161c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1162c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1163c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1164c6fd2807SJeff Garzik  *
1165c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1166c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1167c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1168c6fd2807SJeff Garzik  *
1169c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1170c6fd2807SJeff Garzik  *
1171c6fd2807SJeff Garzik  *	LOCKING:
1172c6fd2807SJeff Garzik  *	caller.
1173c6fd2807SJeff Garzik  */
1174c6fd2807SJeff Garzik 
1175c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1176c6fd2807SJeff Garzik {
1177c6fd2807SJeff Garzik 	u8 tmp;
1178c6fd2807SJeff Garzik 
1179c6fd2807SJeff Garzik 	if (device == 0)
1180c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1181c6fd2807SJeff Garzik 	else
1182c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1183c6fd2807SJeff Garzik 
11840d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1185c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1186c6fd2807SJeff Garzik }
1187c6fd2807SJeff Garzik 
1188c6fd2807SJeff Garzik /**
1189c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1190c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1191c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1192c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1193c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1194c6fd2807SJeff Garzik  *
1195c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1196c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1197c6fd2807SJeff Garzik  *	ATA channel.
1198c6fd2807SJeff Garzik  *
1199c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1200c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1201c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1202c6fd2807SJeff Garzik  *
1203c6fd2807SJeff Garzik  *	LOCKING:
1204c6fd2807SJeff Garzik  *	caller.
1205c6fd2807SJeff Garzik  */
1206c6fd2807SJeff Garzik 
1207c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1208c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1209c6fd2807SJeff Garzik {
1210c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
121144877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
121244877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1213c6fd2807SJeff Garzik 
1214c6fd2807SJeff Garzik 	if (wait)
1215c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1216c6fd2807SJeff Garzik 
1217c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1218c6fd2807SJeff Garzik 
1219c6fd2807SJeff Garzik 	if (wait) {
12209af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1221c6fd2807SJeff Garzik 			msleep(150);
1222c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1223c6fd2807SJeff Garzik 	}
1224c6fd2807SJeff Garzik }
1225c6fd2807SJeff Garzik 
1226c6fd2807SJeff Garzik /**
1227c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1228c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1229c6fd2807SJeff Garzik  *
1230c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1231c6fd2807SJeff Garzik  *	page.
1232c6fd2807SJeff Garzik  *
1233c6fd2807SJeff Garzik  *	LOCKING:
1234c6fd2807SJeff Garzik  *	caller.
1235c6fd2807SJeff Garzik  */
1236c6fd2807SJeff Garzik 
1237c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1238c6fd2807SJeff Garzik {
1239c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1240c6fd2807SJeff Garzik 		"53==0x%04x  "
1241c6fd2807SJeff Garzik 		"63==0x%04x  "
1242c6fd2807SJeff Garzik 		"64==0x%04x  "
1243c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1244c6fd2807SJeff Garzik 		id[49],
1245c6fd2807SJeff Garzik 		id[53],
1246c6fd2807SJeff Garzik 		id[63],
1247c6fd2807SJeff Garzik 		id[64],
1248c6fd2807SJeff Garzik 		id[75]);
1249c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1250c6fd2807SJeff Garzik 		"81==0x%04x  "
1251c6fd2807SJeff Garzik 		"82==0x%04x  "
1252c6fd2807SJeff Garzik 		"83==0x%04x  "
1253c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1254c6fd2807SJeff Garzik 		id[80],
1255c6fd2807SJeff Garzik 		id[81],
1256c6fd2807SJeff Garzik 		id[82],
1257c6fd2807SJeff Garzik 		id[83],
1258c6fd2807SJeff Garzik 		id[84]);
1259c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1260c6fd2807SJeff Garzik 		"93==0x%04x\n",
1261c6fd2807SJeff Garzik 		id[88],
1262c6fd2807SJeff Garzik 		id[93]);
1263c6fd2807SJeff Garzik }
1264c6fd2807SJeff Garzik 
1265c6fd2807SJeff Garzik /**
1266c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1267c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1268c6fd2807SJeff Garzik  *
1269c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1270c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1271c6fd2807SJeff Garzik  *
1272c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1273c6fd2807SJeff Garzik  *
1274c6fd2807SJeff Garzik  *	LOCKING:
1275c6fd2807SJeff Garzik  *	None.
1276c6fd2807SJeff Garzik  *
1277c6fd2807SJeff Garzik  *	RETURNS:
1278c6fd2807SJeff Garzik  *	Computed xfermask
1279c6fd2807SJeff Garzik  */
1280c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1281c6fd2807SJeff Garzik {
1282c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1283c6fd2807SJeff Garzik 
1284c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1285c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1286c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1287c6fd2807SJeff Garzik 		pio_mask <<= 3;
1288c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1289c6fd2807SJeff Garzik 	} else {
1290c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1291c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1292c6fd2807SJeff Garzik 		 * a mask.
1293c6fd2807SJeff Garzik 		 */
12947a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
129546767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
129646767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
129746767aebSAlan Cox 		else
129846767aebSAlan Cox 			pio_mask = 1;
1299c6fd2807SJeff Garzik 
1300c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1301c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1302c6fd2807SJeff Garzik 		 * process. However it's the speeds, not the modes, that
1303c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1304c6fd2807SJeff Garzik 		 * will get this right anyway
1305c6fd2807SJeff Garzik 		 */
1306c6fd2807SJeff Garzik 	}
1307c6fd2807SJeff Garzik 
1308c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1309c6fd2807SJeff Garzik 
1310b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1311b352e57dSAlan Cox 		/*
1312b352e57dSAlan Cox 		 *	Process compact flash extended modes
1313b352e57dSAlan Cox 		 */
1314b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1315b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1316b352e57dSAlan Cox 
1317b352e57dSAlan Cox 		if (pio)
1318b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1319b352e57dSAlan Cox 		if (pio > 1)
1320b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1321b352e57dSAlan Cox 		if (dma)
1322b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1323b352e57dSAlan Cox 		if (dma > 1)
1324b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1325b352e57dSAlan Cox 	}
1326b352e57dSAlan Cox 
1327c6fd2807SJeff Garzik 	udma_mask = 0;
1328c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1329c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1330c6fd2807SJeff Garzik 
1331c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1332c6fd2807SJeff Garzik }
1333c6fd2807SJeff Garzik 
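/*
 * Illustrative usage sketch (not part of the original file): probe code can
 * turn IDENTIFY data into a printable transfer-mode description by combining
 * ata_id_xfermask() with ata_mode_string(), roughly as follows.
 */
#if 0
static void example_print_xfermask(struct ata_device *dev)
{
	unsigned int xfer_mask = ata_id_xfermask(dev->id);

	ata_dev_printk(dev, KERN_INFO, "max %s\n", ata_mode_string(xfer_mask));
}
#endif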
1334c6fd2807SJeff Garzik /**
1335c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1336c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1337c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
133865f27f38SDavid Howells  *	@data: data for @fn to use
1339c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1340c6fd2807SJeff Garzik  *
1341c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1342c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1343c6fd2807SJeff Garzik  *	user(low level driver)'s responsibility to make sure that only
1344c6fd2807SJeff Garzik  *	one task is active at any given time.
1345c6fd2807SJeff Garzik  *
1346c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1347c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1348c6fd2807SJeff Garzik  *	synchronization.
1349c6fd2807SJeff Garzik  *
1350c6fd2807SJeff Garzik  *	LOCKING:
1351c6fd2807SJeff Garzik  *	Inherited from caller.
1352c6fd2807SJeff Garzik  */
135365f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1354c6fd2807SJeff Garzik 			 unsigned long delay)
1355c6fd2807SJeff Garzik {
135665f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
135765f27f38SDavid Howells 	ap->port_task_data = data;
1358c6fd2807SJeff Garzik 
135945a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
136045a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1361c6fd2807SJeff Garzik }
1362c6fd2807SJeff Garzik 
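/*
 * Illustrative usage sketch (not part of the original file): an LLD can
 * defer polling work to port_task.  The callback and delay below are
 * hypothetical; @fn receives the work_struct embedded in ap->port_task and
 * @data is stashed in ap->port_task_data.
 */
#if 0
static void example_pio_task(struct work_struct *work);

static void example_defer_pio(struct ata_port *ap, void *context)
{
	/* stash @context in ap->port_task_data, run the task after ~10ms */
	ata_port_queue_task(ap, example_pio_task, context,
			    msecs_to_jiffies(10));
}
#endif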
1363c6fd2807SJeff Garzik /**
1364c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1365c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1366c6fd2807SJeff Garzik  *
1367c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1368c6fd2807SJeff Garzik  *	be running or scheduled.
1369c6fd2807SJeff Garzik  *
1370c6fd2807SJeff Garzik  *	LOCKING:
1371c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1372c6fd2807SJeff Garzik  */
1373c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1374c6fd2807SJeff Garzik {
1375c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1376c6fd2807SJeff Garzik 
137745a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1378c6fd2807SJeff Garzik 
1379c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1380c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1381c6fd2807SJeff Garzik }
1382c6fd2807SJeff Garzik 
13837102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1384c6fd2807SJeff Garzik {
1385c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1386c6fd2807SJeff Garzik 
1387c6fd2807SJeff Garzik 	complete(waiting);
1388c6fd2807SJeff Garzik }
1389c6fd2807SJeff Garzik 
1390c6fd2807SJeff Garzik /**
13912432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1392c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1393c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1394c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1395c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
13965c1ad8b3SRandy Dunlap  *	@sgl: sg list for the data buffer of the command
13972432697bSTejun Heo  *	@n_elem: Number of sg entries
13982b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
1399c6fd2807SJeff Garzik  *
1400c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1401c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1402c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1403c6fd2807SJeff Garzik 	is taken after a command times out.  It's the caller's duty to
1404c6fd2807SJeff Garzik  *	clean up after timeout.
1405c6fd2807SJeff Garzik  *
1406c6fd2807SJeff Garzik  *	LOCKING:
1407c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1408c6fd2807SJeff Garzik  *
1409c6fd2807SJeff Garzik  *	RETURNS:
1410c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1411c6fd2807SJeff Garzik  */
14122432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1413c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
141487260216SJens Axboe 			      int dma_dir, struct scatterlist *sgl,
14152b789108STejun Heo 			      unsigned int n_elem, unsigned long timeout)
1416c6fd2807SJeff Garzik {
14179af5c9c9STejun Heo 	struct ata_link *link = dev->link;
14189af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1419c6fd2807SJeff Garzik 	u8 command = tf->command;
1420c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1421c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1422c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1423da917d69STejun Heo 	int preempted_nr_active_links;
1424c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1425c6fd2807SJeff Garzik 	unsigned long flags;
1426c6fd2807SJeff Garzik 	unsigned int err_mask;
1427c6fd2807SJeff Garzik 	int rc;
1428c6fd2807SJeff Garzik 
1429c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1430c6fd2807SJeff Garzik 
1431c6fd2807SJeff Garzik 	/* no internal command while frozen */
1432c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1433c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1434c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1435c6fd2807SJeff Garzik 	}
1436c6fd2807SJeff Garzik 
1437c6fd2807SJeff Garzik 	/* initialize internal qc */
1438c6fd2807SJeff Garzik 
1439c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1440c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1441c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1442c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1443c6fd2807SJeff Garzik 	 */
1444c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1445c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1446c6fd2807SJeff Garzik 	else
1447c6fd2807SJeff Garzik 		tag = 0;
1448c6fd2807SJeff Garzik 
1449c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1450c6fd2807SJeff Garzik 		BUG();
1451c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1452c6fd2807SJeff Garzik 
1453c6fd2807SJeff Garzik 	qc->tag = tag;
1454c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1455c6fd2807SJeff Garzik 	qc->ap = ap;
1456c6fd2807SJeff Garzik 	qc->dev = dev;
1457c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1458c6fd2807SJeff Garzik 
14599af5c9c9STejun Heo 	preempted_tag = link->active_tag;
14609af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1461c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1462da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
14639af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
14649af5c9c9STejun Heo 	link->sactive = 0;
1465c6fd2807SJeff Garzik 	ap->qc_active = 0;
1466da917d69STejun Heo 	ap->nr_active_links = 0;
1467c6fd2807SJeff Garzik 
1468c6fd2807SJeff Garzik 	/* prepare & issue qc */
1469c6fd2807SJeff Garzik 	qc->tf = *tf;
1470c6fd2807SJeff Garzik 	if (cdb)
1471c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1472c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1473c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1474c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
14752432697bSTejun Heo 		unsigned int i, buflen = 0;
147687260216SJens Axboe 		struct scatterlist *sg;
14772432697bSTejun Heo 
147887260216SJens Axboe 		for_each_sg(sgl, sg, n_elem, i)
147987260216SJens Axboe 			buflen += sg->length;
14802432697bSTejun Heo 
148187260216SJens Axboe 		ata_sg_init(qc, sgl, n_elem);
148249c80429SBrian King 		qc->nbytes = buflen;
1483c6fd2807SJeff Garzik 	}
1484c6fd2807SJeff Garzik 
1485c6fd2807SJeff Garzik 	qc->private_data = &wait;
1486c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1487c6fd2807SJeff Garzik 
1488c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1489c6fd2807SJeff Garzik 
1490c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1491c6fd2807SJeff Garzik 
14922b789108STejun Heo 	if (!timeout)
14932b789108STejun Heo 		timeout = ata_probe_timeout * 1000 / HZ;
14942b789108STejun Heo 
14952b789108STejun Heo 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1496c6fd2807SJeff Garzik 
1497c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1498c6fd2807SJeff Garzik 
1499c6fd2807SJeff Garzik 	if (!rc) {
1500c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1501c6fd2807SJeff Garzik 
1502c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1503c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1504c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1505c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1506c6fd2807SJeff Garzik 		 */
1507c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1508c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1509c6fd2807SJeff Garzik 
1510c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1511c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1512c6fd2807SJeff Garzik 			else
1513c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1514c6fd2807SJeff Garzik 
1515c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1516c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1517c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1518c6fd2807SJeff Garzik 		}
1519c6fd2807SJeff Garzik 
1520c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1521c6fd2807SJeff Garzik 	}
1522c6fd2807SJeff Garzik 
1523c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1524c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1525c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1526c6fd2807SJeff Garzik 
1527a51d644aSTejun Heo 	/* perform minimal error analysis */
1528a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1529a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1530a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1531a51d644aSTejun Heo 
1532a51d644aSTejun Heo 		if (!qc->err_mask)
1533c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1534a51d644aSTejun Heo 
1535a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1536a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1537c6fd2807SJeff Garzik 	}
1538c6fd2807SJeff Garzik 
1539c6fd2807SJeff Garzik 	/* finish up */
1540c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1541c6fd2807SJeff Garzik 
1542c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1543c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1544c6fd2807SJeff Garzik 
1545c6fd2807SJeff Garzik 	ata_qc_free(qc);
15469af5c9c9STejun Heo 	link->active_tag = preempted_tag;
15479af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1548c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1549da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1550c6fd2807SJeff Garzik 
1551c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1552c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1553c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1554c6fd2807SJeff Garzik 	 * port.
1555c6fd2807SJeff Garzik 	 *
1556c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1557c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1558c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1559c6fd2807SJeff Garzik 	 *
1560c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1561c6fd2807SJeff Garzik 	 */
1562c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1563c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1564c6fd2807SJeff Garzik 		ata_port_probe(ap);
1565c6fd2807SJeff Garzik 	}
1566c6fd2807SJeff Garzik 
1567c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1568c6fd2807SJeff Garzik 
1569c6fd2807SJeff Garzik 	return err_mask;
1570c6fd2807SJeff Garzik }
1571c6fd2807SJeff Garzik 
1572c6fd2807SJeff Garzik /**
157333480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
15742432697bSTejun Heo  *	@dev: Device to which the command is sent
15752432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
15762432697bSTejun Heo  *	@cdb: CDB for packet command
15772432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
15782432697bSTejun Heo  *	@buf: Data buffer of the command
15792432697bSTejun Heo  *	@buflen: Length of data buffer
15802b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
15812432697bSTejun Heo  *
15822432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes a simple
15832432697bSTejun Heo  *	buffer instead of an sg list.
15842432697bSTejun Heo  *
15852432697bSTejun Heo  *	LOCKING:
15862432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
15872432697bSTejun Heo  *
15882432697bSTejun Heo  *	RETURNS:
15892432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
15902432697bSTejun Heo  */
15912432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
15922432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
15932b789108STejun Heo 			   int dma_dir, void *buf, unsigned int buflen,
15942b789108STejun Heo 			   unsigned long timeout)
15952432697bSTejun Heo {
159633480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
159733480a0eSTejun Heo 	unsigned int n_elem = 0;
15982432697bSTejun Heo 
159933480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
160033480a0eSTejun Heo 		WARN_ON(!buf);
16012432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
160233480a0eSTejun Heo 		psg = &sg;
160333480a0eSTejun Heo 		n_elem++;
160433480a0eSTejun Heo 	}
16052432697bSTejun Heo 
16062b789108STejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
16072b789108STejun Heo 				    timeout);
16082432697bSTejun Heo }
16092432697bSTejun Heo 
16102432697bSTejun Heo /**
1611c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1612c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1613c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1614c6fd2807SJeff Garzik  *
1615c6fd2807SJeff Garzik  *	Execute a 'simple' command that consists only of the opcode
1616c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers.
1617c6fd2807SJeff Garzik  *
1618c6fd2807SJeff Garzik  *	LOCKING:
1619c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1620c6fd2807SJeff Garzik  *
1621c6fd2807SJeff Garzik  *	RETURNS:
1622c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1623c6fd2807SJeff Garzik  */
1624c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1625c6fd2807SJeff Garzik {
1626c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1627c6fd2807SJeff Garzik 
1628c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1629c6fd2807SJeff Garzik 
1630c6fd2807SJeff Garzik 	tf.command = cmd;
1631c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1632c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1633c6fd2807SJeff Garzik 
16342b789108STejun Heo 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1635c6fd2807SJeff Garzik }
1636c6fd2807SJeff Garzik 
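/*
 * Illustrative usage sketch (not part of the original file): issuing a
 * FLUSH CACHE as a "simple" NODATA command and reporting the error mask.
 */
#if 0
static int example_flush(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR,
			       "flush failed (err_mask=0x%x)\n", err_mask);
		return -EIO;
	}
	return 0;
}
#endif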
1637c6fd2807SJeff Garzik /**
1638c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1639c6fd2807SJeff Garzik  *	@adev: ATA device
1640c6fd2807SJeff Garzik  *
1641c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1642c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1643c6fd2807SJeff Garzik  */
1644c6fd2807SJeff Garzik 
1645c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1646c6fd2807SJeff Garzik {
1647432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check
1648432729f0SAlan Cox 	   as the caller should know this */
16499af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1650c6fd2807SJeff Garzik 		return 0;
1651432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1652432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1653c6fd2807SJeff Garzik 		return 1;
1654432729f0SAlan Cox 	/* We turn it on when possible */
1655432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1656432729f0SAlan Cox 		return 1;
1657432729f0SAlan Cox 	return 0;
1658432729f0SAlan Cox }
1659c6fd2807SJeff Garzik 
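/*
 * Illustrative usage sketch (not part of the original file): a PATA
 * controller driver programming its timing registers might consult
 * ata_pio_need_iordy() when setting a PIO mode.  The register layout and
 * the EXAMPLE_IORDY_EN bit below are hypothetical.
 */
#if 0
#define EXAMPLE_IORDY_EN	(1 << 5)	/* hypothetical chip bit */

static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	u32 timing = 0;

	if (ata_pio_need_iordy(adev))
		timing |= EXAMPLE_IORDY_EN;
	/* ... write 'timing' to the hypothetical controller register ... */
}
#endif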
1660432729f0SAlan Cox /**
1661432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1662432729f0SAlan Cox  *	@adev: ATA device
1663432729f0SAlan Cox  *
1664432729f0SAlan Cox  *	Compute the highest modes possible when IORDY is not in use.  Returns
1665432729f0SAlan Cox  *	a PIO mode mask; at minimum PIO0-PIO1 are reported.
1666432729f0SAlan Cox  */
1667432729f0SAlan Cox 
1668432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1669432729f0SAlan Cox {
1670c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1671c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1672432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1673c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1674c6fd2807SJeff Garzik 		if (pio) {
1675c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1676c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1677432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1678432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1679c6fd2807SJeff Garzik 		}
1680c6fd2807SJeff Garzik 	}
1681432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1682c6fd2807SJeff Garzik }
1683c6fd2807SJeff Garzik 
1684c6fd2807SJeff Garzik /**
1685c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1686c6fd2807SJeff Garzik  *	@dev: target device
1687c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1688bff04647STejun Heo  *	@flags: ATA_READID_* flags
1689c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1690c6fd2807SJeff Garzik  *
1691c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1692c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1693c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1694c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1695c6fd2807SJeff Garzik  *
169650a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
169750a99018SAlan Cox  *	now we abort if we hit that case.
169850a99018SAlan Cox  *
1699c6fd2807SJeff Garzik  *	LOCKING:
1700c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1701c6fd2807SJeff Garzik  *
1702c6fd2807SJeff Garzik  *	RETURNS:
1703c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1704c6fd2807SJeff Garzik  */
1705c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1706bff04647STejun Heo 		    unsigned int flags, u16 *id)
1707c6fd2807SJeff Garzik {
17089af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1709c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1710c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1711c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1712c6fd2807SJeff Garzik 	const char *reason;
171354936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1714c6fd2807SJeff Garzik 	int rc;
1715c6fd2807SJeff Garzik 
1716c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
171744877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1718c6fd2807SJeff Garzik 
1719c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1720c6fd2807SJeff Garzik  retry:
1721c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1722c6fd2807SJeff Garzik 
1723c6fd2807SJeff Garzik 	switch (class) {
1724c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1725c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1726c6fd2807SJeff Garzik 		break;
1727c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1728c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1729c6fd2807SJeff Garzik 		break;
1730c6fd2807SJeff Garzik 	default:
1731c6fd2807SJeff Garzik 		rc = -ENODEV;
1732c6fd2807SJeff Garzik 		reason = "unsupported class";
1733c6fd2807SJeff Garzik 		goto err_out;
1734c6fd2807SJeff Garzik 	}
1735c6fd2807SJeff Garzik 
1736c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
173781afe893STejun Heo 
173881afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
173981afe893STejun Heo 	 * sure those are properly initialized.
174081afe893STejun Heo 	 */
174181afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
174281afe893STejun Heo 
174381afe893STejun Heo 	/* Device presence detection is unreliable on some
174481afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
174581afe893STejun Heo 	 */
174681afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1747c6fd2807SJeff Garzik 
1748c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
17492b789108STejun Heo 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1750c6fd2807SJeff Garzik 	if (err_mask) {
1751800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
175255a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
175344877b4eSTejun Heo 				ap->print_id, dev->devno);
175455a8e2c8STejun Heo 			return -ENOENT;
175555a8e2c8STejun Heo 		}
175655a8e2c8STejun Heo 
175754936f8bSTejun Heo 		/* Device or controller might have reported the wrong
175854936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
175954936f8bSTejun Heo 		 * the current one is aborted by the device.
176054936f8bSTejun Heo 		 */
176154936f8bSTejun Heo 		if (may_fallback &&
176254936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
176354936f8bSTejun Heo 			may_fallback = 0;
176454936f8bSTejun Heo 
176554936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
176654936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
176754936f8bSTejun Heo 			else
176854936f8bSTejun Heo 				class = ATA_DEV_ATA;
176954936f8bSTejun Heo 			goto retry;
177054936f8bSTejun Heo 		}
177154936f8bSTejun Heo 
1772c6fd2807SJeff Garzik 		rc = -EIO;
1773c6fd2807SJeff Garzik 		reason = "I/O error";
1774c6fd2807SJeff Garzik 		goto err_out;
1775c6fd2807SJeff Garzik 	}
1776c6fd2807SJeff Garzik 
177754936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
177854936f8bSTejun Heo 	 * successfully at least once.
177954936f8bSTejun Heo 	 */
178054936f8bSTejun Heo 	may_fallback = 0;
178154936f8bSTejun Heo 
1782c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1783c6fd2807SJeff Garzik 
1784c6fd2807SJeff Garzik 	/* sanity check */
1785c6fd2807SJeff Garzik 	rc = -EINVAL;
17866070068bSAlan Cox 	reason = "device reports invalid type";
17874a3381feSJeff Garzik 
17884a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
17894a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
17904a3381feSJeff Garzik 			goto err_out;
17914a3381feSJeff Garzik 	} else {
17924a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1793c6fd2807SJeff Garzik 			goto err_out;
1794c6fd2807SJeff Garzik 	}
1795c6fd2807SJeff Garzik 
1796169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1797169439c2SMark Lord 		tried_spinup = 1;
1798169439c2SMark Lord 		/*
1799169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1800169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1801169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1802169439c2SMark Lord 		 */
1803218f3d30SJeff Garzik 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1804fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
1805169439c2SMark Lord 			rc = -EIO;
1806169439c2SMark Lord 			reason = "SPINUP failed";
1807169439c2SMark Lord 			goto err_out;
1808169439c2SMark Lord 		}
1809169439c2SMark Lord 		/*
1810169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1811169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1812169439c2SMark Lord 		 */
1813169439c2SMark Lord 		if (id[2] == 0x37c8)
1814169439c2SMark Lord 			goto retry;
1815169439c2SMark Lord 	}
1816169439c2SMark Lord 
1817bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1818c6fd2807SJeff Garzik 		/*
1819c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1820c6fd2807SJeff Garzik 		 * SRST RESET
182150a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
182250a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1823c6fd2807SJeff Garzik 		 * anything else..
1824c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
182550a99018SAlan Cox 		 *
182650a99018SAlan Cox 		 * Note that ATA4 says lba is mandatory so the second check
182750a99018SAlan Cox 		 * should never trigger.
1828c6fd2807SJeff Garzik 		 */
1829c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1830c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1831c6fd2807SJeff Garzik 			if (err_mask) {
1832c6fd2807SJeff Garzik 				rc = -EIO;
1833c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1834c6fd2807SJeff Garzik 				goto err_out;
1835c6fd2807SJeff Garzik 			}
1836c6fd2807SJeff Garzik 
1837c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1838c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1839c6fd2807SJeff Garzik 			 */
1840bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1841c6fd2807SJeff Garzik 			goto retry;
1842c6fd2807SJeff Garzik 		}
1843c6fd2807SJeff Garzik 	}
1844c6fd2807SJeff Garzik 
1845c6fd2807SJeff Garzik 	*p_class = class;
1846c6fd2807SJeff Garzik 
1847c6fd2807SJeff Garzik 	return 0;
1848c6fd2807SJeff Garzik 
1849c6fd2807SJeff Garzik  err_out:
1850c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1851c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1852c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1853c6fd2807SJeff Garzik 	return rc;
1854c6fd2807SJeff Garzik }
1855c6fd2807SJeff Garzik 
1856c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
1857c6fd2807SJeff Garzik {
18589af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
18599af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1860c6fd2807SJeff Garzik }
1861c6fd2807SJeff Garzik 
1862c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
1863c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
1864c6fd2807SJeff Garzik {
18659af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1866c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1867c6fd2807SJeff Garzik 
1868c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
1869c6fd2807SJeff Garzik 		desc[0] = '\0';
1870c6fd2807SJeff Garzik 		return;
1871c6fd2807SJeff Garzik 	}
187275683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
18736919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
18746919a0a6SAlan Cox 		return;
18756919a0a6SAlan Cox 	}
1876c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
1877cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1878c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
1879c6fd2807SJeff Garzik 	}
1880c6fd2807SJeff Garzik 
1881c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
1882c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1883c6fd2807SJeff Garzik 	else
1884c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1885c6fd2807SJeff Garzik }
1886c6fd2807SJeff Garzik 
1887c6fd2807SJeff Garzik /**
1888c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1889c6fd2807SJeff Garzik  *	@dev: Target device to configure
1890c6fd2807SJeff Garzik  *
1891c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
1892c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
1893c6fd2807SJeff Garzik  *
1894c6fd2807SJeff Garzik  *	LOCKING:
1895c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1896c6fd2807SJeff Garzik  *
1897c6fd2807SJeff Garzik  *	RETURNS:
1898c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1899c6fd2807SJeff Garzik  */
1900efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
1901c6fd2807SJeff Garzik {
19029af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
19039af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
19046746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1905c6fd2807SJeff Garzik 	const u16 *id = dev->id;
1906c6fd2807SJeff Garzik 	unsigned int xfer_mask;
1907b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
19083f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
19093f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
1910c6fd2807SJeff Garzik 	int rc;
1911c6fd2807SJeff Garzik 
1912c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
191344877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
191444877b4eSTejun Heo 			       __FUNCTION__);
1915c6fd2807SJeff Garzik 		return 0;
1916c6fd2807SJeff Garzik 	}
1917c6fd2807SJeff Garzik 
1918c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
191944877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1920c6fd2807SJeff Garzik 
192175683fe7STejun Heo 	/* set horkage */
192275683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
192375683fe7STejun Heo 
19246746544cSTejun Heo 	/* let ACPI work its magic */
19256746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
19266746544cSTejun Heo 	if (rc)
19276746544cSTejun Heo 		return rc;
192808573a86SKristen Carlson Accardi 
192905027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
193005027adcSTejun Heo 	rc = ata_hpa_resize(dev);
193105027adcSTejun Heo 	if (rc)
193205027adcSTejun Heo 		return rc;
193305027adcSTejun Heo 
1934c6fd2807SJeff Garzik 	/* print device capabilities */
1935c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1936c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
1937c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1938c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1939c6fd2807SJeff Garzik 			       __FUNCTION__,
1940c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
1941c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
1942c6fd2807SJeff Garzik 
1943c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
1944c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1945c6fd2807SJeff Garzik 	dev->max_sectors = 0;
1946c6fd2807SJeff Garzik 	dev->cdb_len = 0;
1947c6fd2807SJeff Garzik 	dev->n_sectors = 0;
1948c6fd2807SJeff Garzik 	dev->cylinders = 0;
1949c6fd2807SJeff Garzik 	dev->heads = 0;
1950c6fd2807SJeff Garzik 	dev->sectors = 0;
1951c6fd2807SJeff Garzik 
1952c6fd2807SJeff Garzik 	/*
1953c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
1954c6fd2807SJeff Garzik 	 */
1955c6fd2807SJeff Garzik 
1956c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
1957c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
1958c6fd2807SJeff Garzik 
1959c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1960c6fd2807SJeff Garzik 		ata_dump_id(id);
1961c6fd2807SJeff Garzik 
1962ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1963ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1964ef143d57SAlbert Lee 			sizeof(fwrevbuf));
1965ef143d57SAlbert Lee 
1966ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1967ef143d57SAlbert Lee 			sizeof(modelbuf));
1968ef143d57SAlbert Lee 
1969c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
1970c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
1971b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
1972b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
197344877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
197444877b4eSTejun Heo 					       "supports DRM functions and may "
197544877b4eSTejun Heo 					       "not be fully accessible.\n");
1976b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
19772dcb407eSJeff Garzik 		} else
1978b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1979b352e57dSAlan Cox 
1980c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
1981c6fd2807SJeff Garzik 
19823f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
19833f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
19843f64f565SEric D. Mudama 
1985c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
1986c6fd2807SJeff Garzik 			const char *lba_desc;
1987c6fd2807SJeff Garzik 			char ncq_desc[20];
1988c6fd2807SJeff Garzik 
1989c6fd2807SJeff Garzik 			lba_desc = "LBA";
1990c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
1991c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
1992c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
1993c6fd2807SJeff Garzik 				lba_desc = "LBA48";
19946fc49adbSTejun Heo 
19956fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
19966fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
19976fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1998c6fd2807SJeff Garzik 			}
1999c6fd2807SJeff Garzik 
2000c6fd2807SJeff Garzik 			/* config NCQ */
2001c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2002c6fd2807SJeff Garzik 
2003c6fd2807SJeff Garzik 			/* print device info to dmesg */
20043f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
20053f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
20063f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
20073f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
20083f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
20093f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
20103f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
2011c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
20123f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
20133f64f565SEric D. Mudama 			}
2014c6fd2807SJeff Garzik 		} else {
2015c6fd2807SJeff Garzik 			/* CHS */
2016c6fd2807SJeff Garzik 
2017c6fd2807SJeff Garzik 			/* Default translation */
2018c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
2019c6fd2807SJeff Garzik 			dev->heads	= id[3];
2020c6fd2807SJeff Garzik 			dev->sectors	= id[6];
2021c6fd2807SJeff Garzik 
2022c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
2023c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
2024c6fd2807SJeff Garzik 				dev->cylinders = id[54];
2025c6fd2807SJeff Garzik 				dev->heads     = id[55];
2026c6fd2807SJeff Garzik 				dev->sectors   = id[56];
2027c6fd2807SJeff Garzik 			}
2028c6fd2807SJeff Garzik 
2029c6fd2807SJeff Garzik 			/* print device info to dmesg */
20303f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2031c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
20323f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
20333f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
20343f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
20353f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
20363f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
20373f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
20383f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
20393f64f565SEric D. Mudama 					dev->heads, dev->sectors);
20403f64f565SEric D. Mudama 			}
2041c6fd2807SJeff Garzik 		}
2042c6fd2807SJeff Garzik 
2043c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2044c6fd2807SJeff Garzik 	}
2045c6fd2807SJeff Garzik 
2046c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2047c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2048854c73a2STejun Heo 		const char *cdb_intr_string = "";
2049854c73a2STejun Heo 		const char *atapi_an_string = "";
20507d77b247STejun Heo 		u32 sntf;
2051c6fd2807SJeff Garzik 
2052c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2053c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2054c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2055c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2056c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2057c6fd2807SJeff Garzik 			rc = -EINVAL;
2058c6fd2807SJeff Garzik 			goto err_out_nosup;
2059c6fd2807SJeff Garzik 		}
2060c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2061c6fd2807SJeff Garzik 
20627d77b247STejun Heo 		/* Enable ATAPI AN if both the host and device have
20637d77b247STejun Heo 		 * the support.  If PMP is attached, SNTF is required
20647d77b247STejun Heo 		 * to enable ATAPI AN to discern between PHY status
20657d77b247STejun Heo 		 * changed notifications and ATAPI ANs.
20669f45cbd3SKristen Carlson Accardi 		 */
20677d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
20687d77b247STejun Heo 		    (!ap->nr_pmp_links ||
20697d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2070854c73a2STejun Heo 			unsigned int err_mask;
2071854c73a2STejun Heo 
20729f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2073218f3d30SJeff Garzik 			err_mask = ata_dev_set_feature(dev,
2074218f3d30SJeff Garzik 					SETFEATURES_SATA_ENABLE, SATA_AN);
2075854c73a2STejun Heo 			if (err_mask)
20769f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2077854c73a2STejun Heo 					"failed to enable ATAPI AN "
2078854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2079854c73a2STejun Heo 			else {
20809f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2081854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2082854c73a2STejun Heo 			}
20839f45cbd3SKristen Carlson Accardi 		}
20849f45cbd3SKristen Carlson Accardi 
2085c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2086c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2087c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2088c6fd2807SJeff Garzik 		}
2089c6fd2807SJeff Garzik 
2090c6fd2807SJeff Garzik 		/* print device info to dmesg */
2091c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2092ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2093854c73a2STejun Heo 				       "ATAPI: %s, %s, max %s%s%s\n",
2094ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2095c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2096854c73a2STejun Heo 				       cdb_intr_string, atapi_an_string);
2097c6fd2807SJeff Garzik 	}
2098c6fd2807SJeff Garzik 
2099914ed354STejun Heo 	/* determine max_sectors */
2100914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2101914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2102914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2103914ed354STejun Heo 
210493590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
210593590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
210693590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
210793590859SAlan Cox 		   idiot */
210893590859SAlan Cox 		if (print_info) {
210993590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
211093590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
211193590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
211293590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
211393590859SAlan Cox 		}
211493590859SAlan Cox 	}
211593590859SAlan Cox 
2116c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2117c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2118c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2119c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2120c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2121c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2122c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2123c6fd2807SJeff Garzik 	}
2124c6fd2807SJeff Garzik 
212575683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
212603ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
212703ec52deSTejun Heo 					 dev->max_sectors);
212818d6e9d5SAlbert Lee 
2129c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2130cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2131c6fd2807SJeff Garzik 
2132c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2133c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2134c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2135c6fd2807SJeff Garzik 	return 0;
2136c6fd2807SJeff Garzik 
2137c6fd2807SJeff Garzik err_out_nosup:
2138c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2139c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2140c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2141c6fd2807SJeff Garzik 	return rc;
2142c6fd2807SJeff Garzik }
2143c6fd2807SJeff Garzik 
2144c6fd2807SJeff Garzik /**
21452e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2146be0d18dfSAlan Cox  *	@ap: port
2147be0d18dfSAlan Cox  *
21482e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2149be0d18dfSAlan Cox  *	detection.
2150be0d18dfSAlan Cox  */
2151be0d18dfSAlan Cox 
2152be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2153be0d18dfSAlan Cox {
2154be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2155be0d18dfSAlan Cox }
2156be0d18dfSAlan Cox 
2157be0d18dfSAlan Cox /**
21582e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2159be0d18dfSAlan Cox  *	@ap: port
2160be0d18dfSAlan Cox  *
21612e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2162be0d18dfSAlan Cox  *	detection.
2163be0d18dfSAlan Cox  */
2164be0d18dfSAlan Cox 
2165be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2166be0d18dfSAlan Cox {
2167be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2168be0d18dfSAlan Cox }
2169be0d18dfSAlan Cox 
2170be0d18dfSAlan Cox /**
2171be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2172be0d18dfSAlan Cox  *	@ap: port
2173be0d18dfSAlan Cox  *
2174be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2175be0d18dfSAlan Cox  */
2176be0d18dfSAlan Cox 
2177be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2178be0d18dfSAlan Cox {
2179be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2180be0d18dfSAlan Cox }
2181be0d18dfSAlan Cox 
2182be0d18dfSAlan Cox /**
2183be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2184be0d18dfSAlan Cox  *	@ap: port
2185be0d18dfSAlan Cox  *
2186be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2187be0d18dfSAlan Cox  */
2188be0d18dfSAlan Cox 
2189be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2190be0d18dfSAlan Cox {
2191be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2192be0d18dfSAlan Cox }
2193be0d18dfSAlan Cox 
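/*
 * Illustrative usage sketch (not part of the original file): a driver with a
 * fixed 40-wire cable simply plugs the helper into its port operations; the
 * structure name below is hypothetical.
 */
#if 0
static struct ata_port_operations example_port_ops = {
	/* ... other methods ... */
	.cable_detect	= ata_cable_40wire,
};
#endif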
2194be0d18dfSAlan Cox /**
2195c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2196c6fd2807SJeff Garzik  *	@ap: Bus to probe
2197c6fd2807SJeff Garzik  *
2198c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2199c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2200c6fd2807SJeff Garzik  *	the bus.
2201c6fd2807SJeff Garzik  *
2202c6fd2807SJeff Garzik  *	LOCKING:
2203c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2204c6fd2807SJeff Garzik  *
2205c6fd2807SJeff Garzik  *	RETURNS:
2206c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2207c6fd2807SJeff Garzik  */
2208c6fd2807SJeff Garzik 
2209c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2210c6fd2807SJeff Garzik {
2211c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2212c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2213f58229f8STejun Heo 	int rc;
2214c6fd2807SJeff Garzik 	struct ata_device *dev;
2215c6fd2807SJeff Garzik 
2216c6fd2807SJeff Garzik 	ata_port_probe(ap);
2217c6fd2807SJeff Garzik 
2218f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2219f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2220c6fd2807SJeff Garzik 
2221c6fd2807SJeff Garzik  retry:
2222c6fd2807SJeff Garzik 	/* reset and determine device classes */
2223c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2224c6fd2807SJeff Garzik 
2225f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2226c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2227c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2228c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2229c6fd2807SJeff Garzik 		else
2230c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2231c6fd2807SJeff Garzik 
2232c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2233c6fd2807SJeff Garzik 	}
2234c6fd2807SJeff Garzik 
2235c6fd2807SJeff Garzik 	ata_port_probe(ap);
2236c6fd2807SJeff Garzik 
2237c6fd2807SJeff Garzik 	/* after the reset the device state is PIO 0 and the controller
2238c6fd2807SJeff Garzik 	   state is undefined. Record the mode */
2239c6fd2807SJeff Garzik 
2240f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2241f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2242c6fd2807SJeff Garzik 
2243f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2244f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2245f31f0cc2SJeff Garzik 	   the slave device */
2246f31f0cc2SJeff Garzik 
2247f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2248f58229f8STejun Heo 		if (tries[dev->devno])
2249f58229f8STejun Heo 			dev->class = classes[dev->devno];
2250c6fd2807SJeff Garzik 
2251c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2252c6fd2807SJeff Garzik 			continue;
2253c6fd2807SJeff Garzik 
2254bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2255bff04647STejun Heo 				     dev->id);
2256c6fd2807SJeff Garzik 		if (rc)
2257c6fd2807SJeff Garzik 			goto fail;
2258f31f0cc2SJeff Garzik 	}
2259f31f0cc2SJeff Garzik 
2260be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2261be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2262be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2263be0d18dfSAlan Cox 
2264614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2265614fe29bSAlan Cox 	   reported cable types and sensed types */
2266614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2267614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2268614fe29bSAlan Cox 			continue;
2269614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2270614fe29bSAlan Cox 		   end of the link the bridge is on, which is a problem */
2271614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2272614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2273614fe29bSAlan Cox 	}
2274614fe29bSAlan Cox 
2275f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2276f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2277f31f0cc2SJeff Garzik 
2278f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2279f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2280f31f0cc2SJeff Garzik 			continue;
2281c6fd2807SJeff Garzik 
22829af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2283efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
22849af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2285c6fd2807SJeff Garzik 		if (rc)
2286c6fd2807SJeff Garzik 			goto fail;
2287c6fd2807SJeff Garzik 	}
2288c6fd2807SJeff Garzik 
2289c6fd2807SJeff Garzik 	/* configure transfer mode */
22900260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
22914ae72a1eSTejun Heo 	if (rc)
2292c6fd2807SJeff Garzik 		goto fail;
2293c6fd2807SJeff Garzik 
2294f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2295f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2296c6fd2807SJeff Garzik 			return 0;
2297c6fd2807SJeff Garzik 
2298c6fd2807SJeff Garzik 	/* no device present, disable port */
2299c6fd2807SJeff Garzik 	ata_port_disable(ap);
2300c6fd2807SJeff Garzik 	return -ENODEV;
2301c6fd2807SJeff Garzik 
2302c6fd2807SJeff Garzik  fail:
23034ae72a1eSTejun Heo 	tries[dev->devno]--;
23044ae72a1eSTejun Heo 
2305c6fd2807SJeff Garzik 	switch (rc) {
2306c6fd2807SJeff Garzik 	case -EINVAL:
23074ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2308c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2309c6fd2807SJeff Garzik 		break;
23104ae72a1eSTejun Heo 
23114ae72a1eSTejun Heo 	case -ENODEV:
23124ae72a1eSTejun Heo 		/* give it just one more chance */
23134ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2314c6fd2807SJeff Garzik 	case -EIO:
23154ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
23164ae72a1eSTejun Heo 			/* This is the last chance, better to slow
23174ae72a1eSTejun Heo 			 * down than lose it.
23184ae72a1eSTejun Heo 			 */
2319936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
23204ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
23214ae72a1eSTejun Heo 		}
2322c6fd2807SJeff Garzik 	}
2323c6fd2807SJeff Garzik 
23244ae72a1eSTejun Heo 	if (!tries[dev->devno])
2325c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2326c6fd2807SJeff Garzik 
2327c6fd2807SJeff Garzik 	goto retry;
2328c6fd2807SJeff Garzik }
2329c6fd2807SJeff Garzik 
2330c6fd2807SJeff Garzik /**
2331c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2332c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2333c6fd2807SJeff Garzik  *
2334c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2335c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2336c6fd2807SJeff Garzik  *
2337cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2338c6fd2807SJeff Garzik  *	serialization.
2339c6fd2807SJeff Garzik  */
2340c6fd2807SJeff Garzik 
2341c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2342c6fd2807SJeff Garzik {
2343c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2344c6fd2807SJeff Garzik }
2345c6fd2807SJeff Garzik 
2346c6fd2807SJeff Garzik /**
2347c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2348936fd732STejun Heo  *	@link: SATA link to printk link status about
2349c6fd2807SJeff Garzik  *
2350c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2351c6fd2807SJeff Garzik  *
2352c6fd2807SJeff Garzik  *	LOCKING:
2353c6fd2807SJeff Garzik  *	None.
2354c6fd2807SJeff Garzik  */
2355936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2356c6fd2807SJeff Garzik {
2357c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2358c6fd2807SJeff Garzik 
2359936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2360c6fd2807SJeff Garzik 		return;
2361936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2362c6fd2807SJeff Garzik 
2363936fd732STejun Heo 	if (ata_link_online(link)) {
2364c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2365936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2366c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2367c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2368c6fd2807SJeff Garzik 	} else {
2369936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2370c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2371c6fd2807SJeff Garzik 				sstatus, scontrol);
2372c6fd2807SJeff Garzik 	}
2373c6fd2807SJeff Garzik }
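
/*
 * Worked example of the decode above, assuming the standard SStatus
 * layout (DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8) and a
 * sata_spd_string() that maps 1 to "1.5 Gbps" and 2 to "3.0 Gbps":
 *
 *	sstatus == 0x123:
 *		DET = 0x3			(device present, phy communication up)
 *		SPD = (0x123 >> 4) & 0xf = 2	("3.0 Gbps")
 *
 * so the function would log "SATA link up 3.0 Gbps (SStatus 123 ...)".
 */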
2374c6fd2807SJeff Garzik 
2375c6fd2807SJeff Garzik /**
2376c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2377c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2378c6fd2807SJeff Garzik  *
2379c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2380c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2381c6fd2807SJeff Garzik  *	clear any reset condition.
2382c6fd2807SJeff Garzik  *
2383c6fd2807SJeff Garzik  *	LOCKING:
2384c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2385c6fd2807SJeff Garzik  *
2386c6fd2807SJeff Garzik  */
2387c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2388c6fd2807SJeff Garzik {
2389936fd732STejun Heo 	struct ata_link *link = &ap->link;
2390c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2391936fd732STejun Heo 	u32 sstatus;
2392c6fd2807SJeff Garzik 
2393c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2394c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2395936fd732STejun Heo 		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2396c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2397c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2398c6fd2807SJeff Garzik 		mdelay(1);
2399c6fd2807SJeff Garzik 	}
2400c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2401936fd732STejun Heo 	sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2402c6fd2807SJeff Garzik 
2403c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2404c6fd2807SJeff Garzik 	do {
2405c6fd2807SJeff Garzik 		msleep(200);
2406936fd732STejun Heo 		sata_scr_read(link, SCR_STATUS, &sstatus);
2407c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2408c6fd2807SJeff Garzik 			break;
2409c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2410c6fd2807SJeff Garzik 
2411c6fd2807SJeff Garzik 	/* print link status */
2412936fd732STejun Heo 	sata_print_link_status(link);
2413c6fd2807SJeff Garzik 
2414c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2415936fd732STejun Heo 	if (!ata_link_offline(link))
2416c6fd2807SJeff Garzik 		ata_port_probe(ap);
2417c6fd2807SJeff Garzik 	else
2418c6fd2807SJeff Garzik 		ata_port_disable(ap);
2419c6fd2807SJeff Garzik 
2420c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2421c6fd2807SJeff Garzik 		return;
2422c6fd2807SJeff Garzik 
2423c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2424c6fd2807SJeff Garzik 		ata_port_disable(ap);
2425c6fd2807SJeff Garzik 		return;
2426c6fd2807SJeff Garzik 	}
2427c6fd2807SJeff Garzik 
2428c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2429c6fd2807SJeff Garzik }
2430c6fd2807SJeff Garzik 
2431c6fd2807SJeff Garzik /**
2432c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2433c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2434c6fd2807SJeff Garzik  *
2435c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2436c6fd2807SJeff Garzik  *	the bus for devices.
2437c6fd2807SJeff Garzik  *
2438c6fd2807SJeff Garzik  *	LOCKING:
2439c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2440c6fd2807SJeff Garzik  *
2441c6fd2807SJeff Garzik  */
2442c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2443c6fd2807SJeff Garzik {
2444c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2445c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2446c6fd2807SJeff Garzik 		return;
2447c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2448c6fd2807SJeff Garzik }
2449c6fd2807SJeff Garzik 
2450c6fd2807SJeff Garzik /**
2451c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2452c6fd2807SJeff Garzik  *	@adev: device
2453c6fd2807SJeff Garzik  *
2454c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or NULL if none
2455c6fd2807SJeff Garzik  *	is present.
2456c6fd2807SJeff Garzik  */
2457c6fd2807SJeff Garzik 
2458c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2459c6fd2807SJeff Garzik {
24609af5c9c9STejun Heo 	struct ata_link *link = adev->link;
24619af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2462c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2463c6fd2807SJeff Garzik 		return NULL;
2464c6fd2807SJeff Garzik 	return pair;
2465c6fd2807SJeff Garzik }
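
/*
 * A short usage sketch (hypothetical caller): given either device on a
 * PATA cable, fetch its companion so a cable-wide decision can look at
 * both devices.
 *
 *	struct ata_device *pair = ata_dev_pair(dev);
 *
 *	if (pair)
 *		... both devices share the cable; e.g. pick timings that
 *		    are safe for the slower of the two ...
 */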
2466c6fd2807SJeff Garzik 
2467c6fd2807SJeff Garzik /**
2468c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2469c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2470c6fd2807SJeff Garzik  *
2471c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2472c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2473c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2474c6fd2807SJeff Garzik  *	on this port.
2475c6fd2807SJeff Garzik  *
2476cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2477c6fd2807SJeff Garzik  *	serialization.
2478c6fd2807SJeff Garzik  */
2479c6fd2807SJeff Garzik 
2480c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2481c6fd2807SJeff Garzik {
24829af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
24839af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2484c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2485c6fd2807SJeff Garzik }
2486c6fd2807SJeff Garzik 
2487c6fd2807SJeff Garzik /**
2488c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2489936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2490c6fd2807SJeff Garzik  *
2491936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2492c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2493c6fd2807SJeff Garzik  *	using sata_set_spd().
2494c6fd2807SJeff Garzik  *
2495c6fd2807SJeff Garzik  *	LOCKING:
2496c6fd2807SJeff Garzik  *	Inherited from caller.
2497c6fd2807SJeff Garzik  *
2498c6fd2807SJeff Garzik  *	RETURNS:
2499c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2500c6fd2807SJeff Garzik  */
2501936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2502c6fd2807SJeff Garzik {
2503c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2504c6fd2807SJeff Garzik 	int rc, highbit;
2505c6fd2807SJeff Garzik 
2506936fd732STejun Heo 	if (!sata_scr_valid(link))
2507008a7896STejun Heo 		return -EOPNOTSUPP;
2508008a7896STejun Heo 
2509008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2510936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2511008a7896STejun Heo 	 */
2512936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2513008a7896STejun Heo 	if (rc == 0)
2514008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2515008a7896STejun Heo 	else
2516936fd732STejun Heo 		spd = link->sata_spd;
2517c6fd2807SJeff Garzik 
2518936fd732STejun Heo 	mask = link->sata_spd_limit;
2519c6fd2807SJeff Garzik 	if (mask <= 1)
2520c6fd2807SJeff Garzik 		return -EINVAL;
2521008a7896STejun Heo 
2522008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2523c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2524c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2525c6fd2807SJeff Garzik 
2526008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2527008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2528008a7896STejun Heo 	 */
2529008a7896STejun Heo 	if (spd > 1)
2530008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2531008a7896STejun Heo 	else
2532008a7896STejun Heo 		mask &= 1;
2533008a7896STejun Heo 
2534008a7896STejun Heo 	/* were we already at the bottom? */
2535c6fd2807SJeff Garzik 	if (!mask)
2536c6fd2807SJeff Garzik 		return -EINVAL;
2537c6fd2807SJeff Garzik 
2538936fd732STejun Heo 	link->sata_spd_limit = mask;
2539c6fd2807SJeff Garzik 
2540936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2541c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2542c6fd2807SJeff Garzik 
2543c6fd2807SJeff Garzik 	return 0;
2544c6fd2807SJeff Garzik }
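
/*
 * Worked example of the masking above: with link->sata_spd_limit == 0x3
 * (1.5 and 3.0 Gbps allowed) and the link currently running at spd == 2:
 *
 *	highbit = fls(0x3) - 1 = 1;	mask = 0x3 & ~(1 << 1) = 0x1
 *	spd > 1, so mask &= (1 << (2 - 1)) - 1;		mask stays 0x1
 *
 * The new sata_spd_limit is 0x1, i.e. the link is limited to 1.5 Gbps;
 * as the kernel-doc notes, sata_set_spd() must still be called to apply it.
 */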
2545c6fd2807SJeff Garzik 
2546936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2547c6fd2807SJeff Garzik {
2548c6fd2807SJeff Garzik 	u32 spd, limit;
2549c6fd2807SJeff Garzik 
2550936fd732STejun Heo 	if (link->sata_spd_limit == UINT_MAX)
2551c6fd2807SJeff Garzik 		limit = 0;
2552c6fd2807SJeff Garzik 	else
2553936fd732STejun Heo 		limit = fls(link->sata_spd_limit);
2554c6fd2807SJeff Garzik 
2555c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
2556c6fd2807SJeff Garzik 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2557c6fd2807SJeff Garzik 
2558c6fd2807SJeff Garzik 	return spd != limit;
2559c6fd2807SJeff Garzik }
2560c6fd2807SJeff Garzik 
2561c6fd2807SJeff Garzik /**
2562c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2563936fd732STejun Heo  *	@link: Link in question
2564c6fd2807SJeff Garzik  *
2565c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2566936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2567c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2568c6fd2807SJeff Garzik  *	configuration.
2569c6fd2807SJeff Garzik  *
2570c6fd2807SJeff Garzik  *	LOCKING:
2571c6fd2807SJeff Garzik  *	Inherited from caller.
2572c6fd2807SJeff Garzik  *
2573c6fd2807SJeff Garzik  *	RETURNS:
2574c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2575c6fd2807SJeff Garzik  */
2576936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2577c6fd2807SJeff Garzik {
2578c6fd2807SJeff Garzik 	u32 scontrol;
2579c6fd2807SJeff Garzik 
2580936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2581c6fd2807SJeff Garzik 		return 0;
2582c6fd2807SJeff Garzik 
2583936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2584c6fd2807SJeff Garzik }
2585c6fd2807SJeff Garzik 
2586c6fd2807SJeff Garzik /**
2587c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2588936fd732STejun Heo  *	@link: Link to set SATA spd for
2589c6fd2807SJeff Garzik  *
2590936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2591c6fd2807SJeff Garzik  *
2592c6fd2807SJeff Garzik  *	LOCKING:
2593c6fd2807SJeff Garzik  *	Inherited from caller.
2594c6fd2807SJeff Garzik  *
2595c6fd2807SJeff Garzik  *	RETURNS:
2596c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2597c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2598c6fd2807SJeff Garzik  */
2599936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2600c6fd2807SJeff Garzik {
2601c6fd2807SJeff Garzik 	u32 scontrol;
2602c6fd2807SJeff Garzik 	int rc;
2603c6fd2807SJeff Garzik 
2604936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2605c6fd2807SJeff Garzik 		return rc;
2606c6fd2807SJeff Garzik 
2607936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2608c6fd2807SJeff Garzik 		return 0;
2609c6fd2807SJeff Garzik 
2610936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2611c6fd2807SJeff Garzik 		return rc;
2612c6fd2807SJeff Garzik 
2613c6fd2807SJeff Garzik 	return 1;
2614c6fd2807SJeff Garzik }
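
/*
 * A sketch of how the two helpers above are typically combined:
 * sata_set_spd_needed() reports whether SControl still disagrees with
 * sata_spd_limit, and sata_set_spd() writes the new limit into SControl.
 *
 *	if (sata_set_spd(link) == 1)
 *		... SControl was changed; a hardreset is needed before
 *		    the new speed limit is actually negotiated ...
 */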
2615c6fd2807SJeff Garzik 
2616c6fd2807SJeff Garzik /*
2617c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2618c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2619c6fd2807SJeff Garzik  */
2620c6fd2807SJeff Garzik /*
2621b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2622c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2623b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2624b352e57dSAlan Cox  *
2625b352e57dSAlan Cox  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2626c6fd2807SJeff Garzik  */
2627c6fd2807SJeff Garzik 
2628c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2629c6fd2807SJeff Garzik 
2630c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2631c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2632c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2633c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2634c6fd2807SJeff Garzik 
2635b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2636b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2637c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2638c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2639c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2640c6fd2807SJeff Garzik 
2641c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2642c6fd2807SJeff Garzik 
2643c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2644c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2645c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2646c6fd2807SJeff Garzik 
2647c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2648c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2649c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2650c6fd2807SJeff Garzik 
2651b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2652b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2653c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2654c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2655c6fd2807SJeff Garzik 
2656c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2657c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2658c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2659c6fd2807SJeff Garzik 
2660c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2661c6fd2807SJeff Garzik 
2662c6fd2807SJeff Garzik 	{ 0xFF }
2663c6fd2807SJeff Garzik };
2664c6fd2807SJeff Garzik 
2665c6fd2807SJeff Garzik #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2666c6fd2807SJeff Garzik #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2667c6fd2807SJeff Garzik 
2668c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2669c6fd2807SJeff Garzik {
2670c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2671c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2672c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2673c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2674c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2675c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2676c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2677c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2678c6fd2807SJeff Garzik }
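
/*
 * ENOUGH() is a ceiling division (the number of whole "unit"s needed to
 * cover "v") and EZ() simply leaves zero fields alone.  A worked example,
 * assuming T is the bus clock period in picoseconds (the "* 1000" turns
 * the nanosecond table values into picoseconds): a 70 ns setup time on a
 * 33 MHz bus (T = 30000) becomes
 *
 *	q->setup = EZ(70 * 1000, 30000) = ((70000 - 1) / 30000) + 1 = 3
 *
 * i.e. three bus clocks.
 */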
2679c6fd2807SJeff Garzik 
2680c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2681c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2682c6fd2807SJeff Garzik {
2683c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2684c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2685c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2686c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2687c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2688c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2689c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2690c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2691c6fd2807SJeff Garzik }
2692c6fd2807SJeff Garzik 
2693c6fd2807SJeff Garzik static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
2694c6fd2807SJeff Garzik {
2695c6fd2807SJeff Garzik 	const struct ata_timing *t;
2696c6fd2807SJeff Garzik 
2697c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2698c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2699c6fd2807SJeff Garzik 			return NULL;
2700c6fd2807SJeff Garzik 	return t;
2701c6fd2807SJeff Garzik }
2702c6fd2807SJeff Garzik 
2703c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2704c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2705c6fd2807SJeff Garzik {
2706c6fd2807SJeff Garzik 	const struct ata_timing *s;
2707c6fd2807SJeff Garzik 	struct ata_timing p;
2708c6fd2807SJeff Garzik 
2709c6fd2807SJeff Garzik 	/*
2710c6fd2807SJeff Garzik 	 * Find the mode.
2711c6fd2807SJeff Garzik 	 */
2712c6fd2807SJeff Garzik 
2713c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2714c6fd2807SJeff Garzik 		return -EINVAL;
2715c6fd2807SJeff Garzik 
2716c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2717c6fd2807SJeff Garzik 
2718c6fd2807SJeff Garzik 	/*
2719c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2720c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2721c6fd2807SJeff Garzik 	 */
2722c6fd2807SJeff Garzik 
2723c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2724c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2725c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2726c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2727c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2728c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2729c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2730c6fd2807SJeff Garzik 		}
2731c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2732c6fd2807SJeff Garzik 	}
2733c6fd2807SJeff Garzik 
2734c6fd2807SJeff Garzik 	/*
2735c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2736c6fd2807SJeff Garzik 	 */
2737c6fd2807SJeff Garzik 
2738c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2739c6fd2807SJeff Garzik 
2740c6fd2807SJeff Garzik 	/*
2741c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2742c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2743c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2744c6fd2807SJeff Garzik 	 */
2745c6fd2807SJeff Garzik 
2746fd3367afSAlan 	if (speed > XFER_PIO_6) {
2747c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2748c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2749c6fd2807SJeff Garzik 	}
2750c6fd2807SJeff Garzik 
2751c6fd2807SJeff Garzik 	/*
2752c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2753c6fd2807SJeff Garzik 	 */
2754c6fd2807SJeff Garzik 
2755c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2756c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2757c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2758c6fd2807SJeff Garzik 	}
2759c6fd2807SJeff Garzik 
2760c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2761c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2762c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2763c6fd2807SJeff Garzik 	}
27644f701d1eSAlan Cox 
27654f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
27664f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
27674f701d1eSAlan Cox 	   if so, we must correct this */
27684f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
27694f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2770c6fd2807SJeff Garzik 
2771c6fd2807SJeff Garzik 	return 0;
2772c6fd2807SJeff Garzik }
2773c6fd2807SJeff Garzik 
2774c6fd2807SJeff Garzik /**
2775c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2776c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2777458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2778c6fd2807SJeff Garzik  *
2779c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2780c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2781c6fd2807SJeff Garzik  *	will apply the limit.
2782c6fd2807SJeff Garzik  *
2783c6fd2807SJeff Garzik  *	LOCKING:
2784c6fd2807SJeff Garzik  *	Inherited from caller.
2785c6fd2807SJeff Garzik  *
2786c6fd2807SJeff Garzik  *	RETURNS:
2787c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2788c6fd2807SJeff Garzik  */
2789458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2790c6fd2807SJeff Garzik {
2791458337dbSTejun Heo 	char buf[32];
2792458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2793458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2794458337dbSTejun Heo 	int quiet, highbit;
2795c6fd2807SJeff Garzik 
2796458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2797458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2798458337dbSTejun Heo 
2799458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2800458337dbSTejun Heo 						  dev->mwdma_mask,
2801c6fd2807SJeff Garzik 						  dev->udma_mask);
2802458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2803c6fd2807SJeff Garzik 
2804458337dbSTejun Heo 	switch (sel) {
2805458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2806458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2807458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2808458337dbSTejun Heo 		break;
2809458337dbSTejun Heo 
2810458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2811458337dbSTejun Heo 		if (udma_mask) {
2812458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2813458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2814458337dbSTejun Heo 			if (!udma_mask)
2815458337dbSTejun Heo 				return -ENOENT;
2816458337dbSTejun Heo 		} else if (mwdma_mask) {
2817458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2818458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2819458337dbSTejun Heo 			if (!mwdma_mask)
2820458337dbSTejun Heo 				return -ENOENT;
2821458337dbSTejun Heo 		}
2822458337dbSTejun Heo 		break;
2823458337dbSTejun Heo 
2824458337dbSTejun Heo 	case ATA_DNXFER_40C:
2825458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2826458337dbSTejun Heo 		break;
2827458337dbSTejun Heo 
2828458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2829458337dbSTejun Heo 		pio_mask &= 1;
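		/* fall through */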
2830458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2831458337dbSTejun Heo 		mwdma_mask = 0;
2832458337dbSTejun Heo 		udma_mask = 0;
2833458337dbSTejun Heo 		break;
2834458337dbSTejun Heo 
2835458337dbSTejun Heo 	default:
2836458337dbSTejun Heo 		BUG();
2837458337dbSTejun Heo 	}
2838458337dbSTejun Heo 
2839458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2840458337dbSTejun Heo 
2841458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2842458337dbSTejun Heo 		return -ENOENT;
2843458337dbSTejun Heo 
2844458337dbSTejun Heo 	if (!quiet) {
2845458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2846458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
2847458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
2848458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2849458337dbSTejun Heo 		else
2850458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
2851458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
2852458337dbSTejun Heo 
2853458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
2854458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
2855458337dbSTejun Heo 	}
2856c6fd2807SJeff Garzik 
2857c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2858c6fd2807SJeff Garzik 			    &dev->udma_mask);
2859c6fd2807SJeff Garzik 
2860c6fd2807SJeff Garzik 	return 0;
2861c6fd2807SJeff Garzik }
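
/*
 * Worked example for the ATA_DNXFER_PIO case above: a device advertising
 * pio_mask == 0x1f (PIO0-4) loses its highest mode,
 *
 *	highbit = fls(0x1f) - 1 = 4;	pio_mask &= ~(1 << 4);	now 0x0f
 *
 * leaving PIO0-3.  As documented, the new mask only takes effect once
 * ata_set_mode() is invoked afterwards.
 */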
2862c6fd2807SJeff Garzik 
2863c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
2864c6fd2807SJeff Garzik {
28659af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
2866c6fd2807SJeff Garzik 	unsigned int err_mask;
2867c6fd2807SJeff Garzik 	int rc;
2868c6fd2807SJeff Garzik 
2869c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
2870c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2871c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
2872c6fd2807SJeff Garzik 
2873c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
28742dcb407eSJeff Garzik 
287511750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
287611750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
287711750a40SAlan 		err_mask &= ~AC_ERR_DEV;
28782dcb407eSJeff Garzik 
28790bc2a79aSAlan Cox 	/* Some very old devices and some bad newer ones fail any kind of
28800bc2a79aSAlan Cox 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
28810bc2a79aSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
28820bc2a79aSAlan Cox 			dev->pio_mode <= XFER_PIO_2)
28830bc2a79aSAlan Cox 		err_mask &= ~AC_ERR_DEV;
28842dcb407eSJeff Garzik 
28853acaf94bSAlan Cox 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
28863acaf94bSAlan Cox 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
28873acaf94bSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
28883acaf94bSAlan Cox 	    dev->dma_mode == XFER_MW_DMA_0 &&
28893acaf94bSAlan Cox 	    (dev->id[63] >> 8) & 1)
28903acaf94bSAlan Cox 		err_mask &= ~AC_ERR_DEV;
28913acaf94bSAlan Cox 
2892c6fd2807SJeff Garzik 	if (err_mask) {
2893c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2894c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
2895c6fd2807SJeff Garzik 		return -EIO;
2896c6fd2807SJeff Garzik 	}
2897c6fd2807SJeff Garzik 
2898baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2899422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
2900baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2901c6fd2807SJeff Garzik 	if (rc)
2902c6fd2807SJeff Garzik 		return rc;
2903c6fd2807SJeff Garzik 
2904c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2905c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
2906c6fd2807SJeff Garzik 
2907c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2908c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2909c6fd2807SJeff Garzik 	return 0;
2910c6fd2807SJeff Garzik }
2911c6fd2807SJeff Garzik 
2912c6fd2807SJeff Garzik /**
291304351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
29140260731fSTejun Heo  *	@link: link on which timings will be programmed
2915c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
2916c6fd2807SJeff Garzik  *
291704351821SAlan  *	Standard implementation of the function used to tune and set
291804351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
291904351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
2920c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
2921c6fd2807SJeff Garzik  *
2922c6fd2807SJeff Garzik  *	LOCKING:
2923c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2924c6fd2807SJeff Garzik  *
2925c6fd2807SJeff Garzik  *	RETURNS:
2926c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
2927c6fd2807SJeff Garzik  */
292804351821SAlan 
29290260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2930c6fd2807SJeff Garzik {
29310260731fSTejun Heo 	struct ata_port *ap = link->ap;
2932c6fd2807SJeff Garzik 	struct ata_device *dev;
2933f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
2934c6fd2807SJeff Garzik 
2935c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
2936f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2937c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
2938b3a70601SAlan Cox 		unsigned int mode_mask;
2939c6fd2807SJeff Garzik 
2940c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2941c6fd2807SJeff Garzik 			continue;
2942c6fd2807SJeff Garzik 
2943b3a70601SAlan Cox 		mode_mask = ATA_DMA_MASK_ATA;
2944b3a70601SAlan Cox 		if (dev->class == ATA_DEV_ATAPI)
2945b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_ATAPI;
2946b3a70601SAlan Cox 		else if (ata_id_is_cfa(dev->id))
2947b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_CFA;
2948b3a70601SAlan Cox 
2949c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
2950c6fd2807SJeff Garzik 
2951c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2952c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2953b3a70601SAlan Cox 
2954b3a70601SAlan Cox 		if (libata_dma_mask & mode_mask)
2955b3a70601SAlan Cox 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2956b3a70601SAlan Cox 		else
2957b3a70601SAlan Cox 			dma_mask = 0;
2958b3a70601SAlan Cox 
2959c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2960c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2961c6fd2807SJeff Garzik 
2962c6fd2807SJeff Garzik 		found = 1;
2963c6fd2807SJeff Garzik 		if (dev->dma_mode)
2964c6fd2807SJeff Garzik 			used_dma = 1;
2965c6fd2807SJeff Garzik 	}
2966c6fd2807SJeff Garzik 	if (!found)
2967c6fd2807SJeff Garzik 		goto out;
2968c6fd2807SJeff Garzik 
2969c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
2970f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2971c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2972c6fd2807SJeff Garzik 			continue;
2973c6fd2807SJeff Garzik 
2974c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
2975c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2976c6fd2807SJeff Garzik 			rc = -EINVAL;
2977c6fd2807SJeff Garzik 			goto out;
2978c6fd2807SJeff Garzik 		}
2979c6fd2807SJeff Garzik 
2980c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
2981c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
2982c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
2983c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
2984c6fd2807SJeff Garzik 	}
2985c6fd2807SJeff Garzik 
2986c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
2987f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2988c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2989c6fd2807SJeff Garzik 			continue;
2990c6fd2807SJeff Garzik 
2991c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
2992c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2993c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
2994c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
2995c6fd2807SJeff Garzik 	}
2996c6fd2807SJeff Garzik 
2997c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
2998f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
299918d90debSAlan 		/* don't update disabled devices' xfer mode */
30009666f400STejun Heo 		if (!ata_dev_enabled(dev))
3001c6fd2807SJeff Garzik 			continue;
3002c6fd2807SJeff Garzik 
3003c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
3004c6fd2807SJeff Garzik 		if (rc)
3005c6fd2807SJeff Garzik 			goto out;
3006c6fd2807SJeff Garzik 	}
3007c6fd2807SJeff Garzik 
3008c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
3009c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
3010c6fd2807SJeff Garzik 	 */
3011cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3012032af1ceSAlan 		ap->host->simplex_claimed = ap;
3013c6fd2807SJeff Garzik 
3014c6fd2807SJeff Garzik  out:
3015c6fd2807SJeff Garzik 	if (rc)
3016c6fd2807SJeff Garzik 		*r_failed_dev = dev;
3017c6fd2807SJeff Garzik 	return rc;
3018c6fd2807SJeff Garzik }
3019c6fd2807SJeff Garzik 
3020c6fd2807SJeff Garzik /**
302104351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
30220260731fSTejun Heo  *	@link: link on which timings will be programmed
302304351821SAlan  *	@r_failed_dev: out parameter for failed device
302404351821SAlan  *
302504351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
302604351821SAlan  *	ata_set_mode() fails, pointer to the failing device is
302704351821SAlan  *	returned in @r_failed_dev.
302804351821SAlan  *
302904351821SAlan  *	LOCKING:
303004351821SAlan  *	PCI/etc. bus probe sem.
303104351821SAlan  *
303204351821SAlan  *	RETURNS:
303304351821SAlan  *	0 on success, negative errno otherwise
303404351821SAlan  */
30350260731fSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
303604351821SAlan {
30370260731fSTejun Heo 	struct ata_port *ap = link->ap;
30380260731fSTejun Heo 
303904351821SAlan 	/* has private set_mode? */
304004351821SAlan 	if (ap->ops->set_mode)
30410260731fSTejun Heo 		return ap->ops->set_mode(link, r_failed_dev);
30420260731fSTejun Heo 	return ata_do_set_mode(link, r_failed_dev);
304304351821SAlan }
304404351821SAlan 
304504351821SAlan /**
3046c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
3047c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
3048c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
3049c6fd2807SJeff Garzik  *
3050c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
3051c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3052c6fd2807SJeff Garzik  *	other threads.
3053c6fd2807SJeff Garzik  *
3054c6fd2807SJeff Garzik  *	LOCKING:
3055cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3056c6fd2807SJeff Garzik  */
3057c6fd2807SJeff Garzik 
3058c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3059c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3060c6fd2807SJeff Garzik {
3061c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3062c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3063c6fd2807SJeff Garzik }
3064c6fd2807SJeff Garzik 
3065c6fd2807SJeff Garzik /**
3066c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3067c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3068c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3069c6fd2807SJeff Garzik  *	@tmout: overall timeout
3070c6fd2807SJeff Garzik  *
3071c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3072c6fd2807SJeff Garzik  *	or a timeout occurs.
3073c6fd2807SJeff Garzik  *
3074d1adc1bbSTejun Heo  *	LOCKING:
3075d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3076d1adc1bbSTejun Heo  *
3077d1adc1bbSTejun Heo  *	RETURNS:
3078d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3079c6fd2807SJeff Garzik  */
3080d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3081c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3082c6fd2807SJeff Garzik {
3083c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3084c6fd2807SJeff Garzik 	u8 status;
3085c6fd2807SJeff Garzik 
3086c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3087c6fd2807SJeff Garzik 	timer_start = jiffies;
3088c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3089d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3090d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3091c6fd2807SJeff Garzik 		msleep(50);
3092c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3093c6fd2807SJeff Garzik 	}
3094c6fd2807SJeff Garzik 
3095d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3096c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
309735aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
309835aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3099c6fd2807SJeff Garzik 
3100c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3101d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3102d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3103c6fd2807SJeff Garzik 		msleep(50);
3104c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3105c6fd2807SJeff Garzik 	}
3106c6fd2807SJeff Garzik 
3107d1adc1bbSTejun Heo 	if (status == 0xff)
3108d1adc1bbSTejun Heo 		return -ENODEV;
3109d1adc1bbSTejun Heo 
3110c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3111c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
311235aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
311335aa7a43SJeff Garzik 				tmout / HZ, status);
3114d1adc1bbSTejun Heo 		return -EBUSY;
3115c6fd2807SJeff Garzik 	}
3116c6fd2807SJeff Garzik 
3117c6fd2807SJeff Garzik 	return 0;
3118c6fd2807SJeff Garzik }
3119c6fd2807SJeff Garzik 
3120d4b2bab4STejun Heo /**
312188ff6eafSTejun Heo  *	ata_wait_after_reset - wait before checking status after reset
312288ff6eafSTejun Heo  *	@ap: port containing status register to be polled
312388ff6eafSTejun Heo  *	@deadline: deadline jiffies for the operation
312488ff6eafSTejun Heo  *
312588ff6eafSTejun Heo  *	After reset, we need to pause a while before reading status.
312688ff6eafSTejun Heo  *	Also, certain combinations of controller and device report 0xff
312788ff6eafSTejun Heo  *	for some duration (e.g. until SATA PHY is up and running)
312888ff6eafSTejun Heo  *	which is interpreted as empty port in ATA world.  This
312988ff6eafSTejun Heo  *	function also waits for such devices to get out of 0xff
313088ff6eafSTejun Heo  *	status.
313188ff6eafSTejun Heo  *
313288ff6eafSTejun Heo  *	LOCKING:
313388ff6eafSTejun Heo  *	Kernel thread context (may sleep).
313488ff6eafSTejun Heo  */
313588ff6eafSTejun Heo void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
313688ff6eafSTejun Heo {
313788ff6eafSTejun Heo 	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
313888ff6eafSTejun Heo 
313988ff6eafSTejun Heo 	if (time_before(until, deadline))
314088ff6eafSTejun Heo 		deadline = until;
314188ff6eafSTejun Heo 
314288ff6eafSTejun Heo 	/* Spec mandates ">= 2ms" before checking status.  We wait
314388ff6eafSTejun Heo 	 * 150ms, because that was the magic delay used for ATAPI
314488ff6eafSTejun Heo 	 * devices in Hale Landis's ATADRVR, for the period of time
314588ff6eafSTejun Heo 	 * between when the ATA command register is written, and then
314688ff6eafSTejun Heo 	 * status is checked.  Because waiting for "a while" before
314788ff6eafSTejun Heo 	 * checking status is fine, post SRST, we perform this magic
314888ff6eafSTejun Heo 	 * delay here as well.
314988ff6eafSTejun Heo 	 *
315088ff6eafSTejun Heo 	 * Old drivers/ide uses the 2mS rule and then waits for ready.
315188ff6eafSTejun Heo 	 */
315288ff6eafSTejun Heo 	msleep(150);
315388ff6eafSTejun Heo 
315488ff6eafSTejun Heo 	/* Wait for 0xff to clear.  Some SATA devices take a long time
315588ff6eafSTejun Heo 	 * to clear 0xff after reset.  For example, HHD424020F7SV00
315688ff6eafSTejun Heo  * iVDR needs >= 800ms, while Quantum GoVault needs even more
315788ff6eafSTejun Heo 	 * than that.
315888ff6eafSTejun Heo 	 */
315988ff6eafSTejun Heo 	while (1) {
316088ff6eafSTejun Heo 		u8 status = ata_chk_status(ap);
316188ff6eafSTejun Heo 
316288ff6eafSTejun Heo 		if (status != 0xff || time_after(jiffies, deadline))
316388ff6eafSTejun Heo 			return;
316488ff6eafSTejun Heo 
316588ff6eafSTejun Heo 		msleep(50);
316688ff6eafSTejun Heo 	}
316788ff6eafSTejun Heo }
316888ff6eafSTejun Heo 
316988ff6eafSTejun Heo /**
3170d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3171d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3172d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3173d4b2bab4STejun Heo  *
3174d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3175d4b2bab4STejun Heo  *	occurs.
3176d4b2bab4STejun Heo  *
3177d4b2bab4STejun Heo  *	LOCKING:
3178d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3179d4b2bab4STejun Heo  *
3180d4b2bab4STejun Heo  *	RETURNS:
3181d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3182d4b2bab4STejun Heo  */
3183d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3184d4b2bab4STejun Heo {
3185d4b2bab4STejun Heo 	unsigned long start = jiffies;
3186d4b2bab4STejun Heo 	int warned = 0;
3187d4b2bab4STejun Heo 
3188d4b2bab4STejun Heo 	while (1) {
3189d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3190d4b2bab4STejun Heo 		unsigned long now = jiffies;
3191d4b2bab4STejun Heo 
3192d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3193d4b2bab4STejun Heo 			return 0;
3194936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3195d4b2bab4STejun Heo 			return -ENODEV;
3196d4b2bab4STejun Heo 		if (time_after(now, deadline))
3197d4b2bab4STejun Heo 			return -EBUSY;
3198d4b2bab4STejun Heo 
3199d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3200d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3201d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3202d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3203d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3204d4b2bab4STejun Heo 			warned = 1;
3205d4b2bab4STejun Heo 		}
3206d4b2bab4STejun Heo 
3207d4b2bab4STejun Heo 		msleep(50);
3208d4b2bab4STejun Heo 	}
3209d4b2bab4STejun Heo }
3210d4b2bab4STejun Heo 
3211d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3212d4b2bab4STejun Heo 			      unsigned long deadline)
3213c6fd2807SJeff Garzik {
3214c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3215c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3216c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
32179b89391cSTejun Heo 	int rc, ret = 0;
3218c6fd2807SJeff Garzik 
3219c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3220c6fd2807SJeff Garzik 	 * BSY bit to clear
3221c6fd2807SJeff Garzik 	 */
3222d4b2bab4STejun Heo 	if (dev0) {
3223d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
32249b89391cSTejun Heo 		if (rc) {
32259b89391cSTejun Heo 			if (rc != -ENODEV)
3226d4b2bab4STejun Heo 				return rc;
32279b89391cSTejun Heo 			ret = rc;
32289b89391cSTejun Heo 		}
3229d4b2bab4STejun Heo 	}
3230c6fd2807SJeff Garzik 
3231e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3232e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3233c6fd2807SJeff Garzik 	 */
3234e141d999STejun Heo 	if (dev1) {
3235e141d999STejun Heo 		int i;
3236c6fd2807SJeff Garzik 
3237c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3238e141d999STejun Heo 
3239e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3240e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3241e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3242e141d999STejun Heo 		 */
3243e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3244e141d999STejun Heo 			u8 nsect, lbal;
3245e141d999STejun Heo 
32460d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
32470d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3248c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3249c6fd2807SJeff Garzik 				break;
3250c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3251c6fd2807SJeff Garzik 		}
3252e141d999STejun Heo 
3253d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
32549b89391cSTejun Heo 		if (rc) {
32559b89391cSTejun Heo 			if (rc != -ENODEV)
3256d4b2bab4STejun Heo 				return rc;
32579b89391cSTejun Heo 			ret = rc;
32589b89391cSTejun Heo 		}
3259d4b2bab4STejun Heo 	}
3260c6fd2807SJeff Garzik 
3261c6fd2807SJeff Garzik 	/* is all this really necessary? */
3262c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3263c6fd2807SJeff Garzik 	if (dev1)
3264c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3265c6fd2807SJeff Garzik 	if (dev0)
3266c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3267d4b2bab4STejun Heo 
32689b89391cSTejun Heo 	return ret;
3269c6fd2807SJeff Garzik }
3270c6fd2807SJeff Garzik 
3271d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3272d4b2bab4STejun Heo 			     unsigned long deadline)
3273c6fd2807SJeff Garzik {
3274c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3275681c80b5SAlan Cox 	struct ata_device *dev;
3276681c80b5SAlan Cox 	int i = 0;
3277c6fd2807SJeff Garzik 
327844877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3279c6fd2807SJeff Garzik 
3280c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
32810d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3282c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
32830d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3284c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
32850d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3286c6fd2807SJeff Garzik 
3287681c80b5SAlan Cox 	/* If we issued an SRST then an ATA drive (not ATAPI)
3288681c80b5SAlan Cox 	 * may have changed configuration and be in PIO0 timing. If
3289681c80b5SAlan Cox 	 * we did a hard reset (or are coming from power on) this is
3290681c80b5SAlan Cox 	 * true for ATA or ATAPI. Until we've set a suitable controller
3291681c80b5SAlan Cox 	 * mode we should not touch the bus as we may be talking too fast.
3292681c80b5SAlan Cox 	 */
3293681c80b5SAlan Cox 
3294681c80b5SAlan Cox 	ata_link_for_each_dev(dev, &ap->link)
3295681c80b5SAlan Cox 		dev->pio_mode = XFER_PIO_0;
3296681c80b5SAlan Cox 
3297681c80b5SAlan Cox 	/* If the controller has a pio mode setup function then use
3298681c80b5SAlan Cox 	   it to set the chipset to rights. Don't touch the DMA setup
3299681c80b5SAlan Cox 	   as that will be dealt with when revalidating */
3300681c80b5SAlan Cox 	if (ap->ops->set_piomode) {
3301681c80b5SAlan Cox 		ata_link_for_each_dev(dev, &ap->link)
3302681c80b5SAlan Cox 			if (devmask & (1 << i++))
3303681c80b5SAlan Cox 				ap->ops->set_piomode(ap, dev);
3304681c80b5SAlan Cox 	}
3305681c80b5SAlan Cox 
330688ff6eafSTejun Heo 	/* wait a while before checking status */
330788ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
3308c6fd2807SJeff Garzik 
3309c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3310c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3311c6fd2807SJeff Garzik 	 * pulldown resistor.
3312c6fd2807SJeff Garzik 	 */
3313150981b0SAlan Cox 	if (ata_chk_status(ap) == 0xFF)
33149b89391cSTejun Heo 		return -ENODEV;
3315c6fd2807SJeff Garzik 
3316d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3317c6fd2807SJeff Garzik }
3318c6fd2807SJeff Garzik 
3319c6fd2807SJeff Garzik /**
3320c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3321c6fd2807SJeff Garzik  *	@ap: port to reset
3322c6fd2807SJeff Garzik  *
3323c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3324c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3325c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3326c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3327c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3328c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3329c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3330c6fd2807SJeff Garzik  *
3331c6fd2807SJeff Garzik  *	LOCKING:
3332c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3333cca3974eSJeff Garzik  *	Obtains host lock.
3334c6fd2807SJeff Garzik  *
3335c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3336c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3337c6fd2807SJeff Garzik  */
3338c6fd2807SJeff Garzik 
3339c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3340c6fd2807SJeff Garzik {
33419af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3342c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3343c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3344c6fd2807SJeff Garzik 	u8 err;
3345c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
33469b89391cSTejun Heo 	int rc;
3347c6fd2807SJeff Garzik 
334844877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3349c6fd2807SJeff Garzik 
3350c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3351c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3352c6fd2807SJeff Garzik 		dev0 = 1;
3353c6fd2807SJeff Garzik 	else {
3354c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3355c6fd2807SJeff Garzik 		if (slave_possible)
3356c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3357c6fd2807SJeff Garzik 	}
3358c6fd2807SJeff Garzik 
3359c6fd2807SJeff Garzik 	if (dev0)
3360c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3361c6fd2807SJeff Garzik 	if (dev1)
3362c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3363c6fd2807SJeff Garzik 
3364c6fd2807SJeff Garzik 	/* select device 0 again */
3365c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3366c6fd2807SJeff Garzik 
3367c6fd2807SJeff Garzik 	/* issue bus reset */
33689b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
33699b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
33709b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3371c6fd2807SJeff Garzik 			goto err_out;
33729b89391cSTejun Heo 	}
3373c6fd2807SJeff Garzik 
3374c6fd2807SJeff Garzik 	/*
3375c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3376c6fd2807SJeff Garzik 	 */
33773f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3378c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
33793f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3380c6fd2807SJeff Garzik 
3381c6fd2807SJeff Garzik 	/* is double-select really necessary? */
33829af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3383c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
33849af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3385c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3386c6fd2807SJeff Garzik 
3387c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
33889af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
33899af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3390c6fd2807SJeff Garzik 		goto err_out;
3391c6fd2807SJeff Garzik 
3392c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3393c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
33940d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3395c6fd2807SJeff Garzik 	}
3396c6fd2807SJeff Garzik 
3397c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3398c6fd2807SJeff Garzik 	return;
3399c6fd2807SJeff Garzik 
3400c6fd2807SJeff Garzik err_out:
3401c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3402ac8869d5SJeff Garzik 	ata_port_disable(ap);
3403c6fd2807SJeff Garzik 
3404c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3405c6fd2807SJeff Garzik }
3406c6fd2807SJeff Garzik 
3407c6fd2807SJeff Garzik /**
3408936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3409936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3410c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3411d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3412c6fd2807SJeff Garzik  *
3413936fd732STejun Heo  *	Make sure SStatus of @link reaches stable state, determined by
3414c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3415c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3416d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3417d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3418c6fd2807SJeff Garzik  *	until timeout then returns 0 if DET is stable at 1.
3419c6fd2807SJeff Garzik  *
3420d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3421d4b2bab4STejun Heo  *	two is used.
3422d4b2bab4STejun Heo  *
3423c6fd2807SJeff Garzik  *	LOCKING:
3424c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3425c6fd2807SJeff Garzik  *
3426c6fd2807SJeff Garzik  *	RETURNS:
3427c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3428c6fd2807SJeff Garzik  */
3429936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3430d4b2bab4STejun Heo 		       unsigned long deadline)
3431c6fd2807SJeff Garzik {
3432c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3433d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3434d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3435c6fd2807SJeff Garzik 	u32 last, cur;
3436c6fd2807SJeff Garzik 	int rc;
3437c6fd2807SJeff Garzik 
3438d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3439d4b2bab4STejun Heo 	if (time_before(t, deadline))
3440d4b2bab4STejun Heo 		deadline = t;
3441d4b2bab4STejun Heo 
3442936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3443c6fd2807SJeff Garzik 		return rc;
3444c6fd2807SJeff Garzik 	cur &= 0xf;
3445c6fd2807SJeff Garzik 
3446c6fd2807SJeff Garzik 	last = cur;
3447c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3448c6fd2807SJeff Garzik 
3449c6fd2807SJeff Garzik 	while (1) {
3450c6fd2807SJeff Garzik 		msleep(interval_msec);
3451936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3452c6fd2807SJeff Garzik 			return rc;
3453c6fd2807SJeff Garzik 		cur &= 0xf;
3454c6fd2807SJeff Garzik 
3455c6fd2807SJeff Garzik 		/* DET stable? */
3456c6fd2807SJeff Garzik 		if (cur == last) {
3457d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3458c6fd2807SJeff Garzik 				continue;
3459c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3460c6fd2807SJeff Garzik 				return 0;
3461c6fd2807SJeff Garzik 			continue;
3462c6fd2807SJeff Garzik 		}
3463c6fd2807SJeff Garzik 
3464c6fd2807SJeff Garzik 		/* unstable, start over */
3465c6fd2807SJeff Garzik 		last = cur;
3466c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3467c6fd2807SJeff Garzik 
3468f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3469f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3470f1545154STejun Heo 		 */
3471d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3472f1545154STejun Heo 			return -EPIPE;
3473c6fd2807SJeff Garzik 	}
3474c6fd2807SJeff Garzik }
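
/*
 * Illustrative sketch (assumed usage, not taken from this file): a caller
 * would typically pick the debounce timing table via sata_ehc_deb_timing()
 * and bound the wait with a deadline, e.g.
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *	unsigned long deadline = jiffies + msecs_to_jiffies(5000);
 *	int rc = sata_link_debounce(link, timing, deadline);
 *
 * where a negative rc (notably -EPIPE) tells the caller the link never
 * settled before the deadline.
 */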
3475c6fd2807SJeff Garzik 
3476c6fd2807SJeff Garzik /**
3477936fd732STejun Heo  *	sata_link_resume - resume SATA link
3478936fd732STejun Heo  *	@link: ATA link to resume SATA
3479c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3480d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3481c6fd2807SJeff Garzik  *
3482936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3483c6fd2807SJeff Garzik  *
3484c6fd2807SJeff Garzik  *	LOCKING:
3485c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3486c6fd2807SJeff Garzik  *
3487c6fd2807SJeff Garzik  *	RETURNS:
3488c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3489c6fd2807SJeff Garzik  */
3490936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3491d4b2bab4STejun Heo 		     unsigned long deadline)
3492c6fd2807SJeff Garzik {
3493c6fd2807SJeff Garzik 	u32 scontrol;
3494c6fd2807SJeff Garzik 	int rc;
3495c6fd2807SJeff Garzik 
3496936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3497c6fd2807SJeff Garzik 		return rc;
3498c6fd2807SJeff Garzik 
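	/* Per the SATA SControl register layout (descriptive note): keep the
	 * configured SPD field (bits 7:4), set IPM = 3 to disable
	 * partial/slumber transitions, and set DET = 0 to request no
	 * device-detection action, i.e. take the phy out of any previous
	 * offline/COMRESET state.
	 */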
3499c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3500c6fd2807SJeff Garzik 
3501936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3502c6fd2807SJeff Garzik 		return rc;
3503c6fd2807SJeff Garzik 
3504c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3505c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3506c6fd2807SJeff Garzik 	 */
3507c6fd2807SJeff Garzik 	msleep(200);
3508c6fd2807SJeff Garzik 
3509936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3510c6fd2807SJeff Garzik }
3511c6fd2807SJeff Garzik 
3512c6fd2807SJeff Garzik /**
3513c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3514cc0680a5STejun Heo  *	@link: ATA link to be reset
3515d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3516c6fd2807SJeff Garzik  *
3517cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  A failure from
3518b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give up
3519b8cffc6aSTejun Heo  *	on that port, so prereset should be best-effort.  It does its
3520b8cffc6aSTejun Heo  *	best to prepare for the reset sequence, but if things go wrong it
3521b8cffc6aSTejun Heo  *	should just whine, not fail.
3522c6fd2807SJeff Garzik  *
3523c6fd2807SJeff Garzik  *	LOCKING:
3524c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3525c6fd2807SJeff Garzik  *
3526c6fd2807SJeff Garzik  *	RETURNS:
3527c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3528c6fd2807SJeff Garzik  */
3529cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3530c6fd2807SJeff Garzik {
3531cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3532936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3533c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3534c6fd2807SJeff Garzik 	int rc;
3535c6fd2807SJeff Garzik 
353631daabdaSTejun Heo 	/* handle link resume */
3537c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
35380c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3539c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3540c6fd2807SJeff Garzik 
3541633273a3STejun Heo 	/* Some PMPs don't work with only SRST, force hardreset if PMP
3542633273a3STejun Heo 	 * is supported.
3543633273a3STejun Heo 	 */
3544633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP)
3545633273a3STejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
3546633273a3STejun Heo 
3547c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3548c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3549c6fd2807SJeff Garzik 		return 0;
3550c6fd2807SJeff Garzik 
3551936fd732STejun Heo 	/* if SATA, resume link */
3552a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3553936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3554b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3555b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3556cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3557c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3558c6fd2807SJeff Garzik 	}
3559c6fd2807SJeff Garzik 
3560c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3561c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3562c6fd2807SJeff Garzik 	 */
35630c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3564b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
35656dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3566cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3567b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3568b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3569b8cffc6aSTejun Heo 		}
3570b8cffc6aSTejun Heo 	}
3571c6fd2807SJeff Garzik 
3572c6fd2807SJeff Garzik 	return 0;
3573c6fd2807SJeff Garzik }
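
/*
 * Illustrative sketch (assumed typical usage, not from this file): a
 * low-level driver's ->prereset() usually performs controller-specific
 * quiescing and then falls through to ata_std_prereset() for the common
 * link-resume and !BSY handling.  The driver name below is made up:
 *
 *	static int mydrv_prereset(struct ata_link *link, unsigned long deadline)
 *	{
 *		(quiesce controller-specific engines here)
 *		return ata_std_prereset(link, deadline);
 *	}
 */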
3574c6fd2807SJeff Garzik 
3575c6fd2807SJeff Garzik /**
3576c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3577cc0680a5STejun Heo  *	@link: ATA link to reset
3578c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3579d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3580c6fd2807SJeff Garzik  *
3581c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3582c6fd2807SJeff Garzik  *
3583c6fd2807SJeff Garzik  *	LOCKING:
3584c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3585c6fd2807SJeff Garzik  *
3586c6fd2807SJeff Garzik  *	RETURNS:
3587c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3588c6fd2807SJeff Garzik  */
3589cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3590d4b2bab4STejun Heo 		      unsigned long deadline)
3591c6fd2807SJeff Garzik {
3592cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3593c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3594d4b2bab4STejun Heo 	unsigned int devmask = 0;
3595d4b2bab4STejun Heo 	int rc;
3596c6fd2807SJeff Garzik 	u8 err;
3597c6fd2807SJeff Garzik 
3598c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3599c6fd2807SJeff Garzik 
3600936fd732STejun Heo 	if (ata_link_offline(link)) {
3601c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3602c6fd2807SJeff Garzik 		goto out;
3603c6fd2807SJeff Garzik 	}
3604c6fd2807SJeff Garzik 
3605c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3606c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3607c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3608c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3609c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3610c6fd2807SJeff Garzik 
3611c6fd2807SJeff Garzik 	/* select device 0 again */
3612c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3613c6fd2807SJeff Garzik 
3614c6fd2807SJeff Garzik 	/* issue bus reset */
3615c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3616d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
36179b89391cSTejun Heo 	/* if the link is occupied, -ENODEV is an error too */
3618936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3619cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3620d4b2bab4STejun Heo 		return rc;
3621c6fd2807SJeff Garzik 	}
3622c6fd2807SJeff Garzik 
3623c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
36243f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
36253f19859eSTejun Heo 					  devmask & (1 << 0), &err);
3626c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
36273f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
36283f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3629c6fd2807SJeff Garzik 
3630c6fd2807SJeff Garzik  out:
3631c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3632c6fd2807SJeff Garzik 	return 0;
3633c6fd2807SJeff Garzik }
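
/*
 * Illustrative sketch (assumption): the caller passes a classes[] array
 * with one slot per possible device on the link and inspects it after
 * the reset, e.g.
 *
 *	unsigned int classes[2] = { ATA_DEV_UNKNOWN, ATA_DEV_UNKNOWN };
 *
 *	if (ata_std_softreset(link, classes, deadline) == 0 &&
 *	    classes[0] == ATA_DEV_ATA)
 *		(device 0 answered with an ATA signature)
 */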
3634c6fd2807SJeff Garzik 
3635c6fd2807SJeff Garzik /**
3636cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
3637cc0680a5STejun Heo  *	@link: link to reset
3638b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3639d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3640c6fd2807SJeff Garzik  *
3641cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
3642c6fd2807SJeff Garzik  *
3643c6fd2807SJeff Garzik  *	LOCKING:
3644c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3645c6fd2807SJeff Garzik  *
3646c6fd2807SJeff Garzik  *	RETURNS:
3647c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3648c6fd2807SJeff Garzik  */
3649cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3650d4b2bab4STejun Heo 			unsigned long deadline)
3651c6fd2807SJeff Garzik {
3652c6fd2807SJeff Garzik 	u32 scontrol;
3653c6fd2807SJeff Garzik 	int rc;
3654c6fd2807SJeff Garzik 
3655c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3656c6fd2807SJeff Garzik 
3657936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
3658c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3659c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3660c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3661c6fd2807SJeff Garzik 		 * and Sil3124.
3662c6fd2807SJeff Garzik 		 */
3663936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3664b6103f6dSTejun Heo 			goto out;
3665c6fd2807SJeff Garzik 
3666cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3667c6fd2807SJeff Garzik 
3668936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3669b6103f6dSTejun Heo 			goto out;
3670c6fd2807SJeff Garzik 
3671936fd732STejun Heo 		sata_set_spd(link);
3672c6fd2807SJeff Garzik 	}
3673c6fd2807SJeff Garzik 
3674c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3675936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3676b6103f6dSTejun Heo 		goto out;
3677c6fd2807SJeff Garzik 
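	/* Per the SATA SControl register layout (descriptive note): keep the
	 * configured SPD field (bits 7:4), set IPM = 3 (no partial/slumber
	 * transitions) and set DET = 1, which asks the phy to issue COMRESET.
	 */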
3678c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3679c6fd2807SJeff Garzik 
3680936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3681b6103f6dSTejun Heo 		goto out;
3682c6fd2807SJeff Garzik 
3683c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3684c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3685c6fd2807SJeff Garzik 	 */
3686c6fd2807SJeff Garzik 	msleep(1);
3687c6fd2807SJeff Garzik 
3688936fd732STejun Heo 	/* bring link back */
3689936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
3690b6103f6dSTejun Heo  out:
3691b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3692b6103f6dSTejun Heo 	return rc;
3693b6103f6dSTejun Heo }
3694b6103f6dSTejun Heo 
3695b6103f6dSTejun Heo /**
3696b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3697cc0680a5STejun Heo  *	@link: link to reset
3698b6103f6dSTejun Heo  *	@class: resulting class of attached device
3699d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3700b6103f6dSTejun Heo  *
3701b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3702b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3703b6103f6dSTejun Heo  *
3704b6103f6dSTejun Heo  *	LOCKING:
3705b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3706b6103f6dSTejun Heo  *
3707b6103f6dSTejun Heo  *	RETURNS:
3708b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3709b6103f6dSTejun Heo  */
3710cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3711d4b2bab4STejun Heo 		       unsigned long deadline)
3712b6103f6dSTejun Heo {
3713cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3714936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3715b6103f6dSTejun Heo 	int rc;
3716b6103f6dSTejun Heo 
3717b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3718b6103f6dSTejun Heo 
3719b6103f6dSTejun Heo 	/* do hardreset */
3720cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
3721b6103f6dSTejun Heo 	if (rc) {
3722cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3723b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3724b6103f6dSTejun Heo 		return rc;
3725b6103f6dSTejun Heo 	}
3726c6fd2807SJeff Garzik 
3727c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3728936fd732STejun Heo 	if (ata_link_offline(link)) {
3729c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3730c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3731c6fd2807SJeff Garzik 		return 0;
3732c6fd2807SJeff Garzik 	}
3733c6fd2807SJeff Garzik 
373488ff6eafSTejun Heo 	/* wait a while before checking status */
373588ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
373634fee227STejun Heo 
3737633273a3STejun Heo 	/* If PMP is supported, we have to do follow-up SRST.  Note
3738633273a3STejun Heo 	 * that some PMPs don't send D2H Reg FIS after hardreset at
3739633273a3STejun Heo 	 * all if the first port is empty.  Wait for it just for a
3740633273a3STejun Heo 	 * second and request follow-up SRST.
3741633273a3STejun Heo 	 */
3742633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP) {
3743633273a3STejun Heo 		ata_wait_ready(ap, jiffies + HZ);
3744633273a3STejun Heo 		return -EAGAIN;
3745633273a3STejun Heo 	}
3746633273a3STejun Heo 
3747d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
37489b89391cSTejun Heo 	/* link occupied, -ENODEV is an error too */
37499b89391cSTejun Heo 	if (rc) {
3750cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3751d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3752d4b2bab4STejun Heo 		return rc;
3753c6fd2807SJeff Garzik 	}
3754c6fd2807SJeff Garzik 
3755c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3756c6fd2807SJeff Garzik 
37573f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
3758c6fd2807SJeff Garzik 
3759c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3760c6fd2807SJeff Garzik 	return 0;
3761c6fd2807SJeff Garzik }
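
/*
 * Illustrative sketch (assumed typical usage): SATA drivers with no
 * special reset requirements commonly hand the standard helpers straight
 * to the EH core from their ->error_handler(), roughly:
 *
 *	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *		  sata_std_hardreset, ata_std_postreset);
 *
 * The exact wiring is driver-specific; treat this as a sketch only.
 */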
3762c6fd2807SJeff Garzik 
3763c6fd2807SJeff Garzik /**
3764c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3765cc0680a5STejun Heo  *	@link: the target ata_link
3766c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3767c6fd2807SJeff Garzik  *
3768c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3769c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3770c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3771c6fd2807SJeff Garzik  *
3772c6fd2807SJeff Garzik  *	LOCKING:
3773c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3774c6fd2807SJeff Garzik  */
3775cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3776c6fd2807SJeff Garzik {
3777cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3778c6fd2807SJeff Garzik 	u32 serror;
3779c6fd2807SJeff Garzik 
3780c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3781c6fd2807SJeff Garzik 
3782c6fd2807SJeff Garzik 	/* print link status */
3783936fd732STejun Heo 	sata_print_link_status(link);
3784c6fd2807SJeff Garzik 
3785c6fd2807SJeff Garzik 	/* clear SError */
3786936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3787936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
3788c6fd2807SJeff Garzik 
3789c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3790c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3791c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3792c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3793c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3794c6fd2807SJeff Garzik 
3795c6fd2807SJeff Garzik 	/* bail out if no device is present */
3796c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3797c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3798c6fd2807SJeff Garzik 		return;
3799c6fd2807SJeff Garzik 	}
3800c6fd2807SJeff Garzik 
3801c6fd2807SJeff Garzik 	/* set up device control */
38020d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
38030d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3804c6fd2807SJeff Garzik 
3805c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3806c6fd2807SJeff Garzik }
3807c6fd2807SJeff Garzik 
3808c6fd2807SJeff Garzik /**
3809c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3810c6fd2807SJeff Garzik  *	@dev: device to compare against
3811c6fd2807SJeff Garzik  *	@new_class: class of the new device
3812c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3813c6fd2807SJeff Garzik  *
3814c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3815c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3816c6fd2807SJeff Garzik  *	@new_id.
3817c6fd2807SJeff Garzik  *
3818c6fd2807SJeff Garzik  *	LOCKING:
3819c6fd2807SJeff Garzik  *	None.
3820c6fd2807SJeff Garzik  *
3821c6fd2807SJeff Garzik  *	RETURNS:
3822c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3823c6fd2807SJeff Garzik  */
3824c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3825c6fd2807SJeff Garzik 			       const u16 *new_id)
3826c6fd2807SJeff Garzik {
3827c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3828a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3829a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3830c6fd2807SJeff Garzik 
3831c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3832c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3833c6fd2807SJeff Garzik 			       dev->class, new_class);
3834c6fd2807SJeff Garzik 		return 0;
3835c6fd2807SJeff Garzik 	}
3836c6fd2807SJeff Garzik 
3837a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3838a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3839a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3840a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3841c6fd2807SJeff Garzik 
3842c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3843c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3844c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3845c6fd2807SJeff Garzik 		return 0;
3846c6fd2807SJeff Garzik 	}
3847c6fd2807SJeff Garzik 
3848c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3849c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3850c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3851c6fd2807SJeff Garzik 		return 0;
3852c6fd2807SJeff Garzik 	}
3853c6fd2807SJeff Garzik 
3854c6fd2807SJeff Garzik 	return 1;
3855c6fd2807SJeff Garzik }
3856c6fd2807SJeff Garzik 
3857c6fd2807SJeff Garzik /**
3858fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
38593fae450cSHenrik Kretzschmar  *	@dev: target ATA device
3860bff04647STejun Heo  *	@readid_flags: read ID flags
3861c6fd2807SJeff Garzik  *
3862c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
3863c6fd2807SJeff Garzik  *	the port.
3864c6fd2807SJeff Garzik  *
3865c6fd2807SJeff Garzik  *	LOCKING:
3866c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3867c6fd2807SJeff Garzik  *
3868c6fd2807SJeff Garzik  *	RETURNS:
3869c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3870c6fd2807SJeff Garzik  */
3871fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3872c6fd2807SJeff Garzik {
3873c6fd2807SJeff Garzik 	unsigned int class = dev->class;
38749af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
3875c6fd2807SJeff Garzik 	int rc;
3876c6fd2807SJeff Garzik 
3877c6fd2807SJeff Garzik 	/* read ID data */
3878bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3879c6fd2807SJeff Garzik 	if (rc)
3880fe30911bSTejun Heo 		return rc;
3881c6fd2807SJeff Garzik 
3882c6fd2807SJeff Garzik 	/* is the device still there? */
3883fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
3884fe30911bSTejun Heo 		return -ENODEV;
3885c6fd2807SJeff Garzik 
3886c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3887fe30911bSTejun Heo 	return 0;
3888fe30911bSTejun Heo }
3889fe30911bSTejun Heo 
3890fe30911bSTejun Heo /**
3891fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
3892fe30911bSTejun Heo  *	@dev: device to revalidate
3893422c9daaSTejun Heo  *	@new_class: new class code
3894fe30911bSTejun Heo  *	@readid_flags: read ID flags
3895fe30911bSTejun Heo  *
3896fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3897fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
3898fe30911bSTejun Heo  *
3899fe30911bSTejun Heo  *	LOCKING:
3900fe30911bSTejun Heo  *	Kernel thread context (may sleep)
3901fe30911bSTejun Heo  *
3902fe30911bSTejun Heo  *	RETURNS:
3903fe30911bSTejun Heo  *	0 on success, negative errno otherwise
3904fe30911bSTejun Heo  */
3905422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3906422c9daaSTejun Heo 		       unsigned int readid_flags)
3907fe30911bSTejun Heo {
39086ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
3909fe30911bSTejun Heo 	int rc;
3910fe30911bSTejun Heo 
3911fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
3912fe30911bSTejun Heo 		return -ENODEV;
3913fe30911bSTejun Heo 
3914422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3915422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
3916422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3917422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3918422c9daaSTejun Heo 			       dev->class, new_class);
3919422c9daaSTejun Heo 		rc = -ENODEV;
3920422c9daaSTejun Heo 		goto fail;
3921422c9daaSTejun Heo 	}
3922422c9daaSTejun Heo 
3923fe30911bSTejun Heo 	/* re-read ID */
3924fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
3925fe30911bSTejun Heo 	if (rc)
3926fe30911bSTejun Heo 		goto fail;
3927c6fd2807SJeff Garzik 
3928c6fd2807SJeff Garzik 	/* configure device according to the new ID */
3929efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
39306ddcd3b0STejun Heo 	if (rc)
39316ddcd3b0STejun Heo 		goto fail;
39326ddcd3b0STejun Heo 
39336ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
3934b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3935b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
39366ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
39376ddcd3b0STejun Heo 			       "%llu != %llu\n",
39386ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
39396ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
39408270bec4STejun Heo 
39418270bec4STejun Heo 		/* restore original n_sectors */
39428270bec4STejun Heo 		dev->n_sectors = n_sectors;
39438270bec4STejun Heo 
39446ddcd3b0STejun Heo 		rc = -ENODEV;
39456ddcd3b0STejun Heo 		goto fail;
39466ddcd3b0STejun Heo 	}
39476ddcd3b0STejun Heo 
3948c6fd2807SJeff Garzik 	return 0;
3949c6fd2807SJeff Garzik 
3950c6fd2807SJeff Garzik  fail:
3951c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3952c6fd2807SJeff Garzik 	return rc;
3953c6fd2807SJeff Garzik }
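
/*
 * Illustrative sketch (assumption): after a successful reset, error
 * handling typically revalidates each enabled device with the post-reset
 * read-ID flag and gives up on the device if that fails, e.g.
 *
 *	rc = ata_dev_revalidate(dev, dev->class, ATA_READID_POSTRESET);
 *	if (rc)
 *		(disable the device or schedule another reset)
 *
 * ATA_READID_POSTRESET is assumed to come from <linux/libata.h>; it is
 * not defined in this excerpt.
 */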
3954c6fd2807SJeff Garzik 
39556919a0a6SAlan Cox struct ata_blacklist_entry {
39566919a0a6SAlan Cox 	const char *model_num;
39576919a0a6SAlan Cox 	const char *model_rev;
39586919a0a6SAlan Cox 	unsigned long horkage;
39596919a0a6SAlan Cox };
39606919a0a6SAlan Cox 
39616919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
39626919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
39636919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
39646919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
39656919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
39666919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
39676919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
39686919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
39696919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
39706919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
39716919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
39726919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
39736919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
39746919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
39756919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
39766919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
39776919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
39786919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
39796919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
39806919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
39816919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
39826919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
39836919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
39846919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
39856919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
39866919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
39876919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
39886919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
39896919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
39906919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
39916919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
399239f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
39933af9a77aSTejun Heo 	/* Odd clown on sil3726/4726 PMPs */
39943af9a77aSTejun Heo 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
39953af9a77aSTejun Heo 						ATA_HORKAGE_SKIP_PM },
39966919a0a6SAlan Cox 
399718d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
399840a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
399918d6e9d5SAlbert Lee 
40006919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
40016919a0a6SAlan Cox 
40026919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
40036919a0a6SAlan Cox 	/* NCQ is slow */
40046919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
400509125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
400609125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
40077acfaf30SPaul Rolland 	/* NCQ is broken */
4008539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
40090e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
40100b0a43e0SDavid Milburn 	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
40110b0a43e0SDavid Milburn 	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
4012da6f0ec2SPaolo Ornati 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4013539cc7c7SJeff Garzik 
401436e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
401536e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
401636e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
401736e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
401836e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4019bd9c5a39STejun Heo 	/* Drives which do spurious command completion */
4020bd9c5a39STejun Heo 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
40212f8fcebbSTejun Heo 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
402270edb185STejun Heo 	{ "HDT722516DLA380",	"V43OA96A",	ATA_HORKAGE_NONCQ, },
4023e14cbfa6STejun Heo 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
40240c173174STejun Heo 	{ "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
40252f8fcebbSTejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
40267f567620STejun Heo 	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
4027a520f261STejun Heo 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
40287f567620STejun Heo 	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
40293fb6589cSTejun Heo 	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
4030954bb005STejun Heo 	{ "ST9160821AS",	"3.ALD",	ATA_HORKAGE_NONCQ, },
403113587960STejun Heo 	{ "ST9160821AS",	"3.CCD",	ATA_HORKAGE_NONCQ, },
40327f567620STejun Heo 	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
40337f567620STejun Heo 	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
40345d6aca8dSTejun Heo 	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
40356919a0a6SAlan Cox 
403616c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
403716c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
403816c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
403916c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
404016c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
40416919a0a6SAlan Cox 
404293328e11SAlan Cox 	/* Devices which report 1 sector over size HPA */
404393328e11SAlan Cox 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
404493328e11SAlan Cox 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
404593328e11SAlan Cox 
40466919a0a6SAlan Cox 	/* End Marker */
40476919a0a6SAlan Cox 	{ }
4048c6fd2807SJeff Garzik };
4049c6fd2807SJeff Garzik 
4050741b7763SAdrian Bunk static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4051539cc7c7SJeff Garzik {
4052539cc7c7SJeff Garzik 	const char *p;
4053539cc7c7SJeff Garzik 	int len;
4054539cc7c7SJeff Garzik 
4055539cc7c7SJeff Garzik 	/*
4056539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
4057539cc7c7SJeff Garzik 	 */
4058539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
4059539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
4060539cc7c7SJeff Garzik 		len = p - patt;
4061317b50b8SAndrew Paprocki 	else {
4062539cc7c7SJeff Garzik 		len = strlen(name);
4063317b50b8SAndrew Paprocki 		if (!len) {
4064317b50b8SAndrew Paprocki 			if (!*patt)
4065317b50b8SAndrew Paprocki 				return 0;
4066317b50b8SAndrew Paprocki 			return -1;
4067317b50b8SAndrew Paprocki 		}
4068317b50b8SAndrew Paprocki 	}
4069539cc7c7SJeff Garzik 
4070539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
4071539cc7c7SJeff Garzik }
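
/*
 * Worked example (illustrative): with wildchar '*', the pattern
 * "Maxtor *" compares only the first 7 characters, so
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0
 *	strn_pattern_cmp("ST340823A", "ST340823A", '*')     == 0
 *	strn_pattern_cmp("ST340823A", "ST320413A", '*')     != 0
 *
 * i.e. a return value of 0 means the blacklist pattern matches the name.
 */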
4072539cc7c7SJeff Garzik 
407375683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4074c6fd2807SJeff Garzik {
40758bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
40768bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
40776919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4078c6fd2807SJeff Garzik 
40798bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
40808bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4081c6fd2807SJeff Garzik 
40826919a0a6SAlan Cox 	while (ad->model_num) {
4083539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
40846919a0a6SAlan Cox 			if (ad->model_rev == NULL)
40856919a0a6SAlan Cox 				return ad->horkage;
4086539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
40876919a0a6SAlan Cox 				return ad->horkage;
4088c6fd2807SJeff Garzik 		}
40896919a0a6SAlan Cox 		ad++;
4090c6fd2807SJeff Garzik 	}
4091c6fd2807SJeff Garzik 	return 0;
4092c6fd2807SJeff Garzik }
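
/*
 * Illustrative sketch (assumed usage): the returned horkage bits are
 * typically OR'd into dev->horkage during device configuration and then
 * tested wherever relevant, e.g.
 *
 *	dev->horkage |= ata_dev_blacklisted(dev);
 *	...
 *	if (dev->horkage & ATA_HORKAGE_NONCQ)
 *		(skip NCQ configuration for this device)
 */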
4093c6fd2807SJeff Garzik 
40946919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
40956919a0a6SAlan Cox {
40966919a0a6SAlan Cox 	/* We don't support polling DMA.  Blacklist DMA for those ATAPI
40976919a0a6SAlan Cox 	 * devices with CDB-intr (and use PIO instead) if the LLDD handles
40986919a0a6SAlan Cox 	 * interrupts only in the HSM_ST_LAST state.
40996919a0a6SAlan Cox 	 */
41009af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
41016919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
41026919a0a6SAlan Cox 		return 1;
410375683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
41046919a0a6SAlan Cox }
41056919a0a6SAlan Cox 
4106c6fd2807SJeff Garzik /**
4107c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
4108c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
4109c6fd2807SJeff Garzik  *
4110c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
4111c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
4112c6fd2807SJeff Garzik  *	known limits including host controller limits, device
4113c6fd2807SJeff Garzik  *	blacklist, etc...
4114c6fd2807SJeff Garzik  *
4115c6fd2807SJeff Garzik  *	LOCKING:
4116c6fd2807SJeff Garzik  *	None.
4117c6fd2807SJeff Garzik  */
4118c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
4119c6fd2807SJeff Garzik {
41209af5c9c9STejun Heo 	struct ata_link *link = dev->link;
41219af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
4122cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
4123c6fd2807SJeff Garzik 	unsigned long xfer_mask;
4124c6fd2807SJeff Garzik 
4125c6fd2807SJeff Garzik 	/* controller modes available */
4126c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4127c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
4128c6fd2807SJeff Garzik 
41298343f889SRobert Hancock 	/* drive modes available */
4130c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4131c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
4132c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
4133c6fd2807SJeff Garzik 
4134b352e57dSAlan Cox 	/*
4135b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4136b352e57dSAlan Cox 	 *	cable
4137b352e57dSAlan Cox 	 */
4138b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
4139b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
4140b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4141b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
4142b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4143b352e57dSAlan Cox 	}
4144b352e57dSAlan Cox 
4145c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
4146c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4147c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4148c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4149c6fd2807SJeff Garzik 	}
4150c6fd2807SJeff Garzik 
415114d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
415214d66ab7SPetr Vandrovec 	    host->simplex_claimed && host->simplex_claimed != ap) {
4153c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4154c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4155c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4156c6fd2807SJeff Garzik 	}
4157c6fd2807SJeff Garzik 
4158e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4159e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4160e424675fSJeff Garzik 
4161c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4162a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4163c6fd2807SJeff Garzik 
41648343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
41658343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
41668343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
41678343f889SRobert Hancock 	 * solely limited by the cable.
41688343f889SRobert Hancock 	 * Unknown or 80-wire cables reported on the host side are checked
41698343f889SRobert Hancock 	 * on the drive side as well.  Cases where we know a 40-wire cable
41708343f889SRobert Hancock 	 * is safely used at 80-wire speeds are not checked here.
41718343f889SRobert Hancock 	 */
41728343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
41738343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
41748343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
41758343f889SRobert Hancock 		    (ata_drive_40wire(dev->id) &&
41768343f889SRobert Hancock 		    (ap->cbl == ATA_CBL_PATA_UNK ||
41778343f889SRobert Hancock 		     ap->cbl == ATA_CBL_PATA80))) {
41788343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
41798343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
41808343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
41818343f889SRobert Hancock 		}
41828343f889SRobert Hancock 
4183c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4184c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4185c6fd2807SJeff Garzik }
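
/*
 * Illustrative sketch (hypothetical driver hook): the optional
 * ->mode_filter() callback invoked above lets a driver trim the mask
 * further.  A board that cannot run faster than UDMA/66 might use:
 *
 *	static unsigned long mydrv_mode_filter(struct ata_device *dev,
 *					       unsigned long xfer_mask)
 *	{
 *		return xfer_mask & ~(0xE0 << ATA_SHIFT_UDMA);
 *	}
 *
 * "mydrv_mode_filter" and the exact bit choice are made up for the
 * example; ATA_SHIFT_UDMA is the UDMA bit offset used elsewhere in this
 * function.
 */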
4186c6fd2807SJeff Garzik 
4187c6fd2807SJeff Garzik /**
4188c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4189c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4190c6fd2807SJeff Garzik  *
4191c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4192c6fd2807SJeff Garzik  *	on port @ap.
4193c6fd2807SJeff Garzik  *
4194c6fd2807SJeff Garzik  *	LOCKING:
4195c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4196c6fd2807SJeff Garzik  *
4197c6fd2807SJeff Garzik  *	RETURNS:
4198c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4199c6fd2807SJeff Garzik  */
4200c6fd2807SJeff Garzik 
4201c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4202c6fd2807SJeff Garzik {
4203c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4204c6fd2807SJeff Garzik 	unsigned int err_mask;
4205c6fd2807SJeff Garzik 
4206c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4207c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4208c6fd2807SJeff Garzik 
4209464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4210464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4211464cf177STejun Heo 	 */
4212c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4213c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4214c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4215464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4216c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4217c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
4218c6fd2807SJeff Garzik 
42192b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4220c6fd2807SJeff Garzik 
4221c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4222c6fd2807SJeff Garzik 	return err_mask;
4223c6fd2807SJeff Garzik }
4224c6fd2807SJeff Garzik /**
4225218f3d30SJeff Garzik  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
42269f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
42279f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
4228218f3d30SJeff Garzik  *	@feature: The sector count value representing the feature to set
42299f45cbd3SKristen Carlson Accardi  *
42309f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4231218f3d30SJeff Garzik  *	on port @ap with the sector count set to @feature.
42329f45cbd3SKristen Carlson Accardi  *
42339f45cbd3SKristen Carlson Accardi  *	LOCKING:
42349f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
42359f45cbd3SKristen Carlson Accardi  *
42369f45cbd3SKristen Carlson Accardi  *	RETURNS:
42379f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
42389f45cbd3SKristen Carlson Accardi  */
4239218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4240218f3d30SJeff Garzik 					u8 feature)
42419f45cbd3SKristen Carlson Accardi {
42429f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
42439f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
42449f45cbd3SKristen Carlson Accardi 
42459f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
42469f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
42479f45cbd3SKristen Carlson Accardi 
42489f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
42499f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
42509f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
42519f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
42529f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
4253218f3d30SJeff Garzik 	tf.nsect = feature;
42549f45cbd3SKristen Carlson Accardi 
42552b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
42569f45cbd3SKristen Carlson Accardi 
42579f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
42589f45cbd3SKristen Carlson Accardi 	return err_mask;
42599f45cbd3SKristen Carlson Accardi }
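
/*
 * Illustrative sketch (assumption): enabling a SATA feature such as
 * asynchronous notification might look like
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 *	if (err_mask)
 *		(the feature could not be enabled)
 *
 * SETFEATURES_SATA_ENABLE and SATA_AN are assumed to come from
 * <linux/ata.h>; they are not defined in this excerpt.
 */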
42609f45cbd3SKristen Carlson Accardi 
42619f45cbd3SKristen Carlson Accardi /**
4262c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4263c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4264c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4265c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4266c6fd2807SJeff Garzik  *
4267c6fd2807SJeff Garzik  *	LOCKING:
4268c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4269c6fd2807SJeff Garzik  *
4270c6fd2807SJeff Garzik  *	RETURNS:
4271c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4272c6fd2807SJeff Garzik  */
4273c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4274c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4275c6fd2807SJeff Garzik {
4276c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4277c6fd2807SJeff Garzik 	unsigned int err_mask;
4278c6fd2807SJeff Garzik 
4279c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4280c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4281c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4282c6fd2807SJeff Garzik 
4283c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4284c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
4285c6fd2807SJeff Garzik 
4286c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4287c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4288c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4289c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4290c6fd2807SJeff Garzik 	tf.nsect = sectors;
4291c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4292c6fd2807SJeff Garzik 
42932b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
429418b2466cSAlan Cox 	/* A clean abort indicates an original or just-out-of-spec drive,
429518b2466cSAlan Cox 	   and we should continue, as we issue the setup based on the
429618b2466cSAlan Cox 	   drive's reported working geometry */
429718b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
429818b2466cSAlan Cox 		err_mask = 0;
4299c6fd2807SJeff Garzik 
4300c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4301c6fd2807SJeff Garzik 	return err_mask;
4302c6fd2807SJeff Garzik }
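
/*
 * Illustrative sketch (assumption): callers derive the geometry from the
 * IDENTIFY data, where word 3 is the default number of heads and word 6
 * the default sectors per track, e.g.
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);
 */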
4303c6fd2807SJeff Garzik 
4304c6fd2807SJeff Garzik /**
4305c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4306c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4307c6fd2807SJeff Garzik  *
4308c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4309c6fd2807SJeff Garzik  *
4310c6fd2807SJeff Garzik  *	LOCKING:
4311cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4312c6fd2807SJeff Garzik  */
431370e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4314c6fd2807SJeff Garzik {
4315c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4316c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4317c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4318c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4319c6fd2807SJeff Garzik 
4320c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4321c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4322c6fd2807SJeff Garzik 
4323c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4324c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4325c6fd2807SJeff Garzik 
4326c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4327c6fd2807SJeff Garzik 
4328c6fd2807SJeff Garzik 	/* if we padded the buffer out to a 32-bit boundary and the data
4329c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4330c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4331c6fd2807SJeff Garzik 	 */
4332c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4333c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4334c6fd2807SJeff Garzik 
4335c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4336c6fd2807SJeff Garzik 		if (qc->n_elem)
4337c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4338c6fd2807SJeff Garzik 		/* restore last sg */
433987260216SJens Axboe 		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4340c6fd2807SJeff Garzik 		if (pad_buf) {
4341c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
434245711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4343c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4344c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4345c6fd2807SJeff Garzik 		}
4346c6fd2807SJeff Garzik 	} else {
4347c6fd2807SJeff Garzik 		if (qc->n_elem)
4348c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4349c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4350c6fd2807SJeff Garzik 				dir);
4351c6fd2807SJeff Garzik 		/* restore sg */
4352c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4353c6fd2807SJeff Garzik 		if (pad_buf)
4354c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4355c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4356c6fd2807SJeff Garzik 	}
4357c6fd2807SJeff Garzik 
4358c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4359c6fd2807SJeff Garzik 	qc->__sg = NULL;
4360c6fd2807SJeff Garzik }
4361c6fd2807SJeff Garzik 
4362c6fd2807SJeff Garzik /**
4363c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4364c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4365c6fd2807SJeff Garzik  *
4366c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4367c6fd2807SJeff Garzik  *	associated with the current disk command.
4368c6fd2807SJeff Garzik  *
4369c6fd2807SJeff Garzik  *	LOCKING:
4370cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4371c6fd2807SJeff Garzik  *
4372c6fd2807SJeff Garzik  */
4373c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4374c6fd2807SJeff Garzik {
4375c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4376c6fd2807SJeff Garzik 	struct scatterlist *sg;
4377c6fd2807SJeff Garzik 	unsigned int idx;
4378c6fd2807SJeff Garzik 
4379c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4380c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4381c6fd2807SJeff Garzik 
4382c6fd2807SJeff Garzik 	idx = 0;
4383c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4384c6fd2807SJeff Garzik 		u32 addr, offset;
4385c6fd2807SJeff Garzik 		u32 sg_len, len;
4386c6fd2807SJeff Garzik 
4387c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4388c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4389c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4390c6fd2807SJeff Garzik 		 */
4391c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4392c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4393c6fd2807SJeff Garzik 
4394c6fd2807SJeff Garzik 		while (sg_len) {
4395c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4396c6fd2807SJeff Garzik 			len = sg_len;
4397c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4398c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4399c6fd2807SJeff Garzik 
4400c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4401c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4402c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4403c6fd2807SJeff Garzik 
4404c6fd2807SJeff Garzik 			idx++;
4405c6fd2807SJeff Garzik 			sg_len -= len;
4406c6fd2807SJeff Garzik 			addr += len;
4407c6fd2807SJeff Garzik 		}
4408c6fd2807SJeff Garzik 	}
4409c6fd2807SJeff Garzik 
4410c6fd2807SJeff Garzik 	if (idx)
4411c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4412c6fd2807SJeff Garzik }
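
/*
 * Worked example (illustrative numbers): a single S/G element of
 * 0x18000 bytes starting at bus address 0x1f000 crosses two 64K
 * boundaries, so the loop above emits three PRD entries:
 *
 *	PRD[0] = (0x1f000, 0x01000)   up to the 0x20000 boundary
 *	PRD[1] = (0x20000, 0x10000)   stored as length 0x0000 (= 64K)
 *	PRD[2] = (0x30000, 0x07000)   remainder, flagged ATA_PRD_EOT
 */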
4413b9a4197eSTejun Heo 
4414c6fd2807SJeff Garzik /**
4415d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4416d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4417d26fc955SAlan Cox  *
4418d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4419d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4420d26fc955SAlan Cox  *	so that we avoid writing any 64K-length records for
4421d26fc955SAlan Cox  *	controllers that don't follow the spec.
4422d26fc955SAlan Cox  *
4423d26fc955SAlan Cox  *	LOCKING:
4424d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4425d26fc955SAlan Cox  *
4426d26fc955SAlan Cox  */
4427d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4428d26fc955SAlan Cox {
4429d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4430d26fc955SAlan Cox 	struct scatterlist *sg;
4431d26fc955SAlan Cox 	unsigned int idx;
4432d26fc955SAlan Cox 
4433d26fc955SAlan Cox 	WARN_ON(qc->__sg == NULL);
4434d26fc955SAlan Cox 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4435d26fc955SAlan Cox 
4436d26fc955SAlan Cox 	idx = 0;
4437d26fc955SAlan Cox 	ata_for_each_sg(sg, qc) {
4438d26fc955SAlan Cox 		u32 addr, offset;
4439d26fc955SAlan Cox 		u32 sg_len, len, blen;
4440d26fc955SAlan Cox 
4441d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4442d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4443d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4444d26fc955SAlan Cox 		 */
4445d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4446d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4447d26fc955SAlan Cox 
4448d26fc955SAlan Cox 		while (sg_len) {
4449d26fc955SAlan Cox 			offset = addr & 0xffff;
4450d26fc955SAlan Cox 			len = sg_len;
4451d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4452d26fc955SAlan Cox 				len = 0x10000 - offset;
4453d26fc955SAlan Cox 
4454d26fc955SAlan Cox 			blen = len & 0xffff;
4455d26fc955SAlan Cox 			ap->prd[idx].addr = cpu_to_le32(addr);
4456d26fc955SAlan Cox 			if (blen == 0) {
4457d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4458d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4459d26fc955SAlan Cox 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4460d26fc955SAlan Cox 				blen = 0x8000;
4461d26fc955SAlan Cox 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4462d26fc955SAlan Cox 			}
4463d26fc955SAlan Cox 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4464d26fc955SAlan Cox 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4465d26fc955SAlan Cox 
4466d26fc955SAlan Cox 			idx++;
4467d26fc955SAlan Cox 			sg_len -= len;
4468d26fc955SAlan Cox 			addr += len;
4469d26fc955SAlan Cox 		}
4470d26fc955SAlan Cox 	}
4471d26fc955SAlan Cox 
4472d26fc955SAlan Cox 	if (idx)
4473d26fc955SAlan Cox 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4474d26fc955SAlan Cox }
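
/*
 * Worked example (illustrative): for the same 64K chunk that
 * ata_fill_sg() would describe with a single length-0x0000 PRD entry,
 * the dumb variant above emits two 32K entries instead:
 *
 *	PRD[n]   = (addr,          0x8000)
 *	PRD[n+1] = (addr + 0x8000, 0x8000)
 *
 * which keeps chipsets like the CS5530 away from the 0x0000-means-64K
 * special case.
 */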
4475d26fc955SAlan Cox 
4476d26fc955SAlan Cox /**
4477c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4478c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4479c6fd2807SJeff Garzik  *
4480c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4481c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4482c6fd2807SJeff Garzik  *	supplied PACKET command.
4483c6fd2807SJeff Garzik  *
4484c6fd2807SJeff Garzik  *	LOCKING:
4485cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4486c6fd2807SJeff Garzik  *
4487c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4488c6fd2807SJeff Garzik  *               nonzero otherwise
4489c6fd2807SJeff Garzik  */
4490c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4491c6fd2807SJeff Garzik {
4492c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4493c6fd2807SJeff Garzik 
4494b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4495b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4496b9a4197eSTejun Heo 	 */
4497b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
44986f23a31dSAlbert Lee 		return 1;
44996f23a31dSAlbert Lee 
4500c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4501b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4502c6fd2807SJeff Garzik 
4503b9a4197eSTejun Heo 	return 0;
4504c6fd2807SJeff Garzik }
4505b9a4197eSTejun Heo 
4506c6fd2807SJeff Garzik /**
450731cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
450831cc23b3STejun Heo  *	@qc: ATA command in question
450931cc23b3STejun Heo  *
451031cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
451131cc23b3STejun Heo  *	not.  As the upper layer only knows the queue depth, we are
451231cc23b3STejun Heo  *	responsible for maintaining exclusion.  This function checks
451331cc23b3STejun Heo  *	whether a new command @qc can be issued.
451431cc23b3STejun Heo  *
451531cc23b3STejun Heo  *	LOCKING:
451631cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
451731cc23b3STejun Heo  *
451831cc23b3STejun Heo  *	RETURNS:
451931cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
452031cc23b3STejun Heo  */
452131cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
452231cc23b3STejun Heo {
452331cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
452431cc23b3STejun Heo 
452531cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
452631cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
452731cc23b3STejun Heo 			return 0;
452831cc23b3STejun Heo 	} else {
452931cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
453031cc23b3STejun Heo 			return 0;
453131cc23b3STejun Heo 	}
453231cc23b3STejun Heo 
453331cc23b3STejun Heo 	return ATA_DEFER_LINK;
453431cc23b3STejun Heo }
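
/*
 * Illustrative sketch (assumed typical usage): NCQ-capable drivers
 * normally just point their port operations at this helper,
 *
 *	.qc_defer	= ata_std_qc_defer,
 *
 * so that a non-NCQ command is deferred while NCQ commands are in
 * flight (link->sactive non-zero) and an NCQ command is deferred while
 * a non-NCQ command owns the link (link->active_tag valid).
 */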
453531cc23b3STejun Heo 
453631cc23b3STejun Heo /**
4537c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4538c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4539c6fd2807SJeff Garzik  *
4540c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4541c6fd2807SJeff Garzik  *
4542c6fd2807SJeff Garzik  *	LOCKING:
4543cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4544c6fd2807SJeff Garzik  */
4545c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4546c6fd2807SJeff Garzik {
4547c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4548c6fd2807SJeff Garzik 		return;
4549c6fd2807SJeff Garzik 
4550c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4551c6fd2807SJeff Garzik }
4552c6fd2807SJeff Garzik 
4553d26fc955SAlan Cox /**
4554d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4555d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4556d26fc955SAlan Cox  *
4557d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4558d26fc955SAlan Cox  *
4559d26fc955SAlan Cox  *	LOCKING:
4560d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4561d26fc955SAlan Cox  */
4562d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4563d26fc955SAlan Cox {
4564d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4565d26fc955SAlan Cox 		return;
4566d26fc955SAlan Cox 
4567d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4568d26fc955SAlan Cox }
4569d26fc955SAlan Cox 
4570c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4571c6fd2807SJeff Garzik 
4572c6fd2807SJeff Garzik /**
4573c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4574c6fd2807SJeff Garzik  *	@qc: Command to be associated
4575c6fd2807SJeff Garzik  *	@buf: Memory buffer
4576c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4577c6fd2807SJeff Garzik  *
4578c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4579c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4580c6fd2807SJeff Garzik  *
4581c6fd2807SJeff Garzik  *	LOCKING:
4582cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4583c6fd2807SJeff Garzik  */
4584c6fd2807SJeff Garzik 
4585c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4586c6fd2807SJeff Garzik {
4587c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4588c6fd2807SJeff Garzik 
4589c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4590c6fd2807SJeff Garzik 	qc->n_elem = 1;
4591c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4592c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4593c6fd2807SJeff Garzik 	qc->nbytes = buflen;
459487260216SJens Axboe 	qc->cursg = qc->__sg;
4595c6fd2807SJeff Garzik 
459661c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4597c6fd2807SJeff Garzik }
4598c6fd2807SJeff Garzik 
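/*
 * Illustrative fragment (not part of libata): a caller moving a single
 * kernel buffer, e.g. IDENTIFY data, would typically pair this helper
 * with the DMA direction before issuing the command.  Names below are
 * hypothetical.
 */
#if 0	/* example only, not built */
	ata_sg_init_one(qc, id_buf, ATA_ID_WORDS * sizeof(u16));
	qc->dma_dir = DMA_FROM_DEVICE;
#endif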
4599c6fd2807SJeff Garzik /**
4600c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4601c6fd2807SJeff Garzik  *	@qc: Command to be associated
4602c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4603c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4604c6fd2807SJeff Garzik  *
4605c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4606c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4607c6fd2807SJeff Garzik  *	elements.
4608c6fd2807SJeff Garzik  *
4609c6fd2807SJeff Garzik  *	LOCKING:
4610cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4611c6fd2807SJeff Garzik  */
4612c6fd2807SJeff Garzik 
4613c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4614c6fd2807SJeff Garzik 		 unsigned int n_elem)
4615c6fd2807SJeff Garzik {
4616c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4617c6fd2807SJeff Garzik 	qc->__sg = sg;
4618c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4619c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
462087260216SJens Axboe 	qc->cursg = qc->__sg;
4621c6fd2807SJeff Garzik }
4622c6fd2807SJeff Garzik 
4623c6fd2807SJeff Garzik /**
4624c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4625c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4626c6fd2807SJeff Garzik  *
4627c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4628c6fd2807SJeff Garzik  *
4629c6fd2807SJeff Garzik  *	LOCKING:
4630cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4631c6fd2807SJeff Garzik  *
4632c6fd2807SJeff Garzik  *	RETURNS:
4633c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4634c6fd2807SJeff Garzik  */
4635c6fd2807SJeff Garzik 
4636c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4637c6fd2807SJeff Garzik {
4638c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4639c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4640c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4641c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4642c6fd2807SJeff Garzik 	int trim_sg = 0;
4643c6fd2807SJeff Garzik 
4644c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4645c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4646c6fd2807SJeff Garzik 	if (qc->pad_len) {
4647c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4648c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4649c6fd2807SJeff Garzik 
4650c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4651c6fd2807SJeff Garzik 
4652c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4653c6fd2807SJeff Garzik 
4654c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4655c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4656c6fd2807SJeff Garzik 			       qc->pad_len);
4657c6fd2807SJeff Garzik 
4658c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4659c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4660c6fd2807SJeff Garzik 		/* trim sg */
4661c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4662c6fd2807SJeff Garzik 		if (sg->length == 0)
4663c6fd2807SJeff Garzik 			trim_sg = 1;
4664c6fd2807SJeff Garzik 
4665c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4666c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4667c6fd2807SJeff Garzik 	}
4668c6fd2807SJeff Garzik 
4669c6fd2807SJeff Garzik 	if (trim_sg) {
4670c6fd2807SJeff Garzik 		qc->n_elem--;
4671c6fd2807SJeff Garzik 		goto skip_map;
4672c6fd2807SJeff Garzik 	}
4673c6fd2807SJeff Garzik 
4674c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4675c6fd2807SJeff Garzik 				     sg->length, dir);
4676c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4677c6fd2807SJeff Garzik 		/* restore sg */
4678c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4679c6fd2807SJeff Garzik 		return -1;
4680c6fd2807SJeff Garzik 	}
4681c6fd2807SJeff Garzik 
4682c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4683c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4684c6fd2807SJeff Garzik 
4685c6fd2807SJeff Garzik skip_map:
4686c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4687c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4688c6fd2807SJeff Garzik 
4689c6fd2807SJeff Garzik 	return 0;
4690c6fd2807SJeff Garzik }
4691c6fd2807SJeff Garzik 
4692c6fd2807SJeff Garzik /**
4693c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4694c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4695c6fd2807SJeff Garzik  *
4696c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4697c6fd2807SJeff Garzik  *
4698c6fd2807SJeff Garzik  *	LOCKING:
4699cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4700c6fd2807SJeff Garzik  *
4701c6fd2807SJeff Garzik  *	RETURNS:
4702c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4703c6fd2807SJeff Garzik  *
4704c6fd2807SJeff Garzik  */
4705c6fd2807SJeff Garzik 
4706c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4707c6fd2807SJeff Garzik {
4708c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4709c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
471087260216SJens Axboe 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4711c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4712c6fd2807SJeff Garzik 
471344877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4714c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4715c6fd2807SJeff Garzik 
4716c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4717c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4718c6fd2807SJeff Garzik 	if (qc->pad_len) {
4719c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4720c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4721c6fd2807SJeff Garzik 		unsigned int offset;
4722c6fd2807SJeff Garzik 
4723c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4724c6fd2807SJeff Garzik 
4725c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4726c6fd2807SJeff Garzik 
4727c6fd2807SJeff Garzik 		/*
4728c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4729c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4730c6fd2807SJeff Garzik 		 */
4731c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4732642f1490SJens Axboe 		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4733642f1490SJens Axboe 				qc->pad_len, offset_in_page(offset));
4734c6fd2807SJeff Garzik 
4735c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
473645711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4737c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4738c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4739c6fd2807SJeff Garzik 		}
4740c6fd2807SJeff Garzik 
4741c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4742c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4743c6fd2807SJeff Garzik 		/* trim last sg */
4744c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4745c6fd2807SJeff Garzik 		if (lsg->length == 0)
4746c6fd2807SJeff Garzik 			trim_sg = 1;
4747c6fd2807SJeff Garzik 
4748c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4749c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4750c6fd2807SJeff Garzik 	}
4751c6fd2807SJeff Garzik 
4752c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4753c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4754c6fd2807SJeff Garzik 		pre_n_elem--;
4755c6fd2807SJeff Garzik 
4756c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4757c6fd2807SJeff Garzik 		n_elem = 0;
4758c6fd2807SJeff Garzik 		goto skip_map;
4759c6fd2807SJeff Garzik 	}
4760c6fd2807SJeff Garzik 
4761c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4762c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4763c6fd2807SJeff Garzik 	if (n_elem < 1) {
4764c6fd2807SJeff Garzik 		/* restore last sg */
4765c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4766c6fd2807SJeff Garzik 		return -1;
4767c6fd2807SJeff Garzik 	}
4768c6fd2807SJeff Garzik 
4769c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4770c6fd2807SJeff Garzik 
4771c6fd2807SJeff Garzik skip_map:
4772c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4773c6fd2807SJeff Garzik 
4774c6fd2807SJeff Garzik 	return 0;
4775c6fd2807SJeff Garzik }
4776c6fd2807SJeff Garzik 
4777c6fd2807SJeff Garzik /**
4778c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4779c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4780c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4781c6fd2807SJeff Garzik  *
4782c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4783c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4784c6fd2807SJeff Garzik  *	vice-versa.
4785c6fd2807SJeff Garzik  *
4786c6fd2807SJeff Garzik  *	LOCKING:
4787c6fd2807SJeff Garzik  *	Inherited from caller.
4788c6fd2807SJeff Garzik  */
4789c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4790c6fd2807SJeff Garzik {
4791c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4792c6fd2807SJeff Garzik 	unsigned int i;
4793c6fd2807SJeff Garzik 
4794c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4795c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4796c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4797c6fd2807SJeff Garzik }
4798c6fd2807SJeff Garzik 
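/*
 * Usage note: IDENTIFY DEVICE data arrives from the drive as 256
 * little-endian 16-bit words, so callers convert it in place before
 * parsing, e.g.:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian hosts the loop above compiles away to nothing.
 */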
4799c6fd2807SJeff Garzik /**
48000d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4801c6fd2807SJeff Garzik  *	@adev: device to target
4802c6fd2807SJeff Garzik  *	@buf: data buffer
4803c6fd2807SJeff Garzik  *	@buflen: buffer length
4804c6fd2807SJeff Garzik  *	@write_data: read/write
4805c6fd2807SJeff Garzik  *
4806c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4807c6fd2807SJeff Garzik  *
4808c6fd2807SJeff Garzik  *	LOCKING:
4809c6fd2807SJeff Garzik  *	Inherited from caller.
4810c6fd2807SJeff Garzik  */
48110d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4812c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4813c6fd2807SJeff Garzik {
48149af5c9c9STejun Heo 	struct ata_port *ap = adev->link->ap;
4815c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4816c6fd2807SJeff Garzik 
4817c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4818c6fd2807SJeff Garzik 	if (write_data)
48190d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4820c6fd2807SJeff Garzik 	else
48210d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4822c6fd2807SJeff Garzik 
4823c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4824c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4825c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4826c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4827c6fd2807SJeff Garzik 
4828c6fd2807SJeff Garzik 		if (write_data) {
4829c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
48300d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4831c6fd2807SJeff Garzik 		} else {
48320d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4833c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4834c6fd2807SJeff Garzik 		}
4835c6fd2807SJeff Garzik 	}
4836c6fd2807SJeff Garzik }
4837c6fd2807SJeff Garzik 
4838c6fd2807SJeff Garzik /**
48390d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4840c6fd2807SJeff Garzik  *	@adev: device to target
4841c6fd2807SJeff Garzik  *	@buf: data buffer
4842c6fd2807SJeff Garzik  *	@buflen: buffer length
4843c6fd2807SJeff Garzik  *	@write_data: read/write
4844c6fd2807SJeff Garzik  *
4845c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4846c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4847c6fd2807SJeff Garzik  *
4848c6fd2807SJeff Garzik  *	LOCKING:
4849c6fd2807SJeff Garzik  *	Inherited from caller.
4850c6fd2807SJeff Garzik  */
48510d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4852c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
4853c6fd2807SJeff Garzik {
4854c6fd2807SJeff Garzik 	unsigned long flags;
4855c6fd2807SJeff Garzik 	local_irq_save(flags);
48560d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
4857c6fd2807SJeff Garzik 	local_irq_restore(flags);
4858c6fd2807SJeff Garzik }
4859c6fd2807SJeff Garzik 
4860c6fd2807SJeff Garzik 
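/*
 * Illustrative sketch (not part of libata): drivers select one of the
 * two PIO helpers above through ->data_xfer in their port operations.
 * The hypothetical table below uses the _noirq variant, as a driver
 * might if its hardware cannot tolerate interrupts mid-transfer.
 */
#if 0	/* example only, not built */
static const struct ata_port_operations example_pio_ops = {
	/* ... other hooks omitted ... */
	.data_xfer	= ata_data_xfer_noirq,
};
#endif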
4861c6fd2807SJeff Garzik /**
48625a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
4863c6fd2807SJeff Garzik  *	@qc: Command on going
4864c6fd2807SJeff Garzik  *
48655a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4866c6fd2807SJeff Garzik  *
4867c6fd2807SJeff Garzik  *	LOCKING:
4868c6fd2807SJeff Garzik  *	Inherited from caller.
4869c6fd2807SJeff Garzik  */
4870c6fd2807SJeff Garzik 
4871c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
4872c6fd2807SJeff Garzik {
4873c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4874c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4875c6fd2807SJeff Garzik 	struct page *page;
4876c6fd2807SJeff Garzik 	unsigned int offset;
4877c6fd2807SJeff Garzik 	unsigned char *buf;
4878c6fd2807SJeff Garzik 
48795a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4880c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4881c6fd2807SJeff Garzik 
488245711f1aSJens Axboe 	page = sg_page(qc->cursg);
488387260216SJens Axboe 	offset = qc->cursg->offset + qc->cursg_ofs;
4884c6fd2807SJeff Garzik 
4885c6fd2807SJeff Garzik 	/* get the current page and offset */
4886c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4887c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4888c6fd2807SJeff Garzik 
4889c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4890c6fd2807SJeff Garzik 
4891c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4892c6fd2807SJeff Garzik 		unsigned long flags;
4893c6fd2807SJeff Garzik 
4894c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
4895c6fd2807SJeff Garzik 		local_irq_save(flags);
4896c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4897c6fd2807SJeff Garzik 
4898c6fd2807SJeff Garzik 		/* do the actual data transfer */
48995a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4900c6fd2807SJeff Garzik 
4901c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4902c6fd2807SJeff Garzik 		local_irq_restore(flags);
4903c6fd2807SJeff Garzik 	} else {
4904c6fd2807SJeff Garzik 		buf = page_address(page);
49055a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4906c6fd2807SJeff Garzik 	}
4907c6fd2807SJeff Garzik 
49085a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
49095a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
4910c6fd2807SJeff Garzik 
491187260216SJens Axboe 	if (qc->cursg_ofs == qc->cursg->length) {
491287260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
4913c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4914c6fd2807SJeff Garzik 	}
4915c6fd2807SJeff Garzik }
4916c6fd2807SJeff Garzik 
4917c6fd2807SJeff Garzik /**
49185a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
4919c6fd2807SJeff Garzik  *	@qc: Command on going
4920c6fd2807SJeff Garzik  *
49215a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
4922c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
4923c6fd2807SJeff Garzik  *
4924c6fd2807SJeff Garzik  *	LOCKING:
4925c6fd2807SJeff Garzik  *	Inherited from caller.
4926c6fd2807SJeff Garzik  */
4927c6fd2807SJeff Garzik 
4928c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
4929c6fd2807SJeff Garzik {
4930c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
4931c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
4932c6fd2807SJeff Garzik 		unsigned int nsect;
4933c6fd2807SJeff Garzik 
4934c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
4935c6fd2807SJeff Garzik 
49365a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4937726f0785STejun Heo 			    qc->dev->multi_count);
4938c6fd2807SJeff Garzik 		while (nsect--)
4939c6fd2807SJeff Garzik 			ata_pio_sector(qc);
4940c6fd2807SJeff Garzik 	} else
4941c6fd2807SJeff Garzik 		ata_pio_sector(qc);
49424cc980b3SAlbert Lee 
49434cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
4944c6fd2807SJeff Garzik }
4945c6fd2807SJeff Garzik 
4946c6fd2807SJeff Garzik /**
4947c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
4948c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
4949c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
4950c6fd2807SJeff Garzik  *
4951c6fd2807SJeff Garzik  *	When device has indicated its readiness to accept
4952c6fd2807SJeff Garzik  *	When the device has indicated its readiness to accept
4953c6fd2807SJeff Garzik  *
4954c6fd2807SJeff Garzik  *	LOCKING:
4955c6fd2807SJeff Garzik  *	caller.
4956c6fd2807SJeff Garzik  */
4957c6fd2807SJeff Garzik 
4958c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4959c6fd2807SJeff Garzik {
4960c6fd2807SJeff Garzik 	/* send SCSI cdb */
4961c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
4962c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
4963c6fd2807SJeff Garzik 
4964c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4965c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4966c6fd2807SJeff Garzik 
4967c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
4968c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
4969c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
4970c6fd2807SJeff Garzik 		break;
4971c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
4972c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4973c6fd2807SJeff Garzik 		break;
4974c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
4975c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4976c6fd2807SJeff Garzik 		/* initiate bmdma */
4977c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
4978c6fd2807SJeff Garzik 		break;
4979c6fd2807SJeff Garzik 	}
4980c6fd2807SJeff Garzik }
4981c6fd2807SJeff Garzik 
4982c6fd2807SJeff Garzik /**
4983c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4984c6fd2807SJeff Garzik  *	@qc: Command on going
4985c6fd2807SJeff Garzik  *	@bytes: number of bytes
4986c6fd2807SJeff Garzik  *
4987c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4988c6fd2807SJeff Garzik  *
4989c6fd2807SJeff Garzik  *	LOCKING:
4990c6fd2807SJeff Garzik  *	Inherited from caller.
4991c6fd2807SJeff Garzik  *
4992c6fd2807SJeff Garzik  */
4993c6fd2807SJeff Garzik 
4994c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4995c6fd2807SJeff Garzik {
4996c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4997c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
49980874ee76SFUJITA Tomonori 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4999c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5000c6fd2807SJeff Garzik 	struct page *page;
5001c6fd2807SJeff Garzik 	unsigned char *buf;
5002c6fd2807SJeff Garzik 	unsigned int offset, count;
50030874ee76SFUJITA Tomonori 	int no_more_sg = 0;
5004c6fd2807SJeff Garzik 
5005c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
5006c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5007c6fd2807SJeff Garzik 
5008c6fd2807SJeff Garzik next_sg:
50090874ee76SFUJITA Tomonori 	if (unlikely(no_more_sg)) {
5010c6fd2807SJeff Garzik 		/*
5011c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
5012c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
5013c6fd2807SJeff Garzik 		 * and to fulfill the length specified in the byte count register,
5014c6fd2807SJeff Garzik 		 *    - for read case, discard trailing data from the device
5015c6fd2807SJeff Garzik 		 *    - for write case, pad zero data to the device
5016c6fd2807SJeff Garzik 		 */
5017c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
5018c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
5019c6fd2807SJeff Garzik 		unsigned int i;
5020c6fd2807SJeff Garzik 
5021c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
5022c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
5023c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
5024c6fd2807SJeff Garzik 
5025c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
5026c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5027c6fd2807SJeff Garzik 
5028c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5029c6fd2807SJeff Garzik 		return;
5030c6fd2807SJeff Garzik 	}
5031c6fd2807SJeff Garzik 
503287260216SJens Axboe 	sg = qc->cursg;
5033c6fd2807SJeff Garzik 
503445711f1aSJens Axboe 	page = sg_page(sg);
5035c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
5036c6fd2807SJeff Garzik 
5037c6fd2807SJeff Garzik 	/* get the current page and offset */
5038c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5039c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5040c6fd2807SJeff Garzik 
5041c6fd2807SJeff Garzik 	/* don't overrun current sg */
5042c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
5043c6fd2807SJeff Garzik 
5044c6fd2807SJeff Garzik 	/* don't cross page boundaries */
5045c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
5046c6fd2807SJeff Garzik 
5047c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5048c6fd2807SJeff Garzik 
5049c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5050c6fd2807SJeff Garzik 		unsigned long flags;
5051c6fd2807SJeff Garzik 
5052c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
5053c6fd2807SJeff Garzik 		local_irq_save(flags);
5054c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5055c6fd2807SJeff Garzik 
5056c6fd2807SJeff Garzik 		/* do the actual data transfer */
5057c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5058c6fd2807SJeff Garzik 
5059c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5060c6fd2807SJeff Garzik 		local_irq_restore(flags);
5061c6fd2807SJeff Garzik 	} else {
5062c6fd2807SJeff Garzik 		buf = page_address(page);
5063c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5064c6fd2807SJeff Garzik 	}
5065c6fd2807SJeff Garzik 
5066c6fd2807SJeff Garzik 	bytes -= count;
5067c6fd2807SJeff Garzik 	qc->curbytes += count;
5068c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
5069c6fd2807SJeff Garzik 
5070c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
50710874ee76SFUJITA Tomonori 		if (qc->cursg == lsg)
50720874ee76SFUJITA Tomonori 			no_more_sg = 1;
50730874ee76SFUJITA Tomonori 
507487260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5075c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5076c6fd2807SJeff Garzik 	}
5077c6fd2807SJeff Garzik 
5078c6fd2807SJeff Garzik 	if (bytes)
5079c6fd2807SJeff Garzik 		goto next_sg;
5080c6fd2807SJeff Garzik }
5081c6fd2807SJeff Garzik 
5082c6fd2807SJeff Garzik /**
5083c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
5084c6fd2807SJeff Garzik  *	@qc: Command on going
5085c6fd2807SJeff Garzik  *
5086c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5087c6fd2807SJeff Garzik  *
5088c6fd2807SJeff Garzik  *	LOCKING:
5089c6fd2807SJeff Garzik  *	Inherited from caller.
5090c6fd2807SJeff Garzik  */
5091c6fd2807SJeff Garzik 
5092c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5093c6fd2807SJeff Garzik {
5094c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5095c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
5096c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
5097c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5098c6fd2807SJeff Garzik 
5099c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
5100c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
5101c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
5102c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
5103c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
5104c6fd2807SJeff Garzik 	 */
5105c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
5106c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
5107c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
5108c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
5109c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
5110c6fd2807SJeff Garzik 
5111c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
5112c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
5113c6fd2807SJeff Garzik 		goto err_out;
5114c6fd2807SJeff Garzik 
5115c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
5116c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5117c6fd2807SJeff Garzik 	if (do_write != i_write)
5118c6fd2807SJeff Garzik 		goto err_out;
5119c6fd2807SJeff Garzik 
512044877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5121c6fd2807SJeff Garzik 
5122c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
51234cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
5124c6fd2807SJeff Garzik 
5125c6fd2807SJeff Garzik 	return;
5126c6fd2807SJeff Garzik 
5127c6fd2807SJeff Garzik err_out:
5128c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5129c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
5130c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
5131c6fd2807SJeff Garzik }
5132c6fd2807SJeff Garzik 
5133c6fd2807SJeff Garzik /**
5134c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5135c6fd2807SJeff Garzik  *	@ap: the target ata_port
5136c6fd2807SJeff Garzik  *	@qc: qc on going
5137c6fd2807SJeff Garzik  *
5138c6fd2807SJeff Garzik  *	RETURNS:
5139c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
5140c6fd2807SJeff Garzik  */
5141c6fd2807SJeff Garzik 
5142c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5143c6fd2807SJeff Garzik {
5144c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
5145c6fd2807SJeff Garzik 		return 1;
5146c6fd2807SJeff Garzik 
5147c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
5148c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
5149c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
5150c6fd2807SJeff Garzik 		    return 1;
5151c6fd2807SJeff Garzik 
5152c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
5153c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5154c6fd2807SJeff Garzik 			return 1;
5155c6fd2807SJeff Garzik 	}
5156c6fd2807SJeff Garzik 
5157c6fd2807SJeff Garzik 	return 0;
5158c6fd2807SJeff Garzik }
5159c6fd2807SJeff Garzik 
5160c6fd2807SJeff Garzik /**
5161c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5162c6fd2807SJeff Garzik  *	@qc: Command to complete
5163c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5164c6fd2807SJeff Garzik  *
5165c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5166c6fd2807SJeff Garzik  *
5167c6fd2807SJeff Garzik  *	LOCKING:
5168cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5169c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5170c6fd2807SJeff Garzik  */
5171c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5172c6fd2807SJeff Garzik {
5173c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5174c6fd2807SJeff Garzik 	unsigned long flags;
5175c6fd2807SJeff Garzik 
5176c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5177c6fd2807SJeff Garzik 		if (in_wq) {
5178c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5179c6fd2807SJeff Garzik 
5180cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5181cca3974eSJeff Garzik 			 * released.
5182c6fd2807SJeff Garzik 			 */
5183c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5184c6fd2807SJeff Garzik 			if (qc) {
5185c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
518683625006SAkira Iguchi 					ap->ops->irq_on(ap);
5187c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5188c6fd2807SJeff Garzik 				} else
5189c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5190c6fd2807SJeff Garzik 			}
5191c6fd2807SJeff Garzik 
5192c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5193c6fd2807SJeff Garzik 		} else {
5194c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5195c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5196c6fd2807SJeff Garzik 			else
5197c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5198c6fd2807SJeff Garzik 		}
5199c6fd2807SJeff Garzik 	} else {
5200c6fd2807SJeff Garzik 		if (in_wq) {
5201c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
520283625006SAkira Iguchi 			ap->ops->irq_on(ap);
5203c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5204c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5205c6fd2807SJeff Garzik 		} else
5206c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5207c6fd2807SJeff Garzik 	}
5208c6fd2807SJeff Garzik }
5209c6fd2807SJeff Garzik 
5210c6fd2807SJeff Garzik /**
5211c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5212c6fd2807SJeff Garzik  *	@ap: the target ata_port
5213c6fd2807SJeff Garzik  *	@qc: qc on going
5214c6fd2807SJeff Garzik  *	@status: current device status
5215c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5216c6fd2807SJeff Garzik  *
5217c6fd2807SJeff Garzik  *	RETURNS:
5218c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5219c6fd2807SJeff Garzik  */
5220c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5221c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5222c6fd2807SJeff Garzik {
5223c6fd2807SJeff Garzik 	unsigned long flags = 0;
5224c6fd2807SJeff Garzik 	int poll_next;
5225c6fd2807SJeff Garzik 
5226c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5227c6fd2807SJeff Garzik 
5228c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5229c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5230c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5231c6fd2807SJeff Garzik 	 */
5232c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5233c6fd2807SJeff Garzik 
5234c6fd2807SJeff Garzik fsm_start:
5235c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
523644877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5237c6fd2807SJeff Garzik 
5238c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5239c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5240c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5241c6fd2807SJeff Garzik 
5242c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5243c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5244c6fd2807SJeff Garzik 		 * takes over after sending the data.
5245c6fd2807SJeff Garzik 		 */
5246c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5247c6fd2807SJeff Garzik 
5248c6fd2807SJeff Garzik 		/* check device status */
5249c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5250c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5251c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5252c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5253c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5254c6fd2807SJeff Garzik 			else
5255c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5256c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5257c6fd2807SJeff Garzik 
5258c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5259c6fd2807SJeff Garzik 			goto fsm_start;
5260c6fd2807SJeff Garzik 		}
5261c6fd2807SJeff Garzik 
5262c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5263c6fd2807SJeff Garzik 		 * when it finds something wrong.
5264c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5265c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5266c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5267c6fd2807SJeff Garzik 		 */
5268c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
526944877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
527044877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
5271c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
5272c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5273c6fd2807SJeff Garzik 			goto fsm_start;
5274c6fd2807SJeff Garzik 		}
5275c6fd2807SJeff Garzik 
5276c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5277c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5278c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5279c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5280c6fd2807SJeff Garzik 		 */
5281c6fd2807SJeff Garzik 		if (in_wq)
5282c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5283c6fd2807SJeff Garzik 
5284c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5285c6fd2807SJeff Garzik 			/* PIO data out protocol.
5286c6fd2807SJeff Garzik 			 * send first data block.
5287c6fd2807SJeff Garzik 			 */
5288c6fd2807SJeff Garzik 
5289c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5290c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5291c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5292c6fd2807SJeff Garzik 			 */
5293c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5294c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5295c6fd2807SJeff Garzik 		} else
5296c6fd2807SJeff Garzik 			/* send CDB */
5297c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5298c6fd2807SJeff Garzik 
5299c6fd2807SJeff Garzik 		if (in_wq)
5300c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5301c6fd2807SJeff Garzik 
5302c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5303c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5304c6fd2807SJeff Garzik 		 */
5305c6fd2807SJeff Garzik 		break;
5306c6fd2807SJeff Garzik 
5307c6fd2807SJeff Garzik 	case HSM_ST:
5308c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
5309c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
5310c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5311c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5312c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5313c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5314c6fd2807SJeff Garzik 				 */
5315c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5316c6fd2807SJeff Garzik 				goto fsm_start;
5317c6fd2807SJeff Garzik 			}
5318c6fd2807SJeff Garzik 
5319c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5320c6fd2807SJeff Garzik 			 * when it finds something wrong.
5321c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5322c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5323c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5324c6fd2807SJeff Garzik 			 */
5325c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
532644877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
532744877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
532844877b4eSTejun Heo 						status);
5329c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5330c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5331c6fd2807SJeff Garzik 				goto fsm_start;
5332c6fd2807SJeff Garzik 			}
5333c6fd2807SJeff Garzik 
5334c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5335c6fd2807SJeff Garzik 
5336c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5337c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5338c6fd2807SJeff Garzik 				goto fsm_start;
5339c6fd2807SJeff Garzik 
5340c6fd2807SJeff Garzik 		} else {
5341c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5342c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5343c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5344c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5345c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5346c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5347c6fd2807SJeff Garzik 				else
534855a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
534955a8e2c8STejun Heo 					 * Phantom devices also trigger this
535055a8e2c8STejun Heo 					 * condition.  Mark hint.
535155a8e2c8STejun Heo 					 */
535255a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
535355a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5354c6fd2807SJeff Garzik 
5355c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5356c6fd2807SJeff Garzik 				goto fsm_start;
5357c6fd2807SJeff Garzik 			}
5358c6fd2807SJeff Garzik 
5359c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5360c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
5361c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5362c6fd2807SJeff Garzik 			 * block of junk data before changing the
5363c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5364c6fd2807SJeff Garzik 			 *
5365c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5366c6fd2807SJeff Garzik 			 * sense since the data block has been
5367c6fd2807SJeff Garzik 			 * transferred to the device.
5368c6fd2807SJeff Garzik 			 */
5369c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5370c6fd2807SJeff Garzik 				/* data might be corrupted */
5371c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5372c6fd2807SJeff Garzik 
5373c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5374c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5375c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5376c6fd2807SJeff Garzik 				}
5377c6fd2807SJeff Garzik 
5378c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5379c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5380c6fd2807SJeff Garzik 
5381c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5382c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5383c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5384c6fd2807SJeff Garzik 				 */
5385c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5386c6fd2807SJeff Garzik 				goto fsm_start;
5387c6fd2807SJeff Garzik 			}
5388c6fd2807SJeff Garzik 
5389c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5390c6fd2807SJeff Garzik 
5391c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5392c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5393c6fd2807SJeff Garzik 				/* all data read */
5394c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5395c6fd2807SJeff Garzik 				goto fsm_start;
5396c6fd2807SJeff Garzik 			}
5397c6fd2807SJeff Garzik 		}
5398c6fd2807SJeff Garzik 
5399c6fd2807SJeff Garzik 		poll_next = 1;
5400c6fd2807SJeff Garzik 		break;
5401c6fd2807SJeff Garzik 
5402c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5403c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5404c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5405c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5406c6fd2807SJeff Garzik 			goto fsm_start;
5407c6fd2807SJeff Garzik 		}
5408c6fd2807SJeff Garzik 
5409c6fd2807SJeff Garzik 		/* no more data to transfer */
5410c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
541144877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5412c6fd2807SJeff Garzik 
5413c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5414c6fd2807SJeff Garzik 
5415c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5416c6fd2807SJeff Garzik 
5417c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5418c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5419c6fd2807SJeff Garzik 
5420c6fd2807SJeff Garzik 		poll_next = 0;
5421c6fd2807SJeff Garzik 		break;
5422c6fd2807SJeff Garzik 
5423c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5424c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5425c6fd2807SJeff Garzik 		 * know what's wrong and recover
5426c6fd2807SJeff Garzik 		 */
5427c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5428c6fd2807SJeff Garzik 
5429c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5430c6fd2807SJeff Garzik 
5431c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5432c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5433c6fd2807SJeff Garzik 
5434c6fd2807SJeff Garzik 		poll_next = 0;
5435c6fd2807SJeff Garzik 		break;
5436c6fd2807SJeff Garzik 	default:
5437c6fd2807SJeff Garzik 		poll_next = 0;
5438c6fd2807SJeff Garzik 		BUG();
5439c6fd2807SJeff Garzik 	}
5440c6fd2807SJeff Garzik 
5441c6fd2807SJeff Garzik 	return poll_next;
5442c6fd2807SJeff Garzik }
5443c6fd2807SJeff Garzik 
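/*
 * Illustrative fragment (not part of libata): the interrupt path
 * drives the same state machine with in_wq=0 after reading the device
 * status; the poll_next return value only matters to the polling
 * ata_pio_task() path.  Error handling is omitted here.
 */
#if 0	/* example only, not built */
	u8 status = ata_chk_status(ap);

	ata_hsm_move(ap, qc, status, 0);
#endif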
544465f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5445c6fd2807SJeff Garzik {
544665f27f38SDavid Howells 	struct ata_port *ap =
544765f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
544865f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5449c6fd2807SJeff Garzik 	u8 status;
5450c6fd2807SJeff Garzik 	int poll_next;
5451c6fd2807SJeff Garzik 
5452c6fd2807SJeff Garzik fsm_start:
5453c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5454c6fd2807SJeff Garzik 
5455c6fd2807SJeff Garzik 	/*
5456c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5457c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5458c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5459c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5460c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5461c6fd2807SJeff Garzik 	 */
5462c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5463c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5464c6fd2807SJeff Garzik 		msleep(2);
5465c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5466c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5467c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5468c6fd2807SJeff Garzik 			return;
5469c6fd2807SJeff Garzik 		}
5470c6fd2807SJeff Garzik 	}
5471c6fd2807SJeff Garzik 
5472c6fd2807SJeff Garzik 	/* move the HSM */
5473c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5474c6fd2807SJeff Garzik 
5475c6fd2807SJeff Garzik 	/* another command or interrupt handler
5476c6fd2807SJeff Garzik 	 * may be running at this point.
5477c6fd2807SJeff Garzik 	 */
5478c6fd2807SJeff Garzik 	if (poll_next)
5479c6fd2807SJeff Garzik 		goto fsm_start;
5480c6fd2807SJeff Garzik }
5481c6fd2807SJeff Garzik 
5482c6fd2807SJeff Garzik /**
5483c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5484c6fd2807SJeff Garzik  *	@ap: Port associated with device @dev
5485c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5486c6fd2807SJeff Garzik  *
5487c6fd2807SJeff Garzik  *	LOCKING:
5488c6fd2807SJeff Garzik  *	None.
5489c6fd2807SJeff Garzik  */
5490c6fd2807SJeff Garzik 
5491c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5492c6fd2807SJeff Garzik {
5493c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5494c6fd2807SJeff Garzik 	unsigned int i;
5495c6fd2807SJeff Garzik 
5496c6fd2807SJeff Garzik 	/* no command while frozen */
5497c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5498c6fd2807SJeff Garzik 		return NULL;
5499c6fd2807SJeff Garzik 
5500c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5501c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5502c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5503c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5504c6fd2807SJeff Garzik 			break;
5505c6fd2807SJeff Garzik 		}
5506c6fd2807SJeff Garzik 
5507c6fd2807SJeff Garzik 	if (qc)
5508c6fd2807SJeff Garzik 		qc->tag = i;
5509c6fd2807SJeff Garzik 
5510c6fd2807SJeff Garzik 	return qc;
5511c6fd2807SJeff Garzik }
5512c6fd2807SJeff Garzik 
5513c6fd2807SJeff Garzik /**
5514c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5515c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5516c6fd2807SJeff Garzik  *
5517c6fd2807SJeff Garzik  *	LOCKING:
5518c6fd2807SJeff Garzik  *	None.
5519c6fd2807SJeff Garzik  */
5520c6fd2807SJeff Garzik 
5521c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5522c6fd2807SJeff Garzik {
55239af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5524c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5525c6fd2807SJeff Garzik 
5526c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5527c6fd2807SJeff Garzik 	if (qc) {
5528c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5529c6fd2807SJeff Garzik 		qc->ap = ap;
5530c6fd2807SJeff Garzik 		qc->dev = dev;
5531c6fd2807SJeff Garzik 
5532c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5533c6fd2807SJeff Garzik 	}
5534c6fd2807SJeff Garzik 
5535c6fd2807SJeff Garzik 	return qc;
5536c6fd2807SJeff Garzik }
5537c6fd2807SJeff Garzik 
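/*
 * Illustrative fragment (not part of libata): an internal user
 * allocates a qc, fills in the taskfile and data buffer, attaches a
 * completion callback and hands the command to ata_qc_issue(), roughly
 * as below.  Names and the callback are hypothetical; real callers
 * such as ata_exec_internal() also handle allocation failure and
 * timeouts.
 */
#if 0	/* example only, not built */
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);
	if (!qc)
		return -ENOMEM;

	qc->tf = *tf;				/* tf points at a prepared taskfile */
	qc->dma_dir = DMA_FROM_DEVICE;
	ata_sg_init_one(qc, buf, buflen);
	qc->complete_fn = example_complete_fn;	/* hypothetical callback */

	ata_qc_issue(qc);
#endif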
5538c6fd2807SJeff Garzik /**
5539c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5540c6fd2807SJeff Garzik  *	@qc: Command to complete
5541c6fd2807SJeff Garzik  *
5542c6fd2807SJeff Garzik  *	Designed to free an unused ata_queued_cmd object
5543c6fd2807SJeff Garzik  *	in case something prevents using it.
5544c6fd2807SJeff Garzik  *
5545c6fd2807SJeff Garzik  *	LOCKING:
5546cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5547c6fd2807SJeff Garzik  */
5548c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5549c6fd2807SJeff Garzik {
5550c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5551c6fd2807SJeff Garzik 	unsigned int tag;
5552c6fd2807SJeff Garzik 
5553c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5554c6fd2807SJeff Garzik 
5555c6fd2807SJeff Garzik 	qc->flags = 0;
5556c6fd2807SJeff Garzik 	tag = qc->tag;
5557c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5558c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5559c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5560c6fd2807SJeff Garzik 	}
5561c6fd2807SJeff Garzik }
5562c6fd2807SJeff Garzik 
5563c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5564c6fd2807SJeff Garzik {
5565c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
55669af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5567c6fd2807SJeff Garzik 
5568c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5569c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5570c6fd2807SJeff Garzik 
5571c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5572c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5573c6fd2807SJeff Garzik 
5574c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5575da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
55769af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5577da917d69STejun Heo 		if (!link->sactive)
5578da917d69STejun Heo 			ap->nr_active_links--;
5579da917d69STejun Heo 	} else {
55809af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5581da917d69STejun Heo 		ap->nr_active_links--;
5582da917d69STejun Heo 	}
5583da917d69STejun Heo 
5584da917d69STejun Heo 	/* clear exclusive status */
5585da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5586da917d69STejun Heo 		     ap->excl_link == link))
5587da917d69STejun Heo 		ap->excl_link = NULL;
5588c6fd2807SJeff Garzik 
5589c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5590c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5591c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5592c6fd2807SJeff Garzik 	 */
5593c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5594c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5595c6fd2807SJeff Garzik 
5596c6fd2807SJeff Garzik 	/* call completion callback */
5597c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5598c6fd2807SJeff Garzik }
5599c6fd2807SJeff Garzik 
560039599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
560139599a53STejun Heo {
560239599a53STejun Heo 	struct ata_port *ap = qc->ap;
560339599a53STejun Heo 
560439599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
56054742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
560639599a53STejun Heo }
560739599a53STejun Heo 
5608c6fd2807SJeff Garzik /**
5609c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5610c6fd2807SJeff Garzik  *	@qc: Command to complete
5612c6fd2807SJeff Garzik  *
5613c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5614c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5615c6fd2807SJeff Garzik  *
5616c6fd2807SJeff Garzik  *	LOCKING:
5617cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5618c6fd2807SJeff Garzik  */
5619c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5620c6fd2807SJeff Garzik {
5621c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5622c6fd2807SJeff Garzik 
5623c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5624c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5625c6fd2807SJeff Garzik 	 *
5626c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5627c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5628c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5629c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5630c6fd2807SJeff Garzik 	 *
5631c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5632c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5633c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5634c6fd2807SJeff Garzik 	 * taken care of.
5635c6fd2807SJeff Garzik 	 */
5636c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
56374dbfa39bSTejun Heo 		struct ata_device *dev = qc->dev;
56384dbfa39bSTejun Heo 		struct ata_eh_info *ehi = &dev->link->eh_info;
56394dbfa39bSTejun Heo 
5640c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5641c6fd2807SJeff Garzik 
5642c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5643c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5644c6fd2807SJeff Garzik 
5645c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5646c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5647c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
564839599a53STejun Heo 				fill_result_tf(qc);
5649c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5650c6fd2807SJeff Garzik 				return;
5651c6fd2807SJeff Garzik 			}
5652c6fd2807SJeff Garzik 		}
5653c6fd2807SJeff Garzik 
5654c6fd2807SJeff Garzik 		/* read result TF if requested */
5655c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
565639599a53STejun Heo 			fill_result_tf(qc);
5657c6fd2807SJeff Garzik 
56584dbfa39bSTejun Heo 		/* Some commands need post-processing after successful
56594dbfa39bSTejun Heo 		 * completion.
56604dbfa39bSTejun Heo 		 */
56614dbfa39bSTejun Heo 		switch (qc->tf.command) {
56624dbfa39bSTejun Heo 		case ATA_CMD_SET_FEATURES:
56634dbfa39bSTejun Heo 			if (qc->tf.feature != SETFEATURES_WC_ON &&
56644dbfa39bSTejun Heo 			    qc->tf.feature != SETFEATURES_WC_OFF)
56654dbfa39bSTejun Heo 				break;
56664dbfa39bSTejun Heo 			/* fall through */
56674dbfa39bSTejun Heo 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
56684dbfa39bSTejun Heo 		case ATA_CMD_SET_MULTI: /* multi_count changed */
56694dbfa39bSTejun Heo 			/* revalidate device */
56704dbfa39bSTejun Heo 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
56714dbfa39bSTejun Heo 			ata_port_schedule_eh(ap);
56724dbfa39bSTejun Heo 			break;
5673054a5fbaSTejun Heo 
5674054a5fbaSTejun Heo 		case ATA_CMD_SLEEP:
5675054a5fbaSTejun Heo 			dev->flags |= ATA_DFLAG_SLEEPING;
5676054a5fbaSTejun Heo 			break;
56774dbfa39bSTejun Heo 		}
56784dbfa39bSTejun Heo 
5679c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5680c6fd2807SJeff Garzik 	} else {
5681c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5682c6fd2807SJeff Garzik 			return;
5683c6fd2807SJeff Garzik 
5684c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5685c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
568639599a53STejun Heo 			fill_result_tf(qc);
5687c6fd2807SJeff Garzik 
5688c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5689c6fd2807SJeff Garzik 	}
5690c6fd2807SJeff Garzik }
5691c6fd2807SJeff Garzik 
5692c6fd2807SJeff Garzik /**
5693c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5694c6fd2807SJeff Garzik  *	@ap: port in question
5695c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5696c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5697c6fd2807SJeff Garzik  *
5698c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5699c6fd2807SJeff Garzik  *	called from low-level driver's interrupt routine to complete
5700c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5701c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5702c6fd2807SJeff Garzik  *
5703c6fd2807SJeff Garzik  *	LOCKING:
5704cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5705c6fd2807SJeff Garzik  *
5706c6fd2807SJeff Garzik  *	RETURNS:
5707c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5708c6fd2807SJeff Garzik  */
5709c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5710c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5711c6fd2807SJeff Garzik {
5712c6fd2807SJeff Garzik 	int nr_done = 0;
5713c6fd2807SJeff Garzik 	u32 done_mask;
5714c6fd2807SJeff Garzik 	int i;
5715c6fd2807SJeff Garzik 
5716c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5717c6fd2807SJeff Garzik 
5718c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5719c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5720c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5721c6fd2807SJeff Garzik 		return -EINVAL;
5722c6fd2807SJeff Garzik 	}
5723c6fd2807SJeff Garzik 
5724c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5725c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5726c6fd2807SJeff Garzik 
5727c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5728c6fd2807SJeff Garzik 			continue;
5729c6fd2807SJeff Garzik 
5730c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5731c6fd2807SJeff Garzik 			if (finish_qc)
5732c6fd2807SJeff Garzik 				finish_qc(qc);
5733c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5734c6fd2807SJeff Garzik 			nr_done++;
5735c6fd2807SJeff Garzik 		}
5736c6fd2807SJeff Garzik 	}
5737c6fd2807SJeff Garzik 
5738c6fd2807SJeff Garzik 	return nr_done;
5739c6fd2807SJeff Garzik }
5740c6fd2807SJeff Garzik 
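/*
 * Illustrative sketch only (not used by libata itself): a hypothetical
 * NCQ-capable LLD's interrupt path could complete finished commands by
 * reading the controller's "active tags" register and feeding the new
 * mask to ata_qc_complete_multiple().  my_read_active_tags() is a
 * made-up stand-in for that controller-specific read.
 */
#if 0
static irqreturn_t my_sata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap = host->ports[0];
	unsigned long flags;
	u32 qc_active;
	int handled;

	spin_lock_irqsave(&host->lock, flags);
	qc_active = my_read_active_tags(ap);	/* hypothetical helper */
	handled = ata_qc_complete_multiple(ap, qc_active, NULL);
	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled > 0);
}
#endif
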
5741c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5742c6fd2807SJeff Garzik {
5743c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5744c6fd2807SJeff Garzik 
5745c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5746c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5747c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5748c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5749c6fd2807SJeff Garzik 		return 1;
5750c6fd2807SJeff Garzik 
5751c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5752c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5753c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5754c6fd2807SJeff Garzik 			return 1;
5755c6fd2807SJeff Garzik 
5756c6fd2807SJeff Garzik 		/* fall through */
5757c6fd2807SJeff Garzik 
5758c6fd2807SJeff Garzik 	default:
5759c6fd2807SJeff Garzik 		return 0;
5760c6fd2807SJeff Garzik 	}
5761c6fd2807SJeff Garzik 
5762c6fd2807SJeff Garzik 	/* never reached */
5763c6fd2807SJeff Garzik }
5764c6fd2807SJeff Garzik 
5765c6fd2807SJeff Garzik /**
5766c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5767c6fd2807SJeff Garzik  *	@qc: command to issue to device
5768c6fd2807SJeff Garzik  *
5769c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5770c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5771c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5772c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5773c6fd2807SJeff Garzik  *
5774c6fd2807SJeff Garzik  *	LOCKING:
5775cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5776c6fd2807SJeff Garzik  */
5777c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5778c6fd2807SJeff Garzik {
5779c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
57809af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5781c6fd2807SJeff Garzik 
5782c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5783c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5784c6fd2807SJeff Garzik 	 * request ATAPI sense.
5785c6fd2807SJeff Garzik 	 */
57869af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5787c6fd2807SJeff Garzik 
5788c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
57899af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
5790da917d69STejun Heo 
5791da917d69STejun Heo 		if (!link->sactive)
5792da917d69STejun Heo 			ap->nr_active_links++;
57939af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
5794c6fd2807SJeff Garzik 	} else {
57959af5c9c9STejun Heo 		WARN_ON(link->sactive);
5796da917d69STejun Heo 
5797da917d69STejun Heo 		ap->nr_active_links++;
57989af5c9c9STejun Heo 		link->active_tag = qc->tag;
5799c6fd2807SJeff Garzik 	}
5800c6fd2807SJeff Garzik 
5801c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5802c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5803c6fd2807SJeff Garzik 
5804c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5805c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5806c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5807c6fd2807SJeff Garzik 				goto sg_err;
5808c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5809c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5810c6fd2807SJeff Garzik 				goto sg_err;
5811c6fd2807SJeff Garzik 		}
5812c6fd2807SJeff Garzik 	} else {
5813c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5814c6fd2807SJeff Garzik 	}
5815c6fd2807SJeff Garzik 
5816054a5fbaSTejun Heo 	/* if device is sleeping, schedule softreset and abort the link */
5817054a5fbaSTejun Heo 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5818054a5fbaSTejun Heo 		link->eh_info.action |= ATA_EH_SOFTRESET;
5819054a5fbaSTejun Heo 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5820054a5fbaSTejun Heo 		ata_link_abort(link);
5821054a5fbaSTejun Heo 		return;
5822054a5fbaSTejun Heo 	}
5823054a5fbaSTejun Heo 
5824c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5825c6fd2807SJeff Garzik 
5826c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5827c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5828c6fd2807SJeff Garzik 		goto err;
5829c6fd2807SJeff Garzik 	return;
5830c6fd2807SJeff Garzik 
5831c6fd2807SJeff Garzik sg_err:
5832c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5833c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5834c6fd2807SJeff Garzik err:
5835c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5836c6fd2807SJeff Garzik }
5837c6fd2807SJeff Garzik 
5838c6fd2807SJeff Garzik /**
5839c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5840c6fd2807SJeff Garzik  *	@qc: command to issue to device
5841c6fd2807SJeff Garzik  *
5842c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5843c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5844c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5845c6fd2807SJeff Garzik  *	is slightly different.
5846c6fd2807SJeff Garzik  *
5847c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5848c6fd2807SJeff Garzik  *
5849c6fd2807SJeff Garzik  *	LOCKING:
5850cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5851c6fd2807SJeff Garzik  *
5852c6fd2807SJeff Garzik  *	RETURNS:
5853c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
5854c6fd2807SJeff Garzik  */
5855c6fd2807SJeff Garzik 
5856c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5857c6fd2807SJeff Garzik {
5858c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5859c6fd2807SJeff Garzik 
5860c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
5861c6fd2807SJeff Garzik 	 * interrupt-driven PIO and ATAPI CDB interrupts.
5862c6fd2807SJeff Garzik 	 */
5863c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5864c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
5865c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
5866e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
5867c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
5868c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
5869c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
5870c6fd2807SJeff Garzik 			break;
5871c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
5872c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5873c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
5874c6fd2807SJeff Garzik 				BUG();
5875c6fd2807SJeff Garzik 			break;
5876c6fd2807SJeff Garzik 		default:
5877c6fd2807SJeff Garzik 			break;
5878c6fd2807SJeff Garzik 		}
5879c6fd2807SJeff Garzik 	}
5880c6fd2807SJeff Garzik 
5881c6fd2807SJeff Garzik 	/* select the device */
5882c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5883c6fd2807SJeff Garzik 
5884c6fd2807SJeff Garzik 	/* start the command */
5885c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5886c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
5887c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5888c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5889c6fd2807SJeff Garzik 
5890c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5891c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5892c6fd2807SJeff Garzik 
5893c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5894c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5895c6fd2807SJeff Garzik 
5896c6fd2807SJeff Garzik 		break;
5897c6fd2807SJeff Garzik 
5898c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5899c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5900c6fd2807SJeff Garzik 
5901c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5902c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5903c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5904c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5905c6fd2807SJeff Garzik 		break;
5906c6fd2807SJeff Garzik 
5907c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5908c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5909c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5910c6fd2807SJeff Garzik 
5911c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5912c6fd2807SJeff Garzik 
5913c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5914c6fd2807SJeff Garzik 			/* PIO data out protocol */
5915c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
5916c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5917c6fd2807SJeff Garzik 
5918c6fd2807SJeff Garzik 			/* always send first data block using
5919c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
5920c6fd2807SJeff Garzik 			 */
5921c6fd2807SJeff Garzik 		} else {
5922c6fd2807SJeff Garzik 			/* PIO data in protocol */
5923c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5924c6fd2807SJeff Garzik 
5925c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5926c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5927c6fd2807SJeff Garzik 
5928c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
5929c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
5930c6fd2807SJeff Garzik 			 */
5931c6fd2807SJeff Garzik 		}
5932c6fd2807SJeff Garzik 
5933c6fd2807SJeff Garzik 		break;
5934c6fd2807SJeff Garzik 
5935c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5936c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5937c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5938c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5939c6fd2807SJeff Garzik 
5940c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5941c6fd2807SJeff Garzik 
5942c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5943c6fd2807SJeff Garzik 
5944c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5945c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5946c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5947c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5948c6fd2807SJeff Garzik 		break;
5949c6fd2807SJeff Garzik 
5950c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5951c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5952c6fd2807SJeff Garzik 
5953c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5954c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5955c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5956c6fd2807SJeff Garzik 
5957c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
5958c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5959c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5960c6fd2807SJeff Garzik 		break;
5961c6fd2807SJeff Garzik 
5962c6fd2807SJeff Garzik 	default:
5963c6fd2807SJeff Garzik 		WARN_ON(1);
5964c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
5965c6fd2807SJeff Garzik 	}
5966c6fd2807SJeff Garzik 
5967c6fd2807SJeff Garzik 	return 0;
5968c6fd2807SJeff Garzik }
5969c6fd2807SJeff Garzik 
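/*
 * Illustrative sketch only: as the kernel-doc above suggests, a
 * hypothetical BMDMA-style LLD can plug the generic helpers straight
 * into its ata_port_operations.  Only the hooks discussed here are
 * shown; a real driver also fills in taskfile access, reset and
 * error-handling callbacks.
 */
#if 0
static const struct ata_port_operations my_port_ops = {
	.port_start	= ata_port_start,	/* defined later in this file */
	.qc_issue	= ata_qc_issue_prot,
};
#endif
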
5970c6fd2807SJeff Garzik /**
5971c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
5972c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
5973c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
5974c6fd2807SJeff Garzik  *
5975c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
5976c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
5977c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
5978c6fd2807SJeff Garzik  *
5979c6fd2807SJeff Garzik  *	LOCKING:
5980cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5981c6fd2807SJeff Garzik  *
5982c6fd2807SJeff Garzik  *	RETURNS:
5983c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
5984c6fd2807SJeff Garzik  */
5985c6fd2807SJeff Garzik 
5986c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap,
5987c6fd2807SJeff Garzik 				  struct ata_queued_cmd *qc)
5988c6fd2807SJeff Garzik {
59899af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
5990c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
5991c6fd2807SJeff Garzik 
5992c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
599344877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5994c6fd2807SJeff Garzik 
5995c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
5996c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5997c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5998c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
5999c6fd2807SJeff Garzik 		 * in this state when ready to receive the CDB.
6000c6fd2807SJeff Garzik 		 */
6001c6fd2807SJeff Garzik 
6002c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6003c6fd2807SJeff Garzik 		 * The flag is set only for ATAPI devices, so there is
6004c6fd2807SJeff Garzik 		 * no need to check is_atapi_taskfile(&qc->tf) again.
6005c6fd2807SJeff Garzik 		 */
6006c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6007c6fd2807SJeff Garzik 			goto idle_irq;
6008c6fd2807SJeff Garzik 		break;
6009c6fd2807SJeff Garzik 	case HSM_ST_LAST:
6010c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
6011c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6012c6fd2807SJeff Garzik 			/* check status of DMA engine */
6013c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
601444877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
601544877b4eSTejun Heo 				ap->print_id, host_stat);
6016c6fd2807SJeff Garzik 
6017c6fd2807SJeff Garzik 			/* if it's not our irq... */
6018c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
6019c6fd2807SJeff Garzik 				goto idle_irq;
6020c6fd2807SJeff Garzik 
6021c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
6022c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
6023c6fd2807SJeff Garzik 
6024c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
6025c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
6026c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
6027c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
6028c6fd2807SJeff Garzik 			}
6029c6fd2807SJeff Garzik 		}
6030c6fd2807SJeff Garzik 		break;
6031c6fd2807SJeff Garzik 	case HSM_ST:
6032c6fd2807SJeff Garzik 		break;
6033c6fd2807SJeff Garzik 	default:
6034c6fd2807SJeff Garzik 		goto idle_irq;
6035c6fd2807SJeff Garzik 	}
6036c6fd2807SJeff Garzik 
6037c6fd2807SJeff Garzik 	/* check altstatus */
6038c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
6039c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
6040c6fd2807SJeff Garzik 		goto idle_irq;
6041c6fd2807SJeff Garzik 
6042c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
6043c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
6044c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
6045c6fd2807SJeff Garzik 		goto idle_irq;
6046c6fd2807SJeff Garzik 
6047c6fd2807SJeff Garzik 	/* ack bmdma irq events */
6048c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
6049c6fd2807SJeff Garzik 
6050c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
6051ea54763fSTejun Heo 
6052ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6053ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6054ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6055ea54763fSTejun Heo 
6056c6fd2807SJeff Garzik 	return 1;	/* irq handled */
6057c6fd2807SJeff Garzik 
6058c6fd2807SJeff Garzik idle_irq:
6059c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
6060c6fd2807SJeff Garzik 
6061c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6062c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
60636d32d30fSJeff Garzik 		ata_chk_status(ap);
60646d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
6065c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6066c6fd2807SJeff Garzik 		return 1;
6067c6fd2807SJeff Garzik 	}
6068c6fd2807SJeff Garzik #endif
6069c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
6070c6fd2807SJeff Garzik }
6071c6fd2807SJeff Garzik 
6072c6fd2807SJeff Garzik /**
6073c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
6074c6fd2807SJeff Garzik  *	@irq: irq line (unused)
6075cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
6076c6fd2807SJeff Garzik  *
6077c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
6078c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
6079c6fd2807SJeff Garzik  *
6080c6fd2807SJeff Garzik  *	LOCKING:
6081cca3974eSJeff Garzik  *	Obtains host lock during operation.
6082c6fd2807SJeff Garzik  *
6083c6fd2807SJeff Garzik  *	RETURNS:
6084c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
6085c6fd2807SJeff Garzik  */
6086c6fd2807SJeff Garzik 
60877d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance)
6088c6fd2807SJeff Garzik {
6089cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
6090c6fd2807SJeff Garzik 	unsigned int i;
6091c6fd2807SJeff Garzik 	unsigned int handled = 0;
6092c6fd2807SJeff Garzik 	unsigned long flags;
6093c6fd2807SJeff Garzik 
6094c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6095cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
6096c6fd2807SJeff Garzik 
6097cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6098c6fd2807SJeff Garzik 		struct ata_port *ap;
6099c6fd2807SJeff Garzik 
6100cca3974eSJeff Garzik 		ap = host->ports[i];
6101c6fd2807SJeff Garzik 		if (ap &&
6102c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
6103c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
6104c6fd2807SJeff Garzik 
61059af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
6106c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6107c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
6108c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
6109c6fd2807SJeff Garzik 		}
6110c6fd2807SJeff Garzik 	}
6111c6fd2807SJeff Garzik 
6112cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
6113c6fd2807SJeff Garzik 
6114c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
6115c6fd2807SJeff Garzik }
6116c6fd2807SJeff Garzik 
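/*
 * Illustrative sketch only: a hypothetical PCI IDE-style LLD with no
 * special interrupt needs can hand ata_interrupt() to
 * ata_host_activate() (defined further down in this file).  my_sht is
 * a made-up scsi_host_template.
 */
#if 0
static int my_attach_host(struct ata_host *host, int irq)
{
	return ata_host_activate(host, irq, ata_interrupt, IRQF_SHARED,
				 &my_sht);
}
#endif
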
6117c6fd2807SJeff Garzik /**
6118c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
6119936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
6120c6fd2807SJeff Garzik  *
6121936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
6122c6fd2807SJeff Garzik  *
6123c6fd2807SJeff Garzik  *	LOCKING:
6124c6fd2807SJeff Garzik  *	None.
6125c6fd2807SJeff Garzik  *
6126c6fd2807SJeff Garzik  *	RETURNS:
6127c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
6128c6fd2807SJeff Garzik  */
6129936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
6130c6fd2807SJeff Garzik {
6131936fd732STejun Heo 	struct ata_port *ap = link->ap;
6132936fd732STejun Heo 
6133a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6134c6fd2807SJeff Garzik }
6135c6fd2807SJeff Garzik 
6136c6fd2807SJeff Garzik /**
6137c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
6138936fd732STejun Heo  *	@link: ATA link to read SCR for
6139c6fd2807SJeff Garzik  *	@reg: SCR to read
6140c6fd2807SJeff Garzik  *	@val: Place to store read value
6141c6fd2807SJeff Garzik  *
6142936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
6143633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6144633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6145c6fd2807SJeff Garzik  *
6146c6fd2807SJeff Garzik  *	LOCKING:
6147633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6148c6fd2807SJeff Garzik  *
6149c6fd2807SJeff Garzik  *	RETURNS:
6150c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6151c6fd2807SJeff Garzik  */
6152936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6153c6fd2807SJeff Garzik {
6154633273a3STejun Heo 	if (ata_is_host_link(link)) {
6155936fd732STejun Heo 		struct ata_port *ap = link->ap;
6156936fd732STejun Heo 
6157936fd732STejun Heo 		if (sata_scr_valid(link))
6158da3dbb17STejun Heo 			return ap->ops->scr_read(ap, reg, val);
6159c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6160c6fd2807SJeff Garzik 	}
6161c6fd2807SJeff Garzik 
6162633273a3STejun Heo 	return sata_pmp_scr_read(link, reg, val);
6163633273a3STejun Heo }
6164633273a3STejun Heo 
6165c6fd2807SJeff Garzik /**
6166c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
6167936fd732STejun Heo  *	@link: ATA link to write SCR for
6168c6fd2807SJeff Garzik  *	@reg: SCR to write
6169c6fd2807SJeff Garzik  *	@val: value to write
6170c6fd2807SJeff Garzik  *
6171936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
6172633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6173633273a3STejun Heo  *	the port is SATA and the port implements ->scr_write.
6174c6fd2807SJeff Garzik  *
6175c6fd2807SJeff Garzik  *	LOCKING:
6176633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6177c6fd2807SJeff Garzik  *
6178c6fd2807SJeff Garzik  *	RETURNS:
6179c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6180c6fd2807SJeff Garzik  */
6181936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
6182c6fd2807SJeff Garzik {
6183633273a3STejun Heo 	if (ata_is_host_link(link)) {
6184936fd732STejun Heo 		struct ata_port *ap = link->ap;
6185936fd732STejun Heo 
6186936fd732STejun Heo 		if (sata_scr_valid(link))
6187da3dbb17STejun Heo 			return ap->ops->scr_write(ap, reg, val);
6188c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6189c6fd2807SJeff Garzik 	}
6190c6fd2807SJeff Garzik 
6191633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6192633273a3STejun Heo }
6193633273a3STejun Heo 
6194c6fd2807SJeff Garzik /**
6195c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6196936fd732STejun Heo  *	@link: ATA link to write SCR for
6197c6fd2807SJeff Garzik  *	@reg: SCR to write
6198c6fd2807SJeff Garzik  *	@val: value to write
6199c6fd2807SJeff Garzik  *
6200c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
6201c6fd2807SJeff Garzik  *	function performs a flush after writing to the register.
6202c6fd2807SJeff Garzik  *
6203c6fd2807SJeff Garzik  *	LOCKING:
6204633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6205c6fd2807SJeff Garzik  *
6206c6fd2807SJeff Garzik  *	RETURNS:
6207c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6208c6fd2807SJeff Garzik  */
6209936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6210c6fd2807SJeff Garzik {
6211633273a3STejun Heo 	if (ata_is_host_link(link)) {
6212936fd732STejun Heo 		struct ata_port *ap = link->ap;
6213da3dbb17STejun Heo 		int rc;
6214da3dbb17STejun Heo 
6215936fd732STejun Heo 		if (sata_scr_valid(link)) {
6216da3dbb17STejun Heo 			rc = ap->ops->scr_write(ap, reg, val);
6217da3dbb17STejun Heo 			if (rc == 0)
6218da3dbb17STejun Heo 				rc = ap->ops->scr_read(ap, reg, &val);
6219da3dbb17STejun Heo 			return rc;
6220c6fd2807SJeff Garzik 		}
6221c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6222c6fd2807SJeff Garzik 	}
6223c6fd2807SJeff Garzik 
6224633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6225633273a3STejun Heo }
6226633273a3STejun Heo 
6227c6fd2807SJeff Garzik /**
6228936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6229936fd732STejun Heo  *	@link: ATA link to test
6230c6fd2807SJeff Garzik  *
6231936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6232936fd732STejun Heo  *	0 if online status of @link cannot be obtained, so
6233936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6234c6fd2807SJeff Garzik  *
6235c6fd2807SJeff Garzik  *	LOCKING:
6236c6fd2807SJeff Garzik  *	None.
6237c6fd2807SJeff Garzik  *
6238c6fd2807SJeff Garzik  *	RETURNS:
6239c6fd2807SJeff Garzik  *	1 if the link's online status is available and the link is online.
6240c6fd2807SJeff Garzik  */
6241936fd732STejun Heo int ata_link_online(struct ata_link *link)
6242c6fd2807SJeff Garzik {
6243c6fd2807SJeff Garzik 	u32 sstatus;
6244c6fd2807SJeff Garzik 
6245936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6246936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6247c6fd2807SJeff Garzik 		return 1;
6248c6fd2807SJeff Garzik 	return 0;
6249c6fd2807SJeff Garzik }
6250c6fd2807SJeff Garzik 
6251c6fd2807SJeff Garzik /**
6252936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6253936fd732STejun Heo  *	@link: ATA link to test
6254c6fd2807SJeff Garzik  *
6255936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6256936fd732STejun Heo  *	returns 0 if offline status of @link cannot be obtained, so
6257936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6258c6fd2807SJeff Garzik  *
6259c6fd2807SJeff Garzik  *	LOCKING:
6260c6fd2807SJeff Garzik  *	None.
6261c6fd2807SJeff Garzik  *
6262c6fd2807SJeff Garzik  *	RETURNS:
6263c6fd2807SJeff Garzik  *	1 if the link's offline status is available and the link is offline.
6264c6fd2807SJeff Garzik  */
6265936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6266c6fd2807SJeff Garzik {
6267c6fd2807SJeff Garzik 	u32 sstatus;
6268c6fd2807SJeff Garzik 
6269936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6270936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6271c6fd2807SJeff Garzik 		return 1;
6272c6fd2807SJeff Garzik 	return 0;
6273c6fd2807SJeff Garzik }
6274c6fd2807SJeff Garzik 
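/*
 * Illustrative sketch only: how caller code might combine the SCR and
 * link-status helpers above.  Assumes a SATA link where SStatus bits
 * 7:4 report the negotiated interface speed (1 = 1.5 Gbps, 2 = 3.0 Gbps).
 */
#if 0
static void my_report_link(struct ata_link *link)
{
	u32 sstatus;

	if (!ata_link_online(link))
		return;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		ata_port_printk(link->ap, KERN_INFO,
				"SATA link up, speed code %u\n",
				(sstatus >> 4) & 0xf);
}
#endif
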
6275c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6276c6fd2807SJeff Garzik {
6277c6fd2807SJeff Garzik 	unsigned int err_mask;
6278c6fd2807SJeff Garzik 	u8 cmd;
6279c6fd2807SJeff Garzik 
6280c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6281c6fd2807SJeff Garzik 		return 0;
6282c6fd2807SJeff Garzik 
62836fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6284c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6285c6fd2807SJeff Garzik 	else
6286c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6287c6fd2807SJeff Garzik 
62884f34337bSAlan Cox 	/* This is wrong. On a failed flush we get back the LBA of the lost
62894f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
62904f34337bSAlan Cox 	   a further flush command to continue the writeback until it
62914f34337bSAlan Cox 	   does not error */
6292c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6293c6fd2807SJeff Garzik 	if (err_mask) {
6294c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6295c6fd2807SJeff Garzik 		return -EIO;
6296c6fd2807SJeff Garzik 	}
6297c6fd2807SJeff Garzik 
6298c6fd2807SJeff Garzik 	return 0;
6299c6fd2807SJeff Garzik }
6300c6fd2807SJeff Garzik 
63016ffa01d8STejun Heo #ifdef CONFIG_PM
6302cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6303cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6304cca3974eSJeff Garzik 			       int wait)
6305c6fd2807SJeff Garzik {
6306c6fd2807SJeff Garzik 	unsigned long flags;
6307c6fd2807SJeff Garzik 	int i, rc;
6308c6fd2807SJeff Garzik 
6309cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6310cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6311e3667ebfSTejun Heo 		struct ata_link *link;
6312c6fd2807SJeff Garzik 
6313c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6314c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6315c6fd2807SJeff Garzik 		 */
6316c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6317c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6318c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6319c6fd2807SJeff Garzik 		}
6320c6fd2807SJeff Garzik 
6321c6fd2807SJeff Garzik 		/* request PM ops to EH */
6322c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6323c6fd2807SJeff Garzik 
6324c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6325c6fd2807SJeff Garzik 		if (wait) {
6326c6fd2807SJeff Garzik 			rc = 0;
6327c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6328c6fd2807SJeff Garzik 		}
6329c6fd2807SJeff Garzik 
6330c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6331e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6332e3667ebfSTejun Heo 			link->eh_info.action |= action;
6333e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6334e3667ebfSTejun Heo 		}
6335c6fd2807SJeff Garzik 
6336c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6337c6fd2807SJeff Garzik 
6338c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6339c6fd2807SJeff Garzik 
6340c6fd2807SJeff Garzik 		/* wait and check result */
6341c6fd2807SJeff Garzik 		if (wait) {
6342c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6343c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6344c6fd2807SJeff Garzik 			if (rc)
6345c6fd2807SJeff Garzik 				return rc;
6346c6fd2807SJeff Garzik 		}
6347c6fd2807SJeff Garzik 	}
6348c6fd2807SJeff Garzik 
6349c6fd2807SJeff Garzik 	return 0;
6350c6fd2807SJeff Garzik }
6351c6fd2807SJeff Garzik 
6352c6fd2807SJeff Garzik /**
6353cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6354cca3974eSJeff Garzik  *	@host: host to suspend
6355c6fd2807SJeff Garzik  *	@mesg: PM message
6356c6fd2807SJeff Garzik  *
6357cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6358c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6359c6fd2807SJeff Garzik  *	to finish.
6360c6fd2807SJeff Garzik  *
6361c6fd2807SJeff Garzik  *	LOCKING:
6362c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6363c6fd2807SJeff Garzik  *
6364c6fd2807SJeff Garzik  *	RETURNS:
6365c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6366c6fd2807SJeff Garzik  */
6367cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6368c6fd2807SJeff Garzik {
63699666f400STejun Heo 	int rc;
6370c6fd2807SJeff Garzik 
6371cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
63729666f400STejun Heo 	if (rc == 0)
6373cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
6374c6fd2807SJeff Garzik 	return rc;
6375c6fd2807SJeff Garzik }
6376c6fd2807SJeff Garzik 
6377c6fd2807SJeff Garzik /**
6378cca3974eSJeff Garzik  *	ata_host_resume - resume host
6379cca3974eSJeff Garzik  *	@host: host to resume
6380c6fd2807SJeff Garzik  *
6381cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6382c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6383c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
6384c6fd2807SJeff Garzik  *
6385c6fd2807SJeff Garzik  *	LOCKING:
6386c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6387c6fd2807SJeff Garzik  */
6388cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6389c6fd2807SJeff Garzik {
6390cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6391c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6392cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6393c6fd2807SJeff Garzik }
63946ffa01d8STejun Heo #endif
6395c6fd2807SJeff Garzik 
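/*
 * Illustrative sketch only (meaningful when CONFIG_PM is set): a
 * hypothetical bus driver's PM hooks usually just forward to the
 * suspend/resume helpers above, fetching the ata_host from drvdata.
 */
#if 0
static int my_suspend(struct device *dev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(dev);

	return ata_host_suspend(host, mesg);
}

static int my_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_resume(host);
	return 0;
}
#endif
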
6396c6fd2807SJeff Garzik /**
6397c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6398c6fd2807SJeff Garzik  *	@ap: Port to initialize
6399c6fd2807SJeff Garzik  *
6400c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6401c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
6402c6fd2807SJeff Garzik  *
6403c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6404c6fd2807SJeff Garzik  *
6405c6fd2807SJeff Garzik  *	LOCKING:
6406c6fd2807SJeff Garzik  *	Inherited from caller.
6407c6fd2807SJeff Garzik  */
6408c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6409c6fd2807SJeff Garzik {
6410c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6411c6fd2807SJeff Garzik 	int rc;
6412c6fd2807SJeff Garzik 
6413f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6414f0d36efdSTejun Heo 				      GFP_KERNEL);
6415c6fd2807SJeff Garzik 	if (!ap->prd)
6416c6fd2807SJeff Garzik 		return -ENOMEM;
6417c6fd2807SJeff Garzik 
6418c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
6419f0d36efdSTejun Heo 	if (rc)
6420c6fd2807SJeff Garzik 		return rc;
6421c6fd2807SJeff Garzik 
6422f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6423f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
6424c6fd2807SJeff Garzik 	return 0;
6425c6fd2807SJeff Garzik }
6426c6fd2807SJeff Garzik 
6427c6fd2807SJeff Garzik /**
6428c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6429c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6430c6fd2807SJeff Garzik  *
6431c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6432c6fd2807SJeff Garzik  *
6433c6fd2807SJeff Garzik  *	LOCKING:
6434c6fd2807SJeff Garzik  *	Inherited from caller.
6435c6fd2807SJeff Garzik  */
6436c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6437c6fd2807SJeff Garzik {
64389af5c9c9STejun Heo 	struct ata_link *link = dev->link;
64399af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6440c6fd2807SJeff Garzik 	unsigned long flags;
6441c6fd2807SJeff Garzik 
6442c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
64439af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
64449af5c9c9STejun Heo 	link->sata_spd = 0;
6445c6fd2807SJeff Garzik 
6446c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6447c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6448cca3974eSJeff Garzik 	 * host lock.
6449c6fd2807SJeff Garzik 	 */
6450c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6451c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
64523dcc323fSTejun Heo 	dev->horkage = 0;
6453c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6454c6fd2807SJeff Garzik 
6455c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6456c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6457c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6458c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6459c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6460c6fd2807SJeff Garzik }
6461c6fd2807SJeff Garzik 
6462c6fd2807SJeff Garzik /**
64634fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
64644fb37a25STejun Heo  *	@ap: ATA port link is attached to
64654fb37a25STejun Heo  *	@link: Link structure to initialize
64668989805dSTejun Heo  *	@pmp: Port multiplier port number
64674fb37a25STejun Heo  *
64684fb37a25STejun Heo  *	Initialize @link.
64694fb37a25STejun Heo  *
64704fb37a25STejun Heo  *	LOCKING:
64714fb37a25STejun Heo  *	Kernel thread context (may sleep)
64724fb37a25STejun Heo  */
6473fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
64744fb37a25STejun Heo {
64754fb37a25STejun Heo 	int i;
64764fb37a25STejun Heo 
64774fb37a25STejun Heo 	/* clear everything except for devices */
64784fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
64794fb37a25STejun Heo 
64804fb37a25STejun Heo 	link->ap = ap;
64818989805dSTejun Heo 	link->pmp = pmp;
64824fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
64834fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
64844fb37a25STejun Heo 
64854fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
64864fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
64874fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
64884fb37a25STejun Heo 
64894fb37a25STejun Heo 		dev->link = link;
64904fb37a25STejun Heo 		dev->devno = dev - link->device;
64914fb37a25STejun Heo 		ata_dev_init(dev);
64924fb37a25STejun Heo 	}
64934fb37a25STejun Heo }
64944fb37a25STejun Heo 
64954fb37a25STejun Heo /**
64964fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
64974fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
64984fb37a25STejun Heo  *
64994fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
65004fb37a25STejun Heo  *	configured value.
65014fb37a25STejun Heo  *
65024fb37a25STejun Heo  *	LOCKING:
65034fb37a25STejun Heo  *	Kernel thread context (may sleep).
65044fb37a25STejun Heo  *
65054fb37a25STejun Heo  *	RETURNS:
65064fb37a25STejun Heo  *	0 on success, -errno on failure.
65074fb37a25STejun Heo  */
6508fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
65094fb37a25STejun Heo {
65104fb37a25STejun Heo 	u32 scontrol, spd;
65114fb37a25STejun Heo 	int rc;
65124fb37a25STejun Heo 
65134fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
65144fb37a25STejun Heo 	if (rc)
65154fb37a25STejun Heo 		return rc;
65164fb37a25STejun Heo 
65174fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
65184fb37a25STejun Heo 	if (spd)
65194fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
65204fb37a25STejun Heo 
65214fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
65224fb37a25STejun Heo 
65234fb37a25STejun Heo 	return 0;
65244fb37a25STejun Heo }
65254fb37a25STejun Heo 
65264fb37a25STejun Heo /**
6527f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6528f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6529c6fd2807SJeff Garzik  *
6530f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6531f3187195STejun Heo  *
6532f3187195STejun Heo  *	RETURNS:
6533f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6534c6fd2807SJeff Garzik  *
6535c6fd2807SJeff Garzik  *	LOCKING:
6536f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6537c6fd2807SJeff Garzik  */
6538f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6539c6fd2807SJeff Garzik {
6540f3187195STejun Heo 	struct ata_port *ap;
6541c6fd2807SJeff Garzik 
6542f3187195STejun Heo 	DPRINTK("ENTER\n");
6543f3187195STejun Heo 
6544f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6545f3187195STejun Heo 	if (!ap)
6546f3187195STejun Heo 		return NULL;
6547f3187195STejun Heo 
6548f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6549cca3974eSJeff Garzik 	ap->lock = &host->lock;
6550c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6551f3187195STejun Heo 	ap->print_id = -1;
6552c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6553cca3974eSJeff Garzik 	ap->host = host;
6554f3187195STejun Heo 	ap->dev = host->dev;
6555c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6556c6fd2807SJeff Garzik 
6557c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6558c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6559c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6560c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6561c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6562c6fd2807SJeff Garzik #else
6563c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6564c6fd2807SJeff Garzik #endif
6565c6fd2807SJeff Garzik 
656665f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
656765f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
656865f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6569c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6570c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
65715ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
65725ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
65735ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6574c6fd2807SJeff Garzik 
6575c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6576c6fd2807SJeff Garzik 
65778989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6578c6fd2807SJeff Garzik 
6579c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6580c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6581c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6582c6fd2807SJeff Garzik #endif
6583c6fd2807SJeff Garzik 	return ap;
6584c6fd2807SJeff Garzik }
6585c6fd2807SJeff Garzik 
6586f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6587f0d36efdSTejun Heo {
6588f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6589f0d36efdSTejun Heo 	int i;
6590f0d36efdSTejun Heo 
6591f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6592f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6593f0d36efdSTejun Heo 
6594ecef7253STejun Heo 		if (!ap)
6595ecef7253STejun Heo 			continue;
6596ecef7253STejun Heo 
6597ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6598f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6599f0d36efdSTejun Heo 	}
6600f0d36efdSTejun Heo 
6601ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6602f0d36efdSTejun Heo 		host->ops->host_stop(host);
66031aa56ccaSTejun Heo 
66041aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
66051aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
66061aa506e4STejun Heo 
66074911487aSTejun Heo 		if (!ap)
66084911487aSTejun Heo 			continue;
66094911487aSTejun Heo 
66104911487aSTejun Heo 		if (ap->scsi_host)
66111aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
66121aa506e4STejun Heo 
6613633273a3STejun Heo 		kfree(ap->pmp_link);
66144911487aSTejun Heo 		kfree(ap);
66151aa506e4STejun Heo 		host->ports[i] = NULL;
66161aa506e4STejun Heo 	}
66171aa506e4STejun Heo 
66181aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6619f0d36efdSTejun Heo }
6620f0d36efdSTejun Heo 
6621c6fd2807SJeff Garzik /**
6622f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6623f3187195STejun Heo  *	@dev: generic device this host is associated with
6624f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6625f3187195STejun Heo  *
6626f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  The LLD calls
6627f3187195STejun Heo  *	this function to allocate a host, then initializes it fully and
6628f3187195STejun Heo  *	attaches it using ata_host_register().
6629f3187195STejun Heo  *
6630f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6631f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6632f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6633f3187195STejun Heo  *	ports will be automatically freed on registration.
6634f3187195STejun Heo  *
6635f3187195STejun Heo  *	RETURNS:
6636f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6637f3187195STejun Heo  *
6638f3187195STejun Heo  *	LOCKING:
6639f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6640f3187195STejun Heo  */
6641f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6642f3187195STejun Heo {
6643f3187195STejun Heo 	struct ata_host *host;
6644f3187195STejun Heo 	size_t sz;
6645f3187195STejun Heo 	int i;
6646f3187195STejun Heo 
6647f3187195STejun Heo 	DPRINTK("ENTER\n");
6648f3187195STejun Heo 
6649f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6650f3187195STejun Heo 		return NULL;
6651f3187195STejun Heo 
6652f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6653f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6655f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6656f3187195STejun Heo 	if (!host)
6657f3187195STejun Heo 		goto err_out;
6658f3187195STejun Heo 
6659f3187195STejun Heo 	devres_add(dev, host);
6660f3187195STejun Heo 	dev_set_drvdata(dev, host);
6661f3187195STejun Heo 
6662f3187195STejun Heo 	spin_lock_init(&host->lock);
6663f3187195STejun Heo 	host->dev = dev;
6664f3187195STejun Heo 	host->n_ports = max_ports;
6665f3187195STejun Heo 
6666f3187195STejun Heo 	/* allocate ports bound to this host */
6667f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6668f3187195STejun Heo 		struct ata_port *ap;
6669f3187195STejun Heo 
6670f3187195STejun Heo 		ap = ata_port_alloc(host);
6671f3187195STejun Heo 		if (!ap)
6672f3187195STejun Heo 			goto err_out;
6673f3187195STejun Heo 
6674f3187195STejun Heo 		ap->port_no = i;
6675f3187195STejun Heo 		host->ports[i] = ap;
6676f3187195STejun Heo 	}
6677f3187195STejun Heo 
6678f3187195STejun Heo 	devres_remove_group(dev, NULL);
6679f3187195STejun Heo 	return host;
6680f3187195STejun Heo 
6681f3187195STejun Heo  err_out:
6682f3187195STejun Heo 	devres_release_group(dev, NULL);
6683f3187195STejun Heo 	return NULL;
6684f3187195STejun Heo }
6685f3187195STejun Heo 
6686f3187195STejun Heo /**
6687f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6688f5cda257STejun Heo  *	@dev: generic device this host is associated with
6689f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6690f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6691f5cda257STejun Heo  *
6692f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6693f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6694f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6695f5cda257STejun Heo  *
6696f5cda257STejun Heo  *	RETURNS:
6697f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6698f5cda257STejun Heo  *
6699f5cda257STejun Heo  *	LOCKING:
6700f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6701f5cda257STejun Heo  */
6702f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6703f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6704f5cda257STejun Heo 				      int n_ports)
6705f5cda257STejun Heo {
6706f5cda257STejun Heo 	const struct ata_port_info *pi;
6707f5cda257STejun Heo 	struct ata_host *host;
6708f5cda257STejun Heo 	int i, j;
6709f5cda257STejun Heo 
6710f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6711f5cda257STejun Heo 	if (!host)
6712f5cda257STejun Heo 		return NULL;
6713f5cda257STejun Heo 
6714f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6715f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6716f5cda257STejun Heo 
6717f5cda257STejun Heo 		if (ppi[j])
6718f5cda257STejun Heo 			pi = ppi[j++];
6719f5cda257STejun Heo 
6720f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6721f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6722f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6723f5cda257STejun Heo 		ap->flags |= pi->flags;
67240c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6725f5cda257STejun Heo 		ap->ops = pi->port_ops;
6726f5cda257STejun Heo 
6727f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6728f5cda257STejun Heo 			host->ops = pi->port_ops;
6729f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6730f5cda257STejun Heo 			host->private_data = pi->private_data;
6731f5cda257STejun Heo 	}
6732f5cda257STejun Heo 
6733f5cda257STejun Heo 	return host;
6734f5cda257STejun Heo }
6735f5cda257STejun Heo 
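/*
 * Illustrative sketch only: a hypothetical two-port controller whose
 * ports share one ata_port_info can pass a NULL-terminated,
 * single-entry @ppi array; the last entry is reused for the remaining
 * ports as described above.  my_port_info and my_port_ops are made up.
 */
#if 0
static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= 0x1f,			/* PIO 0-4 */
	.udma_mask	= 0x7f,			/* UDMA 0-6 */
	.port_ops	= &my_port_ops,
};

static struct ata_host *my_alloc_host(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}
#endif
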
6736f5cda257STejun Heo /**
6737ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6738ecef7253STejun Heo  *	@host: ATA host to start ports for
6739ecef7253STejun Heo  *
6740ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6741ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6742ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6743f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6744f3187195STejun Heo  *	first non-dummy port ops.
6745ecef7253STejun Heo  *
6746ecef7253STejun Heo  *	LOCKING:
6747ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6748ecef7253STejun Heo  *
6749ecef7253STejun Heo  *	RETURNS:
6750ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6751ecef7253STejun Heo  */
6752ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6753ecef7253STejun Heo {
6754ecef7253STejun Heo 	int i, rc;
6755ecef7253STejun Heo 
6756ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6757ecef7253STejun Heo 		return 0;
6758ecef7253STejun Heo 
6759ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6760ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6761ecef7253STejun Heo 
6762f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6763f3187195STejun Heo 			host->ops = ap->ops;
6764f3187195STejun Heo 
6765ecef7253STejun Heo 		if (ap->ops->port_start) {
6766ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6767ecef7253STejun Heo 			if (rc) {
6768ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
6769ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
6770ecef7253STejun Heo 				goto err_out;
6771ecef7253STejun Heo 			}
6772ecef7253STejun Heo 		}
6773ecef7253STejun Heo 
6774ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6775ecef7253STejun Heo 	}
6776ecef7253STejun Heo 
6777ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6778ecef7253STejun Heo 	return 0;
6779ecef7253STejun Heo 
6780ecef7253STejun Heo  err_out:
6781ecef7253STejun Heo 	while (--i >= 0) {
6782ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6783ecef7253STejun Heo 
6784ecef7253STejun Heo 		if (ap->ops->port_stop)
6785ecef7253STejun Heo 			ap->ops->port_stop(ap);
6786ecef7253STejun Heo 	}
6787ecef7253STejun Heo 	return rc;
6788ecef7253STejun Heo }
6789ecef7253STejun Heo 
6790ecef7253STejun Heo /**
6791cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
6792cca3974eSJeff Garzik  *	@host:	host to initialize
6793cca3974eSJeff Garzik  *	@dev:	device host is attached to
6794cca3974eSJeff Garzik  *	@flags:	host flags
6795c6fd2807SJeff Garzik  *	@ops:	port_ops
6796c6fd2807SJeff Garzik  *
6797c6fd2807SJeff Garzik  *	LOCKING:
6798c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6799c6fd2807SJeff Garzik  *
6800c6fd2807SJeff Garzik  */
6801f3187195STejun Heo /* KILLME - the only user left is ipr */
6802cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6803cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6804c6fd2807SJeff Garzik {
6805cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6806cca3974eSJeff Garzik 	host->dev = dev;
6807cca3974eSJeff Garzik 	host->flags = flags;
6808cca3974eSJeff Garzik 	host->ops = ops;
6809c6fd2807SJeff Garzik }
6810c6fd2807SJeff Garzik 
6811c6fd2807SJeff Garzik /**
6812f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6813f3187195STejun Heo  *	@host: ATA host to register
6814f3187195STejun Heo  *	@sht: template for SCSI host
6815c6fd2807SJeff Garzik  *
6816f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
6817f3187195STejun Heo  *	ata_host_alloc() and fully initialized by the LLD.  This function
6818f3187195STejun Heo  *	starts ports, registers @host with the ATA and SCSI layers and
6819f3187195STejun Heo  *	probes registered devices.
6820c6fd2807SJeff Garzik  *
6821c6fd2807SJeff Garzik  *	LOCKING:
6822f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6823c6fd2807SJeff Garzik  *
6824c6fd2807SJeff Garzik  *	RETURNS:
6825f3187195STejun Heo  *	0 on success, -errno otherwise.
6826c6fd2807SJeff Garzik  */
6827f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6828c6fd2807SJeff Garzik {
6829f3187195STejun Heo 	int i, rc;
6830c6fd2807SJeff Garzik 
6831f3187195STejun Heo 	/* host must have been started */
6832f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
6833f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
6834f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
6835f3187195STejun Heo 		WARN_ON(1);
6836f3187195STejun Heo 		return -EINVAL;
683702f076aaSAlan Cox 	}
6838f0d36efdSTejun Heo 
6839f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
6840f3187195STejun Heo 	 * determine the exact number of ports to allocate at
6841f3187195STejun Heo 	 * allocation time.
6842f3187195STejun Heo 	 */
6843f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
6844f3187195STejun Heo 		kfree(host->ports[i]);
6845f0d36efdSTejun Heo 
6846f3187195STejun Heo 	/* give ports names and add SCSI hosts */
6847f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
6848f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
6849c6fd2807SJeff Garzik 
6850f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
6851ecef7253STejun Heo 	if (rc)
6852f3187195STejun Heo 		return rc;
6853ecef7253STejun Heo 
6854fafbae87STejun Heo 	/* associate with ACPI nodes */
6855fafbae87STejun Heo 	ata_acpi_associate(host);
6856fafbae87STejun Heo 
6857f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
6858cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6859cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6860f3187195STejun Heo 		unsigned long xfer_mask;
6861f3187195STejun Heo 
6862f3187195STejun Heo 		/* set SATA cable type if still unset */
6863f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6864f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
6865c6fd2807SJeff Garzik 
6866c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
68674fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
6868c6fd2807SJeff Garzik 
6869cbcdd875STejun Heo 		/* print per-port info to dmesg */
6870f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6871f3187195STejun Heo 					      ap->udma_mask);
6872f3187195STejun Heo 
6873abf6e8edSTejun Heo 		if (!ata_port_is_dummy(ap)) {
6874cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
6875cbcdd875STejun Heo 					"%cATA max %s %s\n",
6876a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6877f3187195STejun Heo 					ata_mode_string(xfer_mask),
6878cbcdd875STejun Heo 					ap->link.eh_info.desc);
6879abf6e8edSTejun Heo 			ata_ehi_clear_desc(&ap->link.eh_info);
6880abf6e8edSTejun Heo 		} else
6881f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6882c6fd2807SJeff Garzik 	}
6883c6fd2807SJeff Garzik 
6884f3187195STejun Heo 	/* perform each probe synchronously */
6885f3187195STejun Heo 	DPRINTK("probe begin\n");
6886f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6887f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
6888f3187195STejun Heo 		int rc;
6889f3187195STejun Heo 
6890f3187195STejun Heo 		/* probe */
6891c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
68929af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
6893c6fd2807SJeff Garzik 			unsigned long flags;
6894c6fd2807SJeff Garzik 
6895c6fd2807SJeff Garzik 			ata_port_probe(ap);
6896c6fd2807SJeff Garzik 
6897c6fd2807SJeff Garzik 			/* kick EH for boot probing */
6898c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
6899c6fd2807SJeff Garzik 
6900f58229f8STejun Heo 			ehi->probe_mask =
6901f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
6902c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
6903c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6904c6fd2807SJeff Garzik 
6905f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6906c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
6907c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
6908c6fd2807SJeff Garzik 
6909c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
6910c6fd2807SJeff Garzik 
6911c6fd2807SJeff Garzik 			/* wait for EH to finish */
6912c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6913c6fd2807SJeff Garzik 		} else {
691444877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6915c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
691644877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6917c6fd2807SJeff Garzik 
6918c6fd2807SJeff Garzik 			if (rc) {
6919c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
6920c6fd2807SJeff Garzik 				 * Current libata behavior will
6921c6fd2807SJeff Garzik 				 * tear down everything when
6922c6fd2807SJeff Garzik 				 * the module is removed
6923c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
6924c6fd2807SJeff Garzik 				 */
6925c6fd2807SJeff Garzik 			}
6926c6fd2807SJeff Garzik 		}
6927c6fd2807SJeff Garzik 	}
6928c6fd2807SJeff Garzik 
6929c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
6930c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
6931cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6932cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6933c6fd2807SJeff Garzik 
69341ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
6935c6fd2807SJeff Garzik 	}
6936c6fd2807SJeff Garzik 
6937f3187195STejun Heo 	return 0;
6938f3187195STejun Heo }
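
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * requests its IRQ elsewhere can perform the start/register steps by hand.
 * "foo_port_info" and "foo_sht" are hypothetical driver-private symbols;
 * drivers that also need an IRQ request normally use ata_host_activate()
 * below instead.
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host;
 *	int rc;
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, 1);
 *	if (!host)
 *		return -ENOMEM;
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = ata_host_register(host, &foo_sht);
 */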
6939f3187195STejun Heo 
6940f3187195STejun Heo /**
6941f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
6942f5cda257STejun Heo  *	@host: target ATA host
6943f5cda257STejun Heo  *	@irq: IRQ to request
6944f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
6945f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
6946f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
6947f5cda257STejun Heo  *
6948f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
6949f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start the host,
6950f5cda257STejun Heo  *	request the IRQ and register it.  This helper takes the necessary
6951f5cda257STejun Heo  *	arguments and performs the three steps in one go.
6952f5cda257STejun Heo  *
6953f5cda257STejun Heo  *	LOCKING:
6954f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6955f5cda257STejun Heo  *
6956f5cda257STejun Heo  *	RETURNS:
6957f5cda257STejun Heo  *	0 on success, -errno otherwise.
6958f5cda257STejun Heo  */
6959f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
6960f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
6961f5cda257STejun Heo 		      struct scsi_host_template *sht)
6962f5cda257STejun Heo {
6963cbcdd875STejun Heo 	int i, rc;
6964f5cda257STejun Heo 
6965f5cda257STejun Heo 	rc = ata_host_start(host);
6966f5cda257STejun Heo 	if (rc)
6967f5cda257STejun Heo 		return rc;
6968f5cda257STejun Heo 
6969f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6970f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
6971f5cda257STejun Heo 	if (rc)
6972f5cda257STejun Heo 		return rc;
6973f5cda257STejun Heo 
6974cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
6975cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
69764031826bSTejun Heo 
6977f5cda257STejun Heo 	rc = ata_host_register(host, sht);
6978f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
6979f5cda257STejun Heo 	if (rc)
6980f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
6981f5cda257STejun Heo 
6982f5cda257STejun Heo 	return rc;
6983f5cda257STejun Heo }
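
/*
 * Illustrative usage sketch (not part of the original file): a typical SFF
 * PCI LLD probe path built on ata_host_activate().  "foo_port_info" and
 * "foo_sht" are hypothetical driver symbols; the generic ata_interrupt()
 * handler is used here, but a driver with its own interrupt logic passes
 * its own handler.
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host;
 *	int rc;
 *
 *	rc = pcim_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *	if (!host)
 *		return -ENOMEM;
 *	(map BARs and fill host->ports[0]->ioaddr here)
 *	return ata_host_activate(host, pdev->irq, ata_interrupt,
 *				 IRQF_SHARED, &foo_sht);
 */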
6984f5cda257STejun Heo 
6985f5cda257STejun Heo /**
6986c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
6987c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
6988c6fd2807SJeff Garzik  *
6989c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
6990c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
6991c6fd2807SJeff Garzik  *	be quiescent on return from this function.
6992c6fd2807SJeff Garzik  *
6993c6fd2807SJeff Garzik  *	LOCKING:
6994c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6995c6fd2807SJeff Garzik  */
6996741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap)
6997c6fd2807SJeff Garzik {
6998c6fd2807SJeff Garzik 	unsigned long flags;
699941bda9c9STejun Heo 	struct ata_link *link;
7000f58229f8STejun Heo 	struct ata_device *dev;
7001c6fd2807SJeff Garzik 
7002c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
7003c6fd2807SJeff Garzik 		goto skip_eh;
7004c6fd2807SJeff Garzik 
7005c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
7006c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7007c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
7008c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7009c6fd2807SJeff Garzik 
7010c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
7011c6fd2807SJeff Garzik 
7012c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
7013c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
7014c6fd2807SJeff Garzik 	 */
7015c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7016c6fd2807SJeff Garzik 
701741bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
701841bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
7019f58229f8STejun Heo 			ata_dev_disable(dev);
702041bda9c9STejun Heo 	}
7021c6fd2807SJeff Garzik 
7022c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7023c6fd2807SJeff Garzik 
7024c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
7025c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
7026c6fd2807SJeff Garzik 	 * target.
7027c6fd2807SJeff Garzik 	 */
7028c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7029c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
7030c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7031c6fd2807SJeff Garzik 
7032c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
703345a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
7034c6fd2807SJeff Garzik 
7035c6fd2807SJeff Garzik  skip_eh:
7036c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
7037cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
7038c6fd2807SJeff Garzik }
7039c6fd2807SJeff Garzik 
7040c6fd2807SJeff Garzik /**
70410529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
70420529c159STejun Heo  *	@host: Host to detach
70430529c159STejun Heo  *
70440529c159STejun Heo  *	Detach all ports of @host.
70450529c159STejun Heo  *
70460529c159STejun Heo  *	LOCKING:
70470529c159STejun Heo  *	Kernel thread context (may sleep).
70480529c159STejun Heo  */
70490529c159STejun Heo void ata_host_detach(struct ata_host *host)
70500529c159STejun Heo {
70510529c159STejun Heo 	int i;
70520529c159STejun Heo 
70530529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
70540529c159STejun Heo 		ata_port_detach(host->ports[i]);
70550529c159STejun Heo }
70560529c159STejun Heo 
7057c6fd2807SJeff Garzik /**
7058c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
7059c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
7060c6fd2807SJeff Garzik  *
7061c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
7062c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7063c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
7064c6fd2807SJeff Garzik  *	relative to cmd_addr.
7065c6fd2807SJeff Garzik  *
7066c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7067c6fd2807SJeff Garzik  */
7068c6fd2807SJeff Garzik 
7069c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
7070c6fd2807SJeff Garzik {
7071c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7072c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7073c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7074c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7075c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7076c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7077c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7078c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7079c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7080c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7081c6fd2807SJeff Garzik }
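
/*
 * Illustrative usage sketch (not part of the original file): an LLD that
 * has already mapped its command and control blocks fills in the two base
 * addresses and lets ata_std_ports() derive the rest.  "cmd_base" and
 * "ctl_base" are hypothetical __iomem cookies obtained from the driver's
 * own resource mapping.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->altstatus_addr = ctl_base;
 *	ioaddr->ctl_addr = ctl_base;
 *	ata_std_ports(ioaddr);
 */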
7082c6fd2807SJeff Garzik 
7083c6fd2807SJeff Garzik 
7084c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7085c6fd2807SJeff Garzik 
7086c6fd2807SJeff Garzik /**
7087c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
7088c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
7089c6fd2807SJeff Garzik  *
7090b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
7091b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
7092b878ca5dSTejun Heo  *	release is handled via devres.
7093c6fd2807SJeff Garzik  *
7094c6fd2807SJeff Garzik  *	LOCKING:
7095c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
7096c6fd2807SJeff Garzik  */
7097c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
7098c6fd2807SJeff Garzik {
70992855568bSJeff Garzik 	struct device *dev = &pdev->dev;
7100cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
7101c6fd2807SJeff Garzik 
7102f0d36efdSTejun Heo 	ata_host_detach(host);
7103c6fd2807SJeff Garzik }
7104c6fd2807SJeff Garzik 
7105c6fd2807SJeff Garzik /* move to PCI subsystem */
7106c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7107c6fd2807SJeff Garzik {
7108c6fd2807SJeff Garzik 	unsigned long tmp = 0;
7109c6fd2807SJeff Garzik 
7110c6fd2807SJeff Garzik 	switch (bits->width) {
7111c6fd2807SJeff Garzik 	case 1: {
7112c6fd2807SJeff Garzik 		u8 tmp8 = 0;
7113c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
7114c6fd2807SJeff Garzik 		tmp = tmp8;
7115c6fd2807SJeff Garzik 		break;
7116c6fd2807SJeff Garzik 	}
7117c6fd2807SJeff Garzik 	case 2: {
7118c6fd2807SJeff Garzik 		u16 tmp16 = 0;
7119c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
7120c6fd2807SJeff Garzik 		tmp = tmp16;
7121c6fd2807SJeff Garzik 		break;
7122c6fd2807SJeff Garzik 	}
7123c6fd2807SJeff Garzik 	case 4: {
7124c6fd2807SJeff Garzik 		u32 tmp32 = 0;
7125c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
7126c6fd2807SJeff Garzik 		tmp = tmp32;
7127c6fd2807SJeff Garzik 		break;
7128c6fd2807SJeff Garzik 	}
7129c6fd2807SJeff Garzik 
7130c6fd2807SJeff Garzik 	default:
7131c6fd2807SJeff Garzik 		return -EINVAL;
7132c6fd2807SJeff Garzik 	}
7133c6fd2807SJeff Garzik 
7134c6fd2807SJeff Garzik 	tmp &= bits->mask;
7135c6fd2807SJeff Garzik 
7136c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
7137c6fd2807SJeff Garzik }
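
/*
 * Illustrative usage sketch (not part of the original file): checking a
 * firmware port-enable bit before registering a port.  The config space
 * offset 0x41, width 1, mask 0x80 and required value 0x80 are made up for
 * the example and do not describe any real chipset.
 *
 *	static const struct pci_bits foo_port_enable = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &foo_port_enable))
 *		return -ENODEV;		(port disabled by firmware)
 */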
7138c6fd2807SJeff Garzik 
71396ffa01d8STejun Heo #ifdef CONFIG_PM
7140c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7141c6fd2807SJeff Garzik {
7142c6fd2807SJeff Garzik 	pci_save_state(pdev);
7143c6fd2807SJeff Garzik 	pci_disable_device(pdev);
71444c90d971STejun Heo 
71454c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
7146c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
7147c6fd2807SJeff Garzik }
7148c6fd2807SJeff Garzik 
7149553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
7150c6fd2807SJeff Garzik {
7151553c4aa6STejun Heo 	int rc;
7152553c4aa6STejun Heo 
7153c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
7154c6fd2807SJeff Garzik 	pci_restore_state(pdev);
7155553c4aa6STejun Heo 
7156f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
7157553c4aa6STejun Heo 	if (rc) {
7158553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
7159553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
7160553c4aa6STejun Heo 		return rc;
7161553c4aa6STejun Heo 	}
7162553c4aa6STejun Heo 
7163c6fd2807SJeff Garzik 	pci_set_master(pdev);
7164553c4aa6STejun Heo 	return 0;
7165c6fd2807SJeff Garzik }
7166c6fd2807SJeff Garzik 
7167c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7168c6fd2807SJeff Garzik {
7169cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7170c6fd2807SJeff Garzik 	int rc = 0;
7171c6fd2807SJeff Garzik 
7172cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
7173c6fd2807SJeff Garzik 	if (rc)
7174c6fd2807SJeff Garzik 		return rc;
7175c6fd2807SJeff Garzik 
7176c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
7177c6fd2807SJeff Garzik 
7178c6fd2807SJeff Garzik 	return 0;
7179c6fd2807SJeff Garzik }
7180c6fd2807SJeff Garzik 
7181c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
7182c6fd2807SJeff Garzik {
7183cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7184553c4aa6STejun Heo 	int rc;
7185c6fd2807SJeff Garzik 
7186553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
7187553c4aa6STejun Heo 	if (rc == 0)
7188cca3974eSJeff Garzik 		ata_host_resume(host);
7189553c4aa6STejun Heo 	return rc;
7190c6fd2807SJeff Garzik }
71916ffa01d8STejun Heo #endif /* CONFIG_PM */
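
/*
 * Illustrative sketch (not part of the original file): how a PCI LLD
 * typically wires the helpers above into its pci_driver.  "foo_pci_ids"
 * and "foo_init_one" are hypothetical driver symbols.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */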
71926ffa01d8STejun Heo 
7193c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7194c6fd2807SJeff Garzik 
7195c6fd2807SJeff Garzik 
7196c6fd2807SJeff Garzik static int __init ata_init(void)
7197c6fd2807SJeff Garzik {
7198c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
7199c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7200c6fd2807SJeff Garzik 	if (!ata_wq)
7201c6fd2807SJeff Garzik 		return -ENOMEM;
7202c6fd2807SJeff Garzik 
7203c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7204c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7205c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7206c6fd2807SJeff Garzik 		return -ENOMEM;
7207c6fd2807SJeff Garzik 	}
7208c6fd2807SJeff Garzik 
7209c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7210c6fd2807SJeff Garzik 	return 0;
7211c6fd2807SJeff Garzik }
7212c6fd2807SJeff Garzik 
7213c6fd2807SJeff Garzik static void __exit ata_exit(void)
7214c6fd2807SJeff Garzik {
7215c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7216c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7217c6fd2807SJeff Garzik }
7218c6fd2807SJeff Garzik 
7219a4625085SBrian King subsys_initcall(ata_init);
7220c6fd2807SJeff Garzik module_exit(ata_exit);
7221c6fd2807SJeff Garzik 
7222c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7223c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7224c6fd2807SJeff Garzik 
7225c6fd2807SJeff Garzik int ata_ratelimit(void)
7226c6fd2807SJeff Garzik {
7227c6fd2807SJeff Garzik 	int rc;
7228c6fd2807SJeff Garzik 	unsigned long flags;
7229c6fd2807SJeff Garzik 
7230c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7231c6fd2807SJeff Garzik 
7232c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7233c6fd2807SJeff Garzik 		rc = 1;
7234c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7235c6fd2807SJeff Garzik 	} else
7236c6fd2807SJeff Garzik 		rc = 0;
7237c6fd2807SJeff Garzik 
7238c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7239c6fd2807SJeff Garzik 
7240c6fd2807SJeff Garzik 	return rc;
7241c6fd2807SJeff Garzik }
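
/*
 * Illustrative usage sketch (not part of the original file): the usual
 * ata_ratelimit() pattern, guarding a noisy per-interrupt message so the
 * log is not flooded.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt, ignoring\n");
 */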
7242c6fd2807SJeff Garzik 
7243c6fd2807SJeff Garzik /**
7244c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7245c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7246c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7247c6fd2807SJeff Garzik  *	@val: Wait condition
7248c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7249c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7250c6fd2807SJeff Garzik  *
7251c6fd2807SJeff Garzik  *	Waiting for some bits of a register to change is a common
7252c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
7253c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
7254c6fd2807SJeff Garzik  *
7255c6fd2807SJeff Garzik  *	(*@reg & mask) != val
7256c6fd2807SJeff Garzik  *
7257c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7258c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7259c6fd2807SJeff Garzik  *
7260c6fd2807SJeff Garzik  *	LOCKING:
7261c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7262c6fd2807SJeff Garzik  *
7263c6fd2807SJeff Garzik  *	RETURNS:
7264c6fd2807SJeff Garzik  *	The final register value.
7265c6fd2807SJeff Garzik  */
7266c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7267c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7268c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7269c6fd2807SJeff Garzik {
7270c6fd2807SJeff Garzik 	unsigned long timeout;
7271c6fd2807SJeff Garzik 	u32 tmp;
7272c6fd2807SJeff Garzik 
7273c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7274c6fd2807SJeff Garzik 
7275c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7276c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7277c6fd2807SJeff Garzik 	 * eat away the timeout.
7278c6fd2807SJeff Garzik 	 */
7279c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7280c6fd2807SJeff Garzik 
7281c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7282c6fd2807SJeff Garzik 		msleep(interval_msec);
7283c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7284c6fd2807SJeff Garzik 	}
7285c6fd2807SJeff Garzik 
7286c6fd2807SJeff Garzik 	return tmp;
7287c6fd2807SJeff Garzik }
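
/*
 * Illustrative usage sketch (not part of the original file): waiting for a
 * controller-specific busy bit to clear.  "mmio_base", "FOO_STATUS" and
 * "FOO_BUSY" are hypothetical; the call polls every 10 ms for up to 500 ms
 * while (reg & FOO_BUSY) == FOO_BUSY still holds.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio_base + FOO_STATUS,
 *				   FOO_BUSY, FOO_BUSY, 10, 500);
 *	if (status & FOO_BUSY)
 *		(timed out, the busy bit never cleared)
 */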
7288c6fd2807SJeff Garzik 
7289c6fd2807SJeff Garzik /*
7290c6fd2807SJeff Garzik  * Dummy port_ops
7291c6fd2807SJeff Garzik  */
7292c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7293c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7294c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7295c6fd2807SJeff Garzik 
7296c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7297c6fd2807SJeff Garzik {
7298c6fd2807SJeff Garzik 	return ATA_DRDY;
7299c6fd2807SJeff Garzik }
7300c6fd2807SJeff Garzik 
7301c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7302c6fd2807SJeff Garzik {
7303c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7304c6fd2807SJeff Garzik }
7305c6fd2807SJeff Garzik 
7306c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7307c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7308c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7309c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7310c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7311c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7312c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7313c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7314c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7315c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7316c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7317c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7318c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7319c6fd2807SJeff Garzik };
7320c6fd2807SJeff Garzik 
732121b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
732221b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
732321b0ad4fSTejun Heo };
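
/*
 * Illustrative sketch (not part of the original file): a controller that
 * exposes a port which is not actually wired up can plug
 * &ata_dummy_port_info into its port-info array so the slot is registered
 * as a dummy port.  "foo_port_info" is a hypothetical driver symbol.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, 2);
 */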
732421b0ad4fSTejun Heo 
7325c6fd2807SJeff Garzik /*
7326c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7327c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7328c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7329c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7330c6fd2807SJeff Garzik  */
7331c6fd2807SJeff Garzik 
7332c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7333c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7334c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7335c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
733621b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7337c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7338c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7339cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7340f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7341f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7342ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7343f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7344f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
73450529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7346c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7347c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
7348c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7349c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7350c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7351c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7352c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7353c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7354c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7355c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
735643727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7357c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7358c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7359c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7360c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7361c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7362c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7363d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7364c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
736504351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
73660d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
73670d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
736831cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7369c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7370d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7371c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7372c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7373c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7374c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7375c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7376c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7377c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7378c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7379c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7380c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7381c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7382c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
738310305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7384c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7385936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7386936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7387c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
7388c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
7389c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7390c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7391c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7392cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7393c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7394c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7395c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7396c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7397c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7398c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7399c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7400c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
740188ff6eafSTejun Heo EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7402d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7403c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
7404c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7405c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7406c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7407c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7408c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7409c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7410c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7411c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7412c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7413c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7414936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7415936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
74166ffa01d8STejun Heo #ifdef CONFIG_PM
7417cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7418cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
74196ffa01d8STejun Heo #endif /* CONFIG_PM */
7420c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7421c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
742210305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7423c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7424c6fd2807SJeff Garzik 
7425c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7426c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7427c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7428c6fd2807SJeff Garzik 
7429c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7430c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7431d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
74321626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7433d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7434c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7435c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
74366ffa01d8STejun Heo #ifdef CONFIG_PM
7437c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7438c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7439c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7440c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
74416ffa01d8STejun Heo #endif /* CONFIG_PM */
7442c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7443c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7444c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7445c6fd2807SJeff Garzik 
744631f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
74473af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
74483af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
74493af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
74503af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
74513af9a77aSTejun Heo 
7452b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7453b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7454b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7455cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7456cbcdd875STejun Heo #ifdef CONFIG_PCI
7457cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7458cbcdd875STejun Heo #endif /* CONFIG_PCI */
7459c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
7460c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7461dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7462c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7463c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
74647d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7465c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7466c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7467c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7468c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7469c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
747083625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7471a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7472be0d18dfSAlan Cox 
7473be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7474be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7475be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7476be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7477