xref: /openbmc/linux/drivers/ata/libata-core.c (revision acd054a5)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36c6fd2807SJeff Garzik #include <linux/module.h>
37c6fd2807SJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <linux/init.h>
39c6fd2807SJeff Garzik #include <linux/list.h>
40c6fd2807SJeff Garzik #include <linux/mm.h>
41c6fd2807SJeff Garzik #include <linux/highmem.h>
42c6fd2807SJeff Garzik #include <linux/spinlock.h>
43c6fd2807SJeff Garzik #include <linux/blkdev.h>
44c6fd2807SJeff Garzik #include <linux/delay.h>
45c6fd2807SJeff Garzik #include <linux/timer.h>
46c6fd2807SJeff Garzik #include <linux/interrupt.h>
47c6fd2807SJeff Garzik #include <linux/completion.h>
48c6fd2807SJeff Garzik #include <linux/suspend.h>
49c6fd2807SJeff Garzik #include <linux/workqueue.h>
50c6fd2807SJeff Garzik #include <linux/jiffies.h>
51c6fd2807SJeff Garzik #include <linux/scatterlist.h>
522dcb407eSJeff Garzik #include <linux/io.h>
53c6fd2807SJeff Garzik #include <scsi/scsi.h>
54c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
55c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
56c6fd2807SJeff Garzik #include <linux/libata.h>
57c6fd2807SJeff Garzik #include <asm/semaphore.h>
58c6fd2807SJeff Garzik #include <asm/byteorder.h>
59c6fd2807SJeff Garzik 
60c6fd2807SJeff Garzik #include "libata.h"
61c6fd2807SJeff Garzik 
62fda0efc5SJeff Garzik 
63c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
64c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
65c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
67c6fd2807SJeff Garzik 
68c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
69c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
70c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev,
72218f3d30SJeff Garzik 					u8 enable, u8 feature);
73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
7475683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75c6fd2807SJeff Garzik 
76f3187195STejun Heo unsigned int ata_print_id = 1;
77c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
78c6fd2807SJeff Garzik 
79c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
80c6fd2807SJeff Garzik 
81c6fd2807SJeff Garzik int atapi_enabled = 1;
82c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
83c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84c6fd2807SJeff Garzik 
85c6fd2807SJeff Garzik int atapi_dmadir = 0;
86c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
87c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88c6fd2807SJeff Garzik 
89baf4fdfaSMark Lord int atapi_passthru16 = 1;
90baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444);
91baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92baf4fdfaSMark Lord 
93c6fd2807SJeff Garzik int libata_fua = 0;
94c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
95c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96c6fd2807SJeff Garzik 
972dcb407eSJeff Garzik static int ata_ignore_hpa;
981e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
991e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
1001e999736SAlan Cox 
101b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
102b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444);
103b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
104b3a70601SAlan Cox 
105c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
106c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
107c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
108c6fd2807SJeff Garzik 
1096ebe9d86SJeff Garzik int libata_noacpi = 0;
110d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
1116ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11211ef697bSKristen Carlson Accardi 
113c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
114c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
115c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
116c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
117c6fd2807SJeff Garzik 
118c6fd2807SJeff Garzik 
119c6fd2807SJeff Garzik /**
120c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
121c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
122c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
1239977126cSTejun Heo  *	@is_cmd: This FIS is for a command
1249977126cSTejun Heo  *	@fis: Buffer into which data will be output
125c6fd2807SJeff Garzik  *
126c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
127c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
128c6fd2807SJeff Garzik  *
129c6fd2807SJeff Garzik  *	LOCKING:
130c6fd2807SJeff Garzik  *	Inherited from caller.
131c6fd2807SJeff Garzik  */
1329977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
133c6fd2807SJeff Garzik {
134c6fd2807SJeff Garzik 	fis[0] = 0x27;			/* Register - Host to Device FIS */
1359977126cSTejun Heo 	fis[1] = pmp & 0xf;		/* Port multiplier number */
1369977126cSTejun Heo 	if (is_cmd)
1379977126cSTejun Heo 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
1389977126cSTejun Heo 
139c6fd2807SJeff Garzik 	fis[2] = tf->command;
140c6fd2807SJeff Garzik 	fis[3] = tf->feature;
141c6fd2807SJeff Garzik 
142c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
143c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
144c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
145c6fd2807SJeff Garzik 	fis[7] = tf->device;
146c6fd2807SJeff Garzik 
147c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
148c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
149c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
150c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
151c6fd2807SJeff Garzik 
152c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
153c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
154c6fd2807SJeff Garzik 	fis[14] = 0;
155c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
156c6fd2807SJeff Garzik 
157c6fd2807SJeff Garzik 	fis[16] = 0;
158c6fd2807SJeff Garzik 	fis[17] = 0;
159c6fd2807SJeff Garzik 	fis[18] = 0;
160c6fd2807SJeff Garzik 	fis[19] = 0;
161c6fd2807SJeff Garzik }
162c6fd2807SJeff Garzik 
163c6fd2807SJeff Garzik /**
164c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
165c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
166c6fd2807SJeff Garzik  *	@tf: Taskfile to output
167c6fd2807SJeff Garzik  *
168c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
169c6fd2807SJeff Garzik  *
170c6fd2807SJeff Garzik  *	LOCKING:
171c6fd2807SJeff Garzik  *	Inherited from caller.
172c6fd2807SJeff Garzik  */
173c6fd2807SJeff Garzik 
174c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
175c6fd2807SJeff Garzik {
176c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
177c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
178c6fd2807SJeff Garzik 
179c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
180c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
181c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
182c6fd2807SJeff Garzik 	tf->device	= fis[7];
183c6fd2807SJeff Garzik 
184c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
185c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
186c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
187c6fd2807SJeff Garzik 
188c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
189c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
190c6fd2807SJeff Garzik }
191c6fd2807SJeff Garzik 
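
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a low-level driver might serialize a taskfile into the 20-byte
 * Register - Host to Device FIS written to its command table.  The
 * function name and the cmd_tbl buffer are hypothetical.
 */
#if 0
static void example_build_h2d_fis(struct ata_device *dev, u8 *cmd_tbl)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;	/* IDENTIFY DEVICE */
	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* pmp 0 (no port multiplier), is_cmd 1 -> bit 7 of fis[1] is set */
	ata_tf_to_fis(&tf, 0, 1, cmd_tbl);
}
#endif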
192c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
193c6fd2807SJeff Garzik 	/* pio multi */
194c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
195c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
196c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
197c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
198c6fd2807SJeff Garzik 	0,
199c6fd2807SJeff Garzik 	0,
200c6fd2807SJeff Garzik 	0,
201c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
202c6fd2807SJeff Garzik 	/* pio */
203c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
204c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
205c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
206c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	0,
209c6fd2807SJeff Garzik 	0,
210c6fd2807SJeff Garzik 	0,
211c6fd2807SJeff Garzik 	/* dma */
212c6fd2807SJeff Garzik 	ATA_CMD_READ,
213c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
214c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
215c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
216c6fd2807SJeff Garzik 	0,
217c6fd2807SJeff Garzik 	0,
218c6fd2807SJeff Garzik 	0,
219c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
220c6fd2807SJeff Garzik };
221c6fd2807SJeff Garzik 
222c6fd2807SJeff Garzik /**
223c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
224bd056d7eSTejun Heo  *	@tf: command to examine and configure
225bd056d7eSTejun Heo  *	@dev: device tf belongs to
226c6fd2807SJeff Garzik  *
227c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
228c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
229c6fd2807SJeff Garzik  *
230c6fd2807SJeff Garzik  *	LOCKING:
231c6fd2807SJeff Garzik  *	caller.
232c6fd2807SJeff Garzik  */
233bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
234c6fd2807SJeff Garzik {
235c6fd2807SJeff Garzik 	u8 cmd;
236c6fd2807SJeff Garzik 
237c6fd2807SJeff Garzik 	int index, fua, lba48, write;
238c6fd2807SJeff Garzik 
239c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
240c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
241c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
242c6fd2807SJeff Garzik 
243c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
244c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
245c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
2469af5c9c9STejun Heo 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
247c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
248c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
249c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
250c6fd2807SJeff Garzik 	} else {
251c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
252c6fd2807SJeff Garzik 		index = 16;
253c6fd2807SJeff Garzik 	}
254c6fd2807SJeff Garzik 
255c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
256c6fd2807SJeff Garzik 	if (cmd) {
257c6fd2807SJeff Garzik 		tf->command = cmd;
258c6fd2807SJeff Garzik 		return 0;
259c6fd2807SJeff Garzik 	}
260c6fd2807SJeff Garzik 	return -1;
261c6fd2807SJeff Garzik }
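
/*
 * Worked example (editor's addition): ata_rw_cmds[] above is indexed as
 * base + fua + lba48 + write, where base is 0 for PIO multi-sector, 8 for
 * plain PIO and 16 for DMA.  A DMA write with LBA48 and FUA selects entry
 * 16 + 4 + 2 + 1 = 23 (ATA_CMD_WRITE_FUA_EXT); the matching read selects
 * entry 16 + 2 = 18 (ATA_CMD_READ_EXT).  Combinations that map to a zero
 * entry, such as FUA without LBA48, make ata_rwcmd_protocol() return -1.
 */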
262c6fd2807SJeff Garzik 
263c6fd2807SJeff Garzik /**
26435b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
26535b649feSTejun Heo  *	@tf: ATA taskfile of interest
26635b649feSTejun Heo  *	@dev: ATA device @tf belongs to
26735b649feSTejun Heo  *
26835b649feSTejun Heo  *	LOCKING:
26935b649feSTejun Heo  *	None.
27035b649feSTejun Heo  *
27135b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
27235b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
27335b649feSTejun Heo  *	flags select the address format to use.
27435b649feSTejun Heo  *
27535b649feSTejun Heo  *	RETURNS:
27635b649feSTejun Heo  *	Block address read from @tf.
27735b649feSTejun Heo  */
27835b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
27935b649feSTejun Heo {
28035b649feSTejun Heo 	u64 block = 0;
28135b649feSTejun Heo 
28235b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
28335b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
28435b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
28535b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
28635b649feSTejun Heo 			block |= tf->hob_lbal << 24;
28735b649feSTejun Heo 		} else
28835b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
28935b649feSTejun Heo 
29035b649feSTejun Heo 		block |= tf->lbah << 16;
29135b649feSTejun Heo 		block |= tf->lbam << 8;
29235b649feSTejun Heo 		block |= tf->lbal;
29335b649feSTejun Heo 	} else {
29435b649feSTejun Heo 		u32 cyl, head, sect;
29535b649feSTejun Heo 
29635b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
29735b649feSTejun Heo 		head = tf->device & 0xf;
29835b649feSTejun Heo 		sect = tf->lbal;
29935b649feSTejun Heo 
30035b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
30135b649feSTejun Heo 	}
30235b649feSTejun Heo 
30335b649feSTejun Heo 	return block;
30435b649feSTejun Heo }
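
/*
 * Worked example (editor's addition): for an LBA48 taskfile with
 * hob_lbah/hob_lbam/hob_lbal = 0x00/0x01/0x02 and lbah/lbam/lbal =
 * 0x03/0x04/0x05, the bytes above reassemble to block 0x000102030405.
 */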
30535b649feSTejun Heo 
30635b649feSTejun Heo /**
307bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
308bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
309bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
310bd056d7eSTejun Heo  *	@block: Block address
311bd056d7eSTejun Heo  *	@n_block: Number of blocks
312bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
313bd056d7eSTejun Heo  *	@tag: tag
314bd056d7eSTejun Heo  *
315bd056d7eSTejun Heo  *	LOCKING:
316bd056d7eSTejun Heo  *	None.
317bd056d7eSTejun Heo  *
318bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
319bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
320bd056d7eSTejun Heo  *
321bd056d7eSTejun Heo  *	RETURNS:
322bd056d7eSTejun Heo  *
323bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
324bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
325bd056d7eSTejun Heo  */
326bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
327bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
328bd056d7eSTejun Heo 		    unsigned int tag)
329bd056d7eSTejun Heo {
330bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
331bd056d7eSTejun Heo 	tf->flags |= tf_flags;
332bd056d7eSTejun Heo 
3336d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
334bd056d7eSTejun Heo 		/* yay, NCQ */
335bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
336bd056d7eSTejun Heo 			return -ERANGE;
337bd056d7eSTejun Heo 
338bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
339bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
340bd056d7eSTejun Heo 
341bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
342bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
343bd056d7eSTejun Heo 		else
344bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
345bd056d7eSTejun Heo 
346bd056d7eSTejun Heo 		tf->nsect = tag << 3;
347bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
348bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
349bd056d7eSTejun Heo 
350bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
351bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
352bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
353bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
354bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
355bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
356bd056d7eSTejun Heo 
357bd056d7eSTejun Heo 		tf->device = 1 << 6;
358bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
359bd056d7eSTejun Heo 			tf->device |= 1 << 7;
360bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
361bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
362bd056d7eSTejun Heo 
363bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
364bd056d7eSTejun Heo 			/* use LBA28 */
365bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
366bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
367bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
368bd056d7eSTejun Heo 				return -ERANGE;
369bd056d7eSTejun Heo 
370bd056d7eSTejun Heo 			/* use LBA48 */
371bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
372bd056d7eSTejun Heo 
373bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
374bd056d7eSTejun Heo 
375bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
376bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
377bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
378bd056d7eSTejun Heo 		} else
379bd056d7eSTejun Heo 			/* request too large even for LBA48 */
380bd056d7eSTejun Heo 			return -ERANGE;
381bd056d7eSTejun Heo 
382bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
383bd056d7eSTejun Heo 			return -EINVAL;
384bd056d7eSTejun Heo 
385bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
386bd056d7eSTejun Heo 
387bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
388bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
389bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
390bd056d7eSTejun Heo 
391bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
392bd056d7eSTejun Heo 	} else {
393bd056d7eSTejun Heo 		/* CHS */
394bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
395bd056d7eSTejun Heo 
396bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
397bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
398bd056d7eSTejun Heo 			return -ERANGE;
399bd056d7eSTejun Heo 
400bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
401bd056d7eSTejun Heo 			return -EINVAL;
402bd056d7eSTejun Heo 
403bd056d7eSTejun Heo 		/* Convert LBA to CHS */
404bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
405bd056d7eSTejun Heo 		cyl   = track / dev->heads;
406bd056d7eSTejun Heo 		head  = track % dev->heads;
407bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
408bd056d7eSTejun Heo 
409bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
410bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
411bd056d7eSTejun Heo 
412bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
413bd056d7eSTejun Heo 		   Cylinder: 0-65535
414bd056d7eSTejun Heo 		   Head: 0-15
415bd056d7eSTejun Heo 		   Sector: 1-255 */
416bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
417bd056d7eSTejun Heo 			return -ERANGE;
418bd056d7eSTejun Heo 
419bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
420bd056d7eSTejun Heo 		tf->lbal = sect;
421bd056d7eSTejun Heo 		tf->lbam = cyl;
422bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
423bd056d7eSTejun Heo 		tf->device |= head;
424bd056d7eSTejun Heo 	}
425bd056d7eSTejun Heo 
426bd056d7eSTejun Heo 	return 0;
427bd056d7eSTejun Heo }
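
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * building the taskfile for a queued (NCQ) read.  The function name, the
 * caller-supplied taskfile and the tag are hypothetical; a write would pass
 * ATA_TFLAG_WRITE (plus ATA_TFLAG_FUA if requested) in tf_flags instead.
 */
#if 0
static int example_build_ncq_read(struct ata_device *dev,
				  struct ata_taskfile *tf,
				  u64 block, u32 n_block, unsigned int tag)
{
	ata_tf_init(dev, tf);

	/* with NCQ enabled and a normal tag this selects ATA_CMD_FPDMA_READ */
	return ata_build_rw_tf(tf, dev, block, n_block, 0, tag);
}
#endif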
428bd056d7eSTejun Heo 
429bd056d7eSTejun Heo /**
430c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
431c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
432c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
433c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
434c6fd2807SJeff Garzik  *
435c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
436c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
437c6fd2807SJeff Garzik  *
438c6fd2807SJeff Garzik  *	LOCKING:
439c6fd2807SJeff Garzik  *	None.
440c6fd2807SJeff Garzik  *
441c6fd2807SJeff Garzik  *	RETURNS:
442c6fd2807SJeff Garzik  *	Packed xfer_mask.
443c6fd2807SJeff Garzik  */
444c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
445c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
446c6fd2807SJeff Garzik 				      unsigned int udma_mask)
447c6fd2807SJeff Garzik {
448c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
449c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
450c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
451c6fd2807SJeff Garzik }
452c6fd2807SJeff Garzik 
453c6fd2807SJeff Garzik /**
454c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
455c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
456c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
457c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
458c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
459c6fd2807SJeff Garzik  *
460c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
461c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
462c6fd2807SJeff Garzik  */
463c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
464c6fd2807SJeff Garzik 				unsigned int *pio_mask,
465c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
466c6fd2807SJeff Garzik 				unsigned int *udma_mask)
467c6fd2807SJeff Garzik {
468c6fd2807SJeff Garzik 	if (pio_mask)
469c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
470c6fd2807SJeff Garzik 	if (mwdma_mask)
471c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
472c6fd2807SJeff Garzik 	if (udma_mask)
473c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
474c6fd2807SJeff Garzik }
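
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * packing per-class masks (here PIO0-4, MWDMA0-2, UDMA0-5) into a single
 * xfer_mask and unpacking them again.  The function name is hypothetical.
 */
#if 0
static void example_pack_unpack(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

	/* NULL destination pointers would simply be skipped */
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
}
#endif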
475c6fd2807SJeff Garzik 
476c6fd2807SJeff Garzik static const struct ata_xfer_ent {
477c6fd2807SJeff Garzik 	int shift, bits;
478c6fd2807SJeff Garzik 	u8 base;
479c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
480c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
481c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
482c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
483c6fd2807SJeff Garzik 	{ -1, },
484c6fd2807SJeff Garzik };
485c6fd2807SJeff Garzik 
486c6fd2807SJeff Garzik /**
487c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
488c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
489c6fd2807SJeff Garzik  *
490c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
491c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
492c6fd2807SJeff Garzik  *
493c6fd2807SJeff Garzik  *	LOCKING:
494c6fd2807SJeff Garzik  *	None.
495c6fd2807SJeff Garzik  *
496c6fd2807SJeff Garzik  *	RETURNS:
497c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
498c6fd2807SJeff Garzik  */
499c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
500c6fd2807SJeff Garzik {
501c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
502c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
503c6fd2807SJeff Garzik 
504c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
505c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
506c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
507c6fd2807SJeff Garzik 	return 0;
508c6fd2807SJeff Garzik }
509c6fd2807SJeff Garzik 
510c6fd2807SJeff Garzik /**
511c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
512c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
513c6fd2807SJeff Garzik  *
514c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
515c6fd2807SJeff Garzik  *
516c6fd2807SJeff Garzik  *	LOCKING:
517c6fd2807SJeff Garzik  *	None.
518c6fd2807SJeff Garzik  *
519c6fd2807SJeff Garzik  *	RETURNS:
520c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
521c6fd2807SJeff Garzik  */
522c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
523c6fd2807SJeff Garzik {
524c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
525c6fd2807SJeff Garzik 
526c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
527c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
528c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
529c6fd2807SJeff Garzik 	return 0;
530c6fd2807SJeff Garzik }
531c6fd2807SJeff Garzik 
532c6fd2807SJeff Garzik /**
533c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
534c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
535c6fd2807SJeff Garzik  *
536c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
537c6fd2807SJeff Garzik  *
538c6fd2807SJeff Garzik  *	LOCKING:
539c6fd2807SJeff Garzik  *	None.
540c6fd2807SJeff Garzik  *
541c6fd2807SJeff Garzik  *	RETURNS:
542c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
543c6fd2807SJeff Garzik  */
544c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
545c6fd2807SJeff Garzik {
546c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
547c6fd2807SJeff Garzik 
548c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
549c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550c6fd2807SJeff Garzik 			return ent->shift;
551c6fd2807SJeff Garzik 	return -1;
552c6fd2807SJeff Garzik }
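
/*
 * Worked example (editor's addition): a mask whose highest set bit is the
 * UDMA5 bit (1 << (ATA_SHIFT_UDMA + 5)) falls in the UDMA row of
 * ata_xfer_tbl[], so ata_xfer_mask2mode() returns XFER_UDMA_0 + 5, i.e.
 * XFER_UDMA_5.  Conversely, ata_xfer_mode2mask(XFER_UDMA_5) returns that
 * single bit and ata_xfer_mode2shift(XFER_UDMA_5) returns ATA_SHIFT_UDMA.
 */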
553c6fd2807SJeff Garzik 
554c6fd2807SJeff Garzik /**
555c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
556c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
557c6fd2807SJeff Garzik  *
558c6fd2807SJeff Garzik  *	Determine the string which represents the highest speed
559c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
560c6fd2807SJeff Garzik  *
561c6fd2807SJeff Garzik  *	LOCKING:
562c6fd2807SJeff Garzik  *	None.
563c6fd2807SJeff Garzik  *
564c6fd2807SJeff Garzik  *	RETURNS:
565c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
566c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
567c6fd2807SJeff Garzik  */
568c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
569c6fd2807SJeff Garzik {
570c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
571c6fd2807SJeff Garzik 		"PIO0",
572c6fd2807SJeff Garzik 		"PIO1",
573c6fd2807SJeff Garzik 		"PIO2",
574c6fd2807SJeff Garzik 		"PIO3",
575c6fd2807SJeff Garzik 		"PIO4",
576b352e57dSAlan Cox 		"PIO5",
577b352e57dSAlan Cox 		"PIO6",
578c6fd2807SJeff Garzik 		"MWDMA0",
579c6fd2807SJeff Garzik 		"MWDMA1",
580c6fd2807SJeff Garzik 		"MWDMA2",
581b352e57dSAlan Cox 		"MWDMA3",
582b352e57dSAlan Cox 		"MWDMA4",
583c6fd2807SJeff Garzik 		"UDMA/16",
584c6fd2807SJeff Garzik 		"UDMA/25",
585c6fd2807SJeff Garzik 		"UDMA/33",
586c6fd2807SJeff Garzik 		"UDMA/44",
587c6fd2807SJeff Garzik 		"UDMA/66",
588c6fd2807SJeff Garzik 		"UDMA/100",
589c6fd2807SJeff Garzik 		"UDMA/133",
590c6fd2807SJeff Garzik 		"UDMA7",
591c6fd2807SJeff Garzik 	};
592c6fd2807SJeff Garzik 	int highbit;
593c6fd2807SJeff Garzik 
594c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
595c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
596c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
597c6fd2807SJeff Garzik 	return "<n/a>";
598c6fd2807SJeff Garzik }
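
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reporting the fastest mode a merged mask allows.  The function name and
 * its arguments are hypothetical.
 */
#if 0
static void example_report_mode(struct ata_device *dev, unsigned int xfer_mask)
{
	ata_dev_printk(dev, KERN_INFO, "max mode %s\n",
		       ata_mode_string(xfer_mask));
}
#endif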
599c6fd2807SJeff Garzik 
600c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
601c6fd2807SJeff Garzik {
602c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
603c6fd2807SJeff Garzik 		"1.5 Gbps",
604c6fd2807SJeff Garzik 		"3.0 Gbps",
605c6fd2807SJeff Garzik 	};
606c6fd2807SJeff Garzik 
607c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
608c6fd2807SJeff Garzik 		return "<unknown>";
609c6fd2807SJeff Garzik 	return spd_str[spd - 1];
610c6fd2807SJeff Garzik }
611c6fd2807SJeff Garzik 
612c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
613c6fd2807SJeff Garzik {
61409d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
6159af5c9c9STejun Heo 		if (ata_msg_drv(dev->link->ap))
616c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6174ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6184ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
619c6fd2807SJeff Garzik 		dev->class++;
620c6fd2807SJeff Garzik 	}
621c6fd2807SJeff Garzik }
622c6fd2807SJeff Garzik 
623c6fd2807SJeff Garzik /**
624c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
625c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
626c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
627c6fd2807SJeff Garzik  *
6280d5ff566STejun Heo  *	This technique was originally described in
6290d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
6300d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
6310d5ff566STejun Heo  *
6320d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
6330d5ff566STejun Heo  *	and if a device is present, it will respond by
6340d5ff566STejun Heo  *	correctly storing and echoing back the
6350d5ff566STejun Heo  *	ATA shadow register contents.
636c6fd2807SJeff Garzik  *
637c6fd2807SJeff Garzik  *	LOCKING:
638c6fd2807SJeff Garzik  *	caller.
639c6fd2807SJeff Garzik  */
640c6fd2807SJeff Garzik 
6410d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
642c6fd2807SJeff Garzik {
6430d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
6440d5ff566STejun Heo 	u8 nsect, lbal;
6450d5ff566STejun Heo 
6460d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
6470d5ff566STejun Heo 
6480d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6490d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6500d5ff566STejun Heo 
6510d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
6520d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
6530d5ff566STejun Heo 
6540d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
6550d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
6560d5ff566STejun Heo 
6570d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
6580d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
6590d5ff566STejun Heo 
6600d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
6610d5ff566STejun Heo 		return 1;	/* we found a device */
6620d5ff566STejun Heo 
6630d5ff566STejun Heo 	return 0;		/* nothing found */
664c6fd2807SJeff Garzik }
665c6fd2807SJeff Garzik 
666c6fd2807SJeff Garzik /**
667c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
668c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
669c6fd2807SJeff Garzik  *
670c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
671c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
672c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
673c6fd2807SJeff Garzik  *
674c6fd2807SJeff Garzik  *	LOCKING:
675c6fd2807SJeff Garzik  *	None.
676c6fd2807SJeff Garzik  *
677c6fd2807SJeff Garzik  *	RETURNS:
678633273a3STejun Heo  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
679633273a3STejun Heo  *	%ATA_DEV_UNKNOWN in the event of failure.
680c6fd2807SJeff Garzik  */
681c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
682c6fd2807SJeff Garzik {
683c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
684c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
685c6fd2807SJeff Garzik 	 * so we only check those.  It's sufficient for uniqueness.
686633273a3STejun Heo 	 *
687633273a3STejun Heo 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
688633273a3STejun Heo 	 * signatures for ATA and ATAPI devices attached on SerialATA,
689633273a3STejun Heo 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
690633273a3STejun Heo 	 * spec never mentioned using different signatures
691633273a3STejun Heo 	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
692633273a3STejun Heo 	 * Multiplier specification began to use 0x69/0x96 to identify
693633273a3STejun Heo 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
694633273a3STejun Heo 	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
695633273a3STejun Heo 	 * 0x69/0x96 and described them as reserved for
696633273a3STejun Heo 	 * SerialATA.
697633273a3STejun Heo 	 *
698633273a3STejun Heo 	 * We follow the current spec and consider that 0x69/0x96
699633273a3STejun Heo 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
700c6fd2807SJeff Garzik 	 */
701633273a3STejun Heo 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
702c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
703c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
704c6fd2807SJeff Garzik 	}
705c6fd2807SJeff Garzik 
706633273a3STejun Heo 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
707c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
708c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
709c6fd2807SJeff Garzik 	}
710c6fd2807SJeff Garzik 
711633273a3STejun Heo 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
712633273a3STejun Heo 		DPRINTK("found PMP device by sig\n");
713633273a3STejun Heo 		return ATA_DEV_PMP;
714633273a3STejun Heo 	}
715633273a3STejun Heo 
716633273a3STejun Heo 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
7172dcb407eSJeff Garzik 		printk(KERN_INFO "ata: SEMB device ignored\n");
718633273a3STejun Heo 		return ATA_DEV_SEMB_UNSUP; /* not yet */
719633273a3STejun Heo 	}
720633273a3STejun Heo 
721c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
722c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
723c6fd2807SJeff Garzik }
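
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a reset path might classify whatever responded, by reading the
 * signature left in the shadow registers.  The function name is
 * hypothetical.
 */
#if 0
static unsigned int example_classify_after_reset(struct ata_port *ap)
{
	struct ata_taskfile tf;

	ap->ops->tf_read(ap, &tf);

	/* lbam/lbah 0x00/0x00 -> ATA, 0x14/0xeb -> ATAPI, 0x69/0x96 -> PMP */
	return ata_dev_classify(&tf);
}
#endif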
724c6fd2807SJeff Garzik 
725c6fd2807SJeff Garzik /**
726c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
7273f19859eSTejun Heo  *	@dev: ATA device to classify
7283f19859eSTejun Heo  *	@present: device seems present
729c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
730c6fd2807SJeff Garzik  *
731c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
732c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
733c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
734c6fd2807SJeff Garzik  *	and diagnostics.
735c6fd2807SJeff Garzik  *
736c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
737c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
738c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
739c6fd2807SJeff Garzik  *
740c6fd2807SJeff Garzik  *	LOCKING:
741c6fd2807SJeff Garzik  *	caller.
742c6fd2807SJeff Garzik  *
743c6fd2807SJeff Garzik  *	RETURNS:
744c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
745c6fd2807SJeff Garzik  */
7463f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
7473f19859eSTejun Heo 				  u8 *r_err)
748c6fd2807SJeff Garzik {
7493f19859eSTejun Heo 	struct ata_port *ap = dev->link->ap;
750c6fd2807SJeff Garzik 	struct ata_taskfile tf;
751c6fd2807SJeff Garzik 	unsigned int class;
752c6fd2807SJeff Garzik 	u8 err;
753c6fd2807SJeff Garzik 
7543f19859eSTejun Heo 	ap->ops->dev_select(ap, dev->devno);
755c6fd2807SJeff Garzik 
756c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
757c6fd2807SJeff Garzik 
758c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
759c6fd2807SJeff Garzik 	err = tf.feature;
760c6fd2807SJeff Garzik 	if (r_err)
761c6fd2807SJeff Garzik 		*r_err = err;
762c6fd2807SJeff Garzik 
76393590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
7643f19859eSTejun Heo 	if (err == 0 && dev->devno == 0)
76593590859SAlan Cox 		/* diagnostic fail: do nothing _YET_ */
7663f19859eSTejun Heo 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
76793590859SAlan Cox 	else if (err == 1)
768c6fd2807SJeff Garzik 		/* do nothing */ ;
7693f19859eSTejun Heo 	else if ((dev->devno == 0) && (err == 0x81))
770c6fd2807SJeff Garzik 		/* do nothing */ ;
771c6fd2807SJeff Garzik 	else
772c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
773c6fd2807SJeff Garzik 
774c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
775c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
776c6fd2807SJeff Garzik 
777d7fbee05STejun Heo 	if (class == ATA_DEV_UNKNOWN) {
778d7fbee05STejun Heo 		/* If the device failed diagnostic, it's likely to
779d7fbee05STejun Heo 		 * have reported incorrect device signature too.
780d7fbee05STejun Heo 		 * Assume ATA device if the device seems present but
781d7fbee05STejun Heo 		 * device signature is invalid with diagnostic
782d7fbee05STejun Heo 		 * failure.
783d7fbee05STejun Heo 		 */
784d7fbee05STejun Heo 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
785d7fbee05STejun Heo 			class = ATA_DEV_ATA;
786d7fbee05STejun Heo 		else
787d7fbee05STejun Heo 			class = ATA_DEV_NONE;
788d7fbee05STejun Heo 	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
789d7fbee05STejun Heo 		class = ATA_DEV_NONE;
790d7fbee05STejun Heo 
791c6fd2807SJeff Garzik 	return class;
792c6fd2807SJeff Garzik }
793c6fd2807SJeff Garzik 
794c6fd2807SJeff Garzik /**
795c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
796c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
797c6fd2807SJeff Garzik  *	@s: string into which data is output
798c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
799c6fd2807SJeff Garzik  *	@len: length of string to return. Must be an even number.
800c6fd2807SJeff Garzik  *
801c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
802c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
803c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
804c6fd2807SJeff Garzik  *
805c6fd2807SJeff Garzik  *	LOCKING:
806c6fd2807SJeff Garzik  *	caller.
807c6fd2807SJeff Garzik  */
808c6fd2807SJeff Garzik 
809c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
810c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
811c6fd2807SJeff Garzik {
812c6fd2807SJeff Garzik 	unsigned int c;
813c6fd2807SJeff Garzik 
814c6fd2807SJeff Garzik 	while (len > 0) {
815c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
816c6fd2807SJeff Garzik 		*s = c;
817c6fd2807SJeff Garzik 		s++;
818c6fd2807SJeff Garzik 
819c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
820c6fd2807SJeff Garzik 		*s = c;
821c6fd2807SJeff Garzik 		s++;
822c6fd2807SJeff Garzik 
823c6fd2807SJeff Garzik 		ofs++;
824c6fd2807SJeff Garzik 		len -= 2;
825c6fd2807SJeff Garzik 	}
826c6fd2807SJeff Garzik }
827c6fd2807SJeff Garzik 
828c6fd2807SJeff Garzik /**
829c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
830c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
831c6fd2807SJeff Garzik  *	@s: string into which data is output
832c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
833c6fd2807SJeff Garzik  *	@len: length of string to return. Must be an odd number.
834c6fd2807SJeff Garzik  *
835c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
836c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
837c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
838c6fd2807SJeff Garzik  *
839c6fd2807SJeff Garzik  *	LOCKING:
840c6fd2807SJeff Garzik  *	caller.
841c6fd2807SJeff Garzik  */
842c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
843c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
844c6fd2807SJeff Garzik {
845c6fd2807SJeff Garzik 	unsigned char *p;
846c6fd2807SJeff Garzik 
847c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
848c6fd2807SJeff Garzik 
849c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
850c6fd2807SJeff Garzik 
851c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
852c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
853c6fd2807SJeff Garzik 		p--;
854c6fd2807SJeff Garzik 	*p = '\0';
855c6fd2807SJeff Garzik }
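
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * pulling the model string out of IDENTIFY data.  Assumes the ATA_ID_PROD
 * and ATA_ID_PROD_LEN constants from <linux/ata.h>; the function name is
 * hypothetical.
 */
#if 0
static void example_show_model(struct ata_device *dev)
{
	/* one extra byte for the '\0' terminator -> odd buffer length */
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
	ata_dev_printk(dev, KERN_INFO, "model: %s\n", model);
}
#endif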
856c6fd2807SJeff Garzik 
857db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id)
858db6f8759STejun Heo {
859db6f8759STejun Heo 	if (ata_id_has_lba(id)) {
860db6f8759STejun Heo 		if (ata_id_has_lba48(id))
861db6f8759STejun Heo 			return ata_id_u64(id, 100);
862db6f8759STejun Heo 		else
863db6f8759STejun Heo 			return ata_id_u32(id, 60);
864db6f8759STejun Heo 	} else {
865db6f8759STejun Heo 		if (ata_id_current_chs_valid(id))
866db6f8759STejun Heo 			return ata_id_u32(id, 57);
867db6f8759STejun Heo 		else
868db6f8759STejun Heo 			return id[1] * id[3] * id[6];
869db6f8759STejun Heo 	}
870db6f8759STejun Heo }
871db6f8759STejun Heo 
8721e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
8731e999736SAlan Cox {
8741e999736SAlan Cox 	u64 sectors = 0;
8751e999736SAlan Cox 
8761e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
8771e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
8781e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
8791e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8801e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8811e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8821e999736SAlan Cox 
8831e999736SAlan Cox 	return ++sectors;
8841e999736SAlan Cox }
8851e999736SAlan Cox 
8861e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
8871e999736SAlan Cox {
8881e999736SAlan Cox 	u64 sectors = 0;
8891e999736SAlan Cox 
8901e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
8911e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
8921e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
8931e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
8941e999736SAlan Cox 
8951e999736SAlan Cox 	return ++sectors;
8961e999736SAlan Cox }
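
/*
 * Worked example (editor's addition): a READ NATIVE MAX ADDRESS EXT
 * completion with hob_lbah/hob_lbam/hob_lbal = 0x00/0x00/0x3A and
 * lbah/lbam/lbal = 0x38/0x5F/0xFF encodes a highest addressable LBA of
 * 0x3A385FFF; ata_tf_to_lba48() adds one and returns 976773120 sectors,
 * i.e. a 500 GB (about 465 GiB) disk at 512 bytes per sector.
 */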
8971e999736SAlan Cox 
8981e999736SAlan Cox /**
899c728a914STejun Heo  *	ata_read_native_max_address - Read native max address
900c728a914STejun Heo  *	@dev: target device
901c728a914STejun Heo  *	@max_sectors: out parameter for the result native max address
9021e999736SAlan Cox  *
903c728a914STejun Heo  *	Perform an LBA48 or LBA28 native size query upon the device in
904c728a914STejun Heo  *	question.
905c728a914STejun Heo  *
906c728a914STejun Heo  *	RETURNS:
907c728a914STejun Heo  *	0 on success, -EACCES if command is aborted by the drive.
908c728a914STejun Heo  *	-EIO on other errors.
9091e999736SAlan Cox  */
910c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
9111e999736SAlan Cox {
912c728a914STejun Heo 	unsigned int err_mask;
9131e999736SAlan Cox 	struct ata_taskfile tf;
914c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
9151e999736SAlan Cox 
9161e999736SAlan Cox 	ata_tf_init(dev, &tf);
9171e999736SAlan Cox 
918c728a914STejun Heo 	/* always clear all address registers */
9191e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
920c728a914STejun Heo 
921c728a914STejun Heo 	if (lba48) {
922c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
923c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
924c728a914STejun Heo 	} else
925c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX;
926c728a914STejun Heo 
9271e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
928c728a914STejun Heo 	tf.device |= ATA_LBA;
9291e999736SAlan Cox 
9302b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
931c728a914STejun Heo 	if (err_mask) {
932c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
933c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
934c728a914STejun Heo 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
935c728a914STejun Heo 			return -EACCES;
936c728a914STejun Heo 		return -EIO;
937c728a914STejun Heo 	}
938c728a914STejun Heo 
939c728a914STejun Heo 	if (lba48)
940c728a914STejun Heo 		*max_sectors = ata_tf_to_lba48(&tf);
941c728a914STejun Heo 	else
942c728a914STejun Heo 		*max_sectors = ata_tf_to_lba(&tf);
94393328e11SAlan Cox 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
94493328e11SAlan Cox 		(*max_sectors)--;
9451e999736SAlan Cox 	return 0;
9461e999736SAlan Cox }
9471e999736SAlan Cox 
9481e999736SAlan Cox /**
949c728a914STejun Heo  *	ata_set_max_sectors - Set max sectors
950c728a914STejun Heo  *	@dev: target device
9516b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
9521e999736SAlan Cox  *
953c728a914STejun Heo  *	Set max sectors of @dev to @new_sectors.
954c728a914STejun Heo  *
955c728a914STejun Heo  *	RETURNS:
956c728a914STejun Heo  *	0 on success, -EACCES if command is aborted or denied (due to
957c728a914STejun Heo  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
958c728a914STejun Heo  *	errors.
9591e999736SAlan Cox  */
96005027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
9611e999736SAlan Cox {
962c728a914STejun Heo 	unsigned int err_mask;
9631e999736SAlan Cox 	struct ata_taskfile tf;
964c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
9651e999736SAlan Cox 
9661e999736SAlan Cox 	new_sectors--;
9671e999736SAlan Cox 
9681e999736SAlan Cox 	ata_tf_init(dev, &tf);
9691e999736SAlan Cox 
970c728a914STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
9711e999736SAlan Cox 
972c728a914STejun Heo 	if (lba48) {
973c728a914STejun Heo 		tf.command = ATA_CMD_SET_MAX_EXT;
974c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
9751e999736SAlan Cox 
9761e999736SAlan Cox 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
9771e999736SAlan Cox 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
9781e999736SAlan Cox 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
9791e582ba4STejun Heo 	} else {
9801e999736SAlan Cox 		tf.command = ATA_CMD_SET_MAX;
981c728a914STejun Heo 
9821e582ba4STejun Heo 		tf.device |= (new_sectors >> 24) & 0xf;
9831e582ba4STejun Heo 	}
9841e582ba4STejun Heo 
9851e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
986c728a914STejun Heo 	tf.device |= ATA_LBA;
9871e999736SAlan Cox 
9881e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
9891e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
9901e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
9911e999736SAlan Cox 
9922b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
993c728a914STejun Heo 	if (err_mask) {
994c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
995c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
996c728a914STejun Heo 		if (err_mask == AC_ERR_DEV &&
997c728a914STejun Heo 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
998c728a914STejun Heo 			return -EACCES;
999c728a914STejun Heo 		return -EIO;
1000c728a914STejun Heo 	}
1001c728a914STejun Heo 
10021e999736SAlan Cox 	return 0;
10031e999736SAlan Cox }
10041e999736SAlan Cox 
10051e999736SAlan Cox /**
10061e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
10071e999736SAlan Cox  *	@dev: Device to resize
10081e999736SAlan Cox  *
10091e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
10101e999736SAlan Cox  *	it, if required, to the full size of the media. The caller must check
10111e999736SAlan Cox  *	that the drive has the HPA feature set enabled.
101205027adcSTejun Heo  *
101305027adcSTejun Heo  *	RETURNS:
101405027adcSTejun Heo  *	0 on success, -errno on failure.
10151e999736SAlan Cox  */
101605027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev)
10171e999736SAlan Cox {
101805027adcSTejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
101905027adcSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
102005027adcSTejun Heo 	u64 sectors = ata_id_n_sectors(dev->id);
102105027adcSTejun Heo 	u64 native_sectors;
1022c728a914STejun Heo 	int rc;
10231e999736SAlan Cox 
102405027adcSTejun Heo 	/* do we need to do it? */
102505027adcSTejun Heo 	if (dev->class != ATA_DEV_ATA ||
102605027adcSTejun Heo 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
102705027adcSTejun Heo 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1028c728a914STejun Heo 		return 0;
10291e999736SAlan Cox 
103005027adcSTejun Heo 	/* read native max address */
103105027adcSTejun Heo 	rc = ata_read_native_max_address(dev, &native_sectors);
103205027adcSTejun Heo 	if (rc) {
103305027adcSTejun Heo 		/* If HPA isn't going to be unlocked, skip HPA
103405027adcSTejun Heo 		 * resizing from the next try.
103505027adcSTejun Heo 		 */
103605027adcSTejun Heo 		if (!ata_ignore_hpa) {
103705027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
103805027adcSTejun Heo 				       "broken, will skip HPA handling\n");
103905027adcSTejun Heo 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
104005027adcSTejun Heo 
104105027adcSTejun Heo 			/* we can continue if device aborted the command */
104205027adcSTejun Heo 			if (rc == -EACCES)
104305027adcSTejun Heo 				rc = 0;
104405027adcSTejun Heo 		}
104505027adcSTejun Heo 
104605027adcSTejun Heo 		return rc;
104705027adcSTejun Heo 	}
104805027adcSTejun Heo 
104905027adcSTejun Heo 	/* nothing to do? */
105005027adcSTejun Heo 	if (native_sectors <= sectors || !ata_ignore_hpa) {
105105027adcSTejun Heo 		if (!print_info || native_sectors == sectors)
105205027adcSTejun Heo 			return 0;
105305027adcSTejun Heo 
105405027adcSTejun Heo 		if (native_sectors > sectors)
10551e999736SAlan Cox 			ata_dev_printk(dev, KERN_INFO,
105605027adcSTejun Heo 				"HPA detected: current %llu, native %llu\n",
105705027adcSTejun Heo 				(unsigned long long)sectors,
105805027adcSTejun Heo 				(unsigned long long)native_sectors);
105905027adcSTejun Heo 		else if (native_sectors < sectors)
106005027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING,
106105027adcSTejun Heo 				"native sectors (%llu) is smaller than "
106205027adcSTejun Heo 				"sectors (%llu)\n",
106305027adcSTejun Heo 				(unsigned long long)native_sectors,
106405027adcSTejun Heo 				(unsigned long long)sectors);
106505027adcSTejun Heo 		return 0;
10661e999736SAlan Cox 	}
106737301a55STejun Heo 
106805027adcSTejun Heo 	/* let's unlock HPA */
106905027adcSTejun Heo 	rc = ata_set_max_sectors(dev, native_sectors);
107005027adcSTejun Heo 	if (rc == -EACCES) {
107105027adcSTejun Heo 		/* if device aborted the command, skip HPA resizing */
107205027adcSTejun Heo 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
107305027adcSTejun Heo 			       "(%llu -> %llu), skipping HPA handling\n",
107405027adcSTejun Heo 			       (unsigned long long)sectors,
107505027adcSTejun Heo 			       (unsigned long long)native_sectors);
107605027adcSTejun Heo 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
107705027adcSTejun Heo 		return 0;
107805027adcSTejun Heo 	} else if (rc)
107905027adcSTejun Heo 		return rc;
108005027adcSTejun Heo 
108105027adcSTejun Heo 	/* re-read IDENTIFY data */
108205027adcSTejun Heo 	rc = ata_dev_reread_id(dev, 0);
108305027adcSTejun Heo 	if (rc) {
108405027adcSTejun Heo 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
108505027adcSTejun Heo 			       "data after HPA resizing\n");
108605027adcSTejun Heo 		return rc;
108705027adcSTejun Heo 	}
108805027adcSTejun Heo 
108905027adcSTejun Heo 	if (print_info) {
109005027adcSTejun Heo 		u64 new_sectors = ata_id_n_sectors(dev->id);
109105027adcSTejun Heo 		ata_dev_printk(dev, KERN_INFO,
109205027adcSTejun Heo 			"HPA unlocked: %llu -> %llu, native %llu\n",
109305027adcSTejun Heo 			(unsigned long long)sectors,
109405027adcSTejun Heo 			(unsigned long long)new_sectors,
109505027adcSTejun Heo 			(unsigned long long)native_sectors);
109605027adcSTejun Heo 	}
109705027adcSTejun Heo 
109805027adcSTejun Heo 	return 0;
10991e999736SAlan Cox }
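
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * ata_hpa_resize() is intended to be called from the device configuration
 * path once IDENTIFY data is available, roughly like:
 */
#if 0
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;	/* let EH retry or disable the device */
#endif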
11001e999736SAlan Cox 
1101c6fd2807SJeff Garzik /**
110210305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
110310305f0fSAlan  *	@dev: device to identify
1104cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
110510305f0fSAlan  *
110610305f0fSAlan  *	Set up the timing values for the device based upon the identify
110710305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
110810305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
110910305f0fSAlan  *	mode correctly when possible.
111010305f0fSAlan  *
111110305f0fSAlan  *	In addition we emit messages formatted like those of the default
111210305f0fSAlan  *	ata_dev_set_mode handler, in order to provide consistency of
111310305f0fSAlan  *	presentation.
111410305f0fSAlan  */
111510305f0fSAlan 
111610305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
111710305f0fSAlan {
111810305f0fSAlan 	unsigned int mask;
111910305f0fSAlan 	u8 mode;
112010305f0fSAlan 
112110305f0fSAlan 	/* Pack the DMA modes */
112210305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
112310305f0fSAlan 	if (dev->id[53] & 0x04)
112410305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
112510305f0fSAlan 
112610305f0fSAlan 	/* Select the mode in use */
112710305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
112810305f0fSAlan 
112910305f0fSAlan 	if (mode != 0) {
113010305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
113110305f0fSAlan 		       ata_mode_string(mask));
113210305f0fSAlan 	} else {
113310305f0fSAlan 		/* SWDMA perhaps ? */
113410305f0fSAlan 		mode = unknown;
113510305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
113610305f0fSAlan 	}
113710305f0fSAlan 
113810305f0fSAlan 	/* Configure the device reporting */
113910305f0fSAlan 	dev->xfer_mode = mode;
114010305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
114110305f0fSAlan }
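
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a driver that relies on firmware-configured modes might call this from
 * its mode-setup hook, falling back to MWDMA0 when the IDENTIFY data does
 * not identify the active DMA mode:
 */
#if 0
	ata_id_to_dma_mode(dev, XFER_MW_DMA_0);
#endif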
114210305f0fSAlan 
114310305f0fSAlan /**
1144c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1145c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1146c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1147c6fd2807SJeff Garzik  *
1148c6fd2807SJeff Garzik  *	This function performs no actual function.
1149c6fd2807SJeff Garzik  *	This function performs no operation.
1150c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1151c6fd2807SJeff Garzik  *
1152c6fd2807SJeff Garzik  *	LOCKING:
1153c6fd2807SJeff Garzik  *	caller.
1154c6fd2807SJeff Garzik  */
1155c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1156c6fd2807SJeff Garzik {
1157c6fd2807SJeff Garzik }
1158c6fd2807SJeff Garzik 
1159c6fd2807SJeff Garzik 
1160c6fd2807SJeff Garzik /**
1161c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1162c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1163c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1164c6fd2807SJeff Garzik  *
1165c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1166c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1167c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1168c6fd2807SJeff Garzik  *
1169c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1170c6fd2807SJeff Garzik  *
1171c6fd2807SJeff Garzik  *	LOCKING:
1172c6fd2807SJeff Garzik  *	caller.
1173c6fd2807SJeff Garzik  */
1174c6fd2807SJeff Garzik 
1175c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1176c6fd2807SJeff Garzik {
1177c6fd2807SJeff Garzik 	u8 tmp;
1178c6fd2807SJeff Garzik 
1179c6fd2807SJeff Garzik 	if (device == 0)
1180c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1181c6fd2807SJeff Garzik 	else
1182c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1183c6fd2807SJeff Garzik 
11840d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1185c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1186c6fd2807SJeff Garzik }
1187c6fd2807SJeff Garzik 
1188c6fd2807SJeff Garzik /**
1189c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1190c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1191c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1192c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1193c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1194c6fd2807SJeff Garzik  *
1195c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1196c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1197c6fd2807SJeff Garzik  *	ATA channel.
1198c6fd2807SJeff Garzik  *
1199c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1200c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1201c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1202c6fd2807SJeff Garzik  *
1203c6fd2807SJeff Garzik  *	LOCKING:
1204c6fd2807SJeff Garzik  *	caller.
1205c6fd2807SJeff Garzik  */
1206c6fd2807SJeff Garzik 
1207c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1208c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1209c6fd2807SJeff Garzik {
1210c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
121144877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
121244877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1213c6fd2807SJeff Garzik 
1214c6fd2807SJeff Garzik 	if (wait)
1215c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1216c6fd2807SJeff Garzik 
1217c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1218c6fd2807SJeff Garzik 
1219c6fd2807SJeff Garzik 	if (wait) {
12209af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1221c6fd2807SJeff Garzik 			msleep(150);
1222c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1223c6fd2807SJeff Garzik 	}
1224c6fd2807SJeff Garzik }
1225c6fd2807SJeff Garzik 
1226c6fd2807SJeff Garzik /**
1227c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1228c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1229c6fd2807SJeff Garzik  *
1230c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1231c6fd2807SJeff Garzik  *	page.
1232c6fd2807SJeff Garzik  *
1233c6fd2807SJeff Garzik  *	LOCKING:
1234c6fd2807SJeff Garzik  *	caller.
1235c6fd2807SJeff Garzik  */
1236c6fd2807SJeff Garzik 
1237c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1238c6fd2807SJeff Garzik {
1239c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1240c6fd2807SJeff Garzik 		"53==0x%04x  "
1241c6fd2807SJeff Garzik 		"63==0x%04x  "
1242c6fd2807SJeff Garzik 		"64==0x%04x  "
1243c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1244c6fd2807SJeff Garzik 		id[49],
1245c6fd2807SJeff Garzik 		id[53],
1246c6fd2807SJeff Garzik 		id[63],
1247c6fd2807SJeff Garzik 		id[64],
1248c6fd2807SJeff Garzik 		id[75]);
1249c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1250c6fd2807SJeff Garzik 		"81==0x%04x  "
1251c6fd2807SJeff Garzik 		"82==0x%04x  "
1252c6fd2807SJeff Garzik 		"83==0x%04x  "
1253c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1254c6fd2807SJeff Garzik 		id[80],
1255c6fd2807SJeff Garzik 		id[81],
1256c6fd2807SJeff Garzik 		id[82],
1257c6fd2807SJeff Garzik 		id[83],
1258c6fd2807SJeff Garzik 		id[84]);
1259c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1260c6fd2807SJeff Garzik 		"93==0x%04x\n",
1261c6fd2807SJeff Garzik 		id[88],
1262c6fd2807SJeff Garzik 		id[93]);
1263c6fd2807SJeff Garzik }
1264c6fd2807SJeff Garzik 
1265c6fd2807SJeff Garzik /**
1266c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1267c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1268c6fd2807SJeff Garzik  *
1269c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1270c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1271c6fd2807SJeff Garzik  *
1272c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1273c6fd2807SJeff Garzik  *
1274c6fd2807SJeff Garzik  *	LOCKING:
1275c6fd2807SJeff Garzik  *	None.
1276c6fd2807SJeff Garzik  *
1277c6fd2807SJeff Garzik  *	RETURNS:
1278c6fd2807SJeff Garzik  *	Computed xfermask
1279c6fd2807SJeff Garzik  */
1280c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1281c6fd2807SJeff Garzik {
1282c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1283c6fd2807SJeff Garzik 
1284c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1285c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1286c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1287c6fd2807SJeff Garzik 		pio_mask <<= 3;
1288c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1289c6fd2807SJeff Garzik 	} else {
1290c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1291c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1292c6fd2807SJeff Garzik 		 * a mask.
1293c6fd2807SJeff Garzik 		 */
12947a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
129546767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
129646767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
129746767aebSAlan Cox 		else
129846767aebSAlan Cox 			pio_mask = 1;
1299c6fd2807SJeff Garzik 
1300c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1301c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1302c6fd2807SJeff Garzik 		 * process. However it's the speeds, not the modes, that
1303c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1304c6fd2807SJeff Garzik 		 * will get this right anyway
1305c6fd2807SJeff Garzik 		 */
1306c6fd2807SJeff Garzik 	}
1307c6fd2807SJeff Garzik 
1308c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1309c6fd2807SJeff Garzik 
1310b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1311b352e57dSAlan Cox 		/*
1312b352e57dSAlan Cox 		 *	Process compact flash extended modes
1313b352e57dSAlan Cox 		 */
1314b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1315b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1316b352e57dSAlan Cox 
1317b352e57dSAlan Cox 		if (pio)
1318b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1319b352e57dSAlan Cox 		if (pio > 1)
1320b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1321b352e57dSAlan Cox 		if (dma)
1322b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1323b352e57dSAlan Cox 		if (dma > 1)
1324b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1325b352e57dSAlan Cox 	}
1326b352e57dSAlan Cox 
1327c6fd2807SJeff Garzik 	udma_mask = 0;
1328c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1329c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1330c6fd2807SJeff Garzik 
1331c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1332c6fd2807SJeff Garzik }
1333c6fd2807SJeff Garzik 
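/*
 * Illustrative sketch only: the packed mask is what ata_dev_configure()
 * below feeds to ata_mode_string() for the "max ..." line in dmesg, e.g.
 *
 *	unsigned int xfer_mask = ata_id_xfermask(dev->id);
 *
 *	ata_dev_printk(dev, KERN_DEBUG, "max %s\n", ata_mode_string(xfer_mask));
 */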
1334c6fd2807SJeff Garzik /**
1335c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1336c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1337c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
133865f27f38SDavid Howells  *	@data: data for @fn to use
1339c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1340c6fd2807SJeff Garzik  *
1341c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1342c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1343c6fd2807SJeff Garzik  *	user's (the low level driver's) responsibility to make sure that only
1344c6fd2807SJeff Garzik  *	one task is active at any given time.
1345c6fd2807SJeff Garzik  *
1346c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1347c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1348c6fd2807SJeff Garzik  *	synchronization.
1349c6fd2807SJeff Garzik  *
1350c6fd2807SJeff Garzik  *	LOCKING:
1351c6fd2807SJeff Garzik  *	Inherited from caller.
1352c6fd2807SJeff Garzik  */
135365f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1354c6fd2807SJeff Garzik 			 unsigned long delay)
1355c6fd2807SJeff Garzik {
135665f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
135765f27f38SDavid Howells 	ap->port_task_data = data;
1358c6fd2807SJeff Garzik 
135945a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
136045a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1361c6fd2807SJeff Garzik }
1362c6fd2807SJeff Garzik 
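/*
 * Illustrative sketch only: a polled transfer continuation might be pushed
 * to process context like this; 'my_pio_task' is a hypothetical work
 * function, not a libata symbol.
 *
 *	ata_port_queue_task(ap, my_pio_task, qc, msecs_to_jiffies(10));
 */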
1363c6fd2807SJeff Garzik /**
1364c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1365c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1366c6fd2807SJeff Garzik  *
1367c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1368c6fd2807SJeff Garzik  *	be running or scheduled.
1369c6fd2807SJeff Garzik  *
1370c6fd2807SJeff Garzik  *	LOCKING:
1371c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1372c6fd2807SJeff Garzik  */
1373c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1374c6fd2807SJeff Garzik {
1375c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1376c6fd2807SJeff Garzik 
137745a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1378c6fd2807SJeff Garzik 
1379c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1380c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1381c6fd2807SJeff Garzik }
1382c6fd2807SJeff Garzik 
13837102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1384c6fd2807SJeff Garzik {
1385c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1386c6fd2807SJeff Garzik 
1387c6fd2807SJeff Garzik 	complete(waiting);
1388c6fd2807SJeff Garzik }
1389c6fd2807SJeff Garzik 
1390c6fd2807SJeff Garzik /**
13912432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1392c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1393c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1394c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1395c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
13965c1ad8b3SRandy Dunlap  *	@sgl: sg list for the data buffer of the command
13972432697bSTejun Heo  *	@n_elem: Number of sg entries
13982b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
1399c6fd2807SJeff Garzik  *
1400c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1401c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1402c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1403c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1404c6fd2807SJeff Garzik  *	clean up after timeout.
1405c6fd2807SJeff Garzik  *
1406c6fd2807SJeff Garzik  *	LOCKING:
1407c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1408c6fd2807SJeff Garzik  *
1409c6fd2807SJeff Garzik  *	RETURNS:
1410c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1411c6fd2807SJeff Garzik  */
14122432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1413c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
141487260216SJens Axboe 			      int dma_dir, struct scatterlist *sgl,
14152b789108STejun Heo 			      unsigned int n_elem, unsigned long timeout)
1416c6fd2807SJeff Garzik {
14179af5c9c9STejun Heo 	struct ata_link *link = dev->link;
14189af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1419c6fd2807SJeff Garzik 	u8 command = tf->command;
1420c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1421c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1422c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1423da917d69STejun Heo 	int preempted_nr_active_links;
1424c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1425c6fd2807SJeff Garzik 	unsigned long flags;
1426c6fd2807SJeff Garzik 	unsigned int err_mask;
1427c6fd2807SJeff Garzik 	int rc;
1428c6fd2807SJeff Garzik 
1429c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1430c6fd2807SJeff Garzik 
1431c6fd2807SJeff Garzik 	/* no internal command while frozen */
1432c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1433c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1434c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1435c6fd2807SJeff Garzik 	}
1436c6fd2807SJeff Garzik 
1437c6fd2807SJeff Garzik 	/* initialize internal qc */
1438c6fd2807SJeff Garzik 
1439c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1440c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1441c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1442c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1443c6fd2807SJeff Garzik 	 */
1444c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1445c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1446c6fd2807SJeff Garzik 	else
1447c6fd2807SJeff Garzik 		tag = 0;
1448c6fd2807SJeff Garzik 
1449c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1450c6fd2807SJeff Garzik 		BUG();
1451c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1452c6fd2807SJeff Garzik 
1453c6fd2807SJeff Garzik 	qc->tag = tag;
1454c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1455c6fd2807SJeff Garzik 	qc->ap = ap;
1456c6fd2807SJeff Garzik 	qc->dev = dev;
1457c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1458c6fd2807SJeff Garzik 
14599af5c9c9STejun Heo 	preempted_tag = link->active_tag;
14609af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1461c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1462da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
14639af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
14649af5c9c9STejun Heo 	link->sactive = 0;
1465c6fd2807SJeff Garzik 	ap->qc_active = 0;
1466da917d69STejun Heo 	ap->nr_active_links = 0;
1467c6fd2807SJeff Garzik 
1468c6fd2807SJeff Garzik 	/* prepare & issue qc */
1469c6fd2807SJeff Garzik 	qc->tf = *tf;
1470c6fd2807SJeff Garzik 	if (cdb)
1471c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1472c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1473c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1474c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
14752432697bSTejun Heo 		unsigned int i, buflen = 0;
147687260216SJens Axboe 		struct scatterlist *sg;
14772432697bSTejun Heo 
147887260216SJens Axboe 		for_each_sg(sgl, sg, n_elem, i)
147987260216SJens Axboe 			buflen += sg->length;
14802432697bSTejun Heo 
148187260216SJens Axboe 		ata_sg_init(qc, sgl, n_elem);
148249c80429SBrian King 		qc->nbytes = buflen;
1483c6fd2807SJeff Garzik 	}
1484c6fd2807SJeff Garzik 
1485c6fd2807SJeff Garzik 	qc->private_data = &wait;
1486c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1487c6fd2807SJeff Garzik 
1488c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1489c6fd2807SJeff Garzik 
1490c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1491c6fd2807SJeff Garzik 
14922b789108STejun Heo 	if (!timeout)
14932b789108STejun Heo 		timeout = ata_probe_timeout * 1000 / HZ;
14942b789108STejun Heo 
14952b789108STejun Heo 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1496c6fd2807SJeff Garzik 
1497c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1498c6fd2807SJeff Garzik 
1499c6fd2807SJeff Garzik 	if (!rc) {
1500c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1501c6fd2807SJeff Garzik 
1502c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1503c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1504c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1505c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1506c6fd2807SJeff Garzik 		 */
1507c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1508c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1509c6fd2807SJeff Garzik 
1510c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1511c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1512c6fd2807SJeff Garzik 			else
1513c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1514c6fd2807SJeff Garzik 
1515c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1516c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1517c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1518c6fd2807SJeff Garzik 		}
1519c6fd2807SJeff Garzik 
1520c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1521c6fd2807SJeff Garzik 	}
1522c6fd2807SJeff Garzik 
1523c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1524c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1525c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1526c6fd2807SJeff Garzik 
1527a51d644aSTejun Heo 	/* perform minimal error analysis */
1528a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1529a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1530a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1531a51d644aSTejun Heo 
1532a51d644aSTejun Heo 		if (!qc->err_mask)
1533c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1534a51d644aSTejun Heo 
1535a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1536a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1537c6fd2807SJeff Garzik 	}
1538c6fd2807SJeff Garzik 
1539c6fd2807SJeff Garzik 	/* finish up */
1540c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1541c6fd2807SJeff Garzik 
1542c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1543c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1544c6fd2807SJeff Garzik 
1545c6fd2807SJeff Garzik 	ata_qc_free(qc);
15469af5c9c9STejun Heo 	link->active_tag = preempted_tag;
15479af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1548c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1549da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1550c6fd2807SJeff Garzik 
1551c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1552c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1553c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1554c6fd2807SJeff Garzik 	 * port.
1555c6fd2807SJeff Garzik 	 *
1556c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1557c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1558c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1559c6fd2807SJeff Garzik 	 *
1560c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1561c6fd2807SJeff Garzik 	 */
1562c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1563c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1564c6fd2807SJeff Garzik 		ata_port_probe(ap);
1565c6fd2807SJeff Garzik 	}
1566c6fd2807SJeff Garzik 
1567c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1568c6fd2807SJeff Garzik 
1569c6fd2807SJeff Garzik 	return err_mask;
1570c6fd2807SJeff Garzik }
1571c6fd2807SJeff Garzik 
1572c6fd2807SJeff Garzik /**
157333480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
15742432697bSTejun Heo  *	@dev: Device to which the command is sent
15752432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
15762432697bSTejun Heo  *	@cdb: CDB for packet command
15772432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
15782432697bSTejun Heo  *	@buf: Data buffer of the command
15792432697bSTejun Heo  *	@buflen: Length of data buffer
15802b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
15812432697bSTejun Heo  *
15822432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
15832432697bSTejun Heo  *	buffer instead of sg list.
15842432697bSTejun Heo  *
15852432697bSTejun Heo  *	LOCKING:
15862432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
15872432697bSTejun Heo  *
15882432697bSTejun Heo  *	RETURNS:
15892432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
15902432697bSTejun Heo  */
15912432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
15922432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
15932b789108STejun Heo 			   int dma_dir, void *buf, unsigned int buflen,
15942b789108STejun Heo 			   unsigned long timeout)
15952432697bSTejun Heo {
159633480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
159733480a0eSTejun Heo 	unsigned int n_elem = 0;
15982432697bSTejun Heo 
159933480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
160033480a0eSTejun Heo 		WARN_ON(!buf);
16012432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
160233480a0eSTejun Heo 		psg = &sg;
160333480a0eSTejun Heo 		n_elem++;
160433480a0eSTejun Heo 	}
16052432697bSTejun Heo 
16062b789108STejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
16072b789108STejun Heo 				    timeout);
16082432697bSTejun Heo }
16092432697bSTejun Heo 
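/*
 * Illustrative sketch only: a PIO data-in internal command, patterned on
 * the IDENTIFY issue in ata_dev_read_id() further down.  'buf' is assumed
 * to be a caller supplied buffer of ATA_ID_WORDS u16s.
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     buf, sizeof(u16) * ATA_ID_WORDS, 0);
 */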
16102432697bSTejun Heo /**
1611c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1612c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1613c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1614c6fd2807SJeff Garzik  *
1615c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1616c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1617c6fd2807SJeff Garzik  *
1618c6fd2807SJeff Garzik  *	LOCKING:
1619c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1620c6fd2807SJeff Garzik  *
1621c6fd2807SJeff Garzik  *	RETURNS:
1622c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1623c6fd2807SJeff Garzik  */
1624c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1625c6fd2807SJeff Garzik {
1626c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1627c6fd2807SJeff Garzik 
1628c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1629c6fd2807SJeff Garzik 
1630c6fd2807SJeff Garzik 	tf.command = cmd;
1631c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1632c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1633c6fd2807SJeff Garzik 
16342b789108STejun Heo 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1635c6fd2807SJeff Garzik }
1636c6fd2807SJeff Garzik 
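/*
 * Illustrative sketch only: a cache flush reduces to a single no-data
 * opcode and so maps directly onto this helper; err_mask is the usual
 * AC_ERR_* result mask.
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 */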
1637c6fd2807SJeff Garzik /**
1638c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1639c6fd2807SJeff Garzik  *	@adev: ATA device
1640c6fd2807SJeff Garzik  *
1641c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1642c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1643c6fd2807SJeff Garzik  */
1644c6fd2807SJeff Garzik 
1645c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1646c6fd2807SJeff Garzik {
1647432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check
1648432729f0SAlan Cox 	   as the caller should know this */
16499af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1650c6fd2807SJeff Garzik 		return 0;
1651432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1652432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1653c6fd2807SJeff Garzik 		return 1;
1654432729f0SAlan Cox 	/* We turn it on when possible */
1655432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1656432729f0SAlan Cox 		return 1;
1657432729f0SAlan Cox 	return 0;
1658432729f0SAlan Cox }
1659c6fd2807SJeff Garzik 
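/*
 * Illustrative sketch only: in a controller's ->set_piomode() hook the
 * result is usually folded into a timing register; 'IORDY_EN' here is a
 * made-up register bit, not a libata define.
 *
 *	if (ata_pio_need_iordy(adev))
 *		timing |= IORDY_EN;
 */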
1660432729f0SAlan Cox /**
1661432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1662432729f0SAlan Cox  *	@adev: ATA device
1663432729f0SAlan Cox  *
1664432729f0SAlan Cox  *	Compute the highest mode possible if we are not using iordy.  Returns
1665432729f0SAlan Cox  *	the resulting PIO mode mask.
1666432729f0SAlan Cox  */
1667432729f0SAlan Cox 
1668432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1669432729f0SAlan Cox {
1670c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1671c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1672432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1673c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1674c6fd2807SJeff Garzik 		if (pio) {
1675c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1676c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1677432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1678432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1679c6fd2807SJeff Garzik 		}
1680c6fd2807SJeff Garzik 	}
1681432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1682c6fd2807SJeff Garzik }
1683c6fd2807SJeff Garzik 
1684c6fd2807SJeff Garzik /**
1685c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1686c6fd2807SJeff Garzik  *	@dev: target device
1687c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1688bff04647STejun Heo  *	@flags: ATA_READID_* flags
1689c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1690c6fd2807SJeff Garzik  *
1691c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1692c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1693c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1694c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1695c6fd2807SJeff Garzik  *
169650a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
169750a99018SAlan Cox  *	now we abort if we hit that case.
169850a99018SAlan Cox  *
1699c6fd2807SJeff Garzik  *	LOCKING:
1700c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1701c6fd2807SJeff Garzik  *
1702c6fd2807SJeff Garzik  *	RETURNS:
1703c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1704c6fd2807SJeff Garzik  */
1705c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1706bff04647STejun Heo 		    unsigned int flags, u16 *id)
1707c6fd2807SJeff Garzik {
17089af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1709c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1710c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1711c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1712c6fd2807SJeff Garzik 	const char *reason;
171354936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1714c6fd2807SJeff Garzik 	int rc;
1715c6fd2807SJeff Garzik 
1716c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
171744877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1718c6fd2807SJeff Garzik 
1719c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1720c6fd2807SJeff Garzik  retry:
1721c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1722c6fd2807SJeff Garzik 
1723c6fd2807SJeff Garzik 	switch (class) {
1724c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1725c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1726c6fd2807SJeff Garzik 		break;
1727c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1728c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1729c6fd2807SJeff Garzik 		break;
1730c6fd2807SJeff Garzik 	default:
1731c6fd2807SJeff Garzik 		rc = -ENODEV;
1732c6fd2807SJeff Garzik 		reason = "unsupported class";
1733c6fd2807SJeff Garzik 		goto err_out;
1734c6fd2807SJeff Garzik 	}
1735c6fd2807SJeff Garzik 
1736c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
173781afe893STejun Heo 
173881afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
173981afe893STejun Heo 	 * sure those are properly initialized.
174081afe893STejun Heo 	 */
174181afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
174281afe893STejun Heo 
174381afe893STejun Heo 	/* Device presence detection is unreliable on some
174481afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
174581afe893STejun Heo 	 */
174681afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1747c6fd2807SJeff Garzik 
1748c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
17492b789108STejun Heo 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1750c6fd2807SJeff Garzik 	if (err_mask) {
1751800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
175255a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
175344877b4eSTejun Heo 				ap->print_id, dev->devno);
175455a8e2c8STejun Heo 			return -ENOENT;
175555a8e2c8STejun Heo 		}
175655a8e2c8STejun Heo 
175754936f8bSTejun Heo 		/* Device or controller might have reported the wrong
175854936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
175954936f8bSTejun Heo 		 * the current one is aborted by the device.
176054936f8bSTejun Heo 		 */
176154936f8bSTejun Heo 		if (may_fallback &&
176254936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
176354936f8bSTejun Heo 			may_fallback = 0;
176454936f8bSTejun Heo 
176554936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
176654936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
176754936f8bSTejun Heo 			else
176854936f8bSTejun Heo 				class = ATA_DEV_ATA;
176954936f8bSTejun Heo 			goto retry;
177054936f8bSTejun Heo 		}
177154936f8bSTejun Heo 
1772c6fd2807SJeff Garzik 		rc = -EIO;
1773c6fd2807SJeff Garzik 		reason = "I/O error";
1774c6fd2807SJeff Garzik 		goto err_out;
1775c6fd2807SJeff Garzik 	}
1776c6fd2807SJeff Garzik 
177754936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
177854936f8bSTejun Heo 	 * successfully at least once.
177954936f8bSTejun Heo 	 */
178054936f8bSTejun Heo 	may_fallback = 0;
178154936f8bSTejun Heo 
1782c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1783c6fd2807SJeff Garzik 
1784c6fd2807SJeff Garzik 	/* sanity check */
1785c6fd2807SJeff Garzik 	rc = -EINVAL;
17866070068bSAlan Cox 	reason = "device reports invalid type";
17874a3381feSJeff Garzik 
17884a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
17894a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
17904a3381feSJeff Garzik 			goto err_out;
17914a3381feSJeff Garzik 	} else {
17924a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1793c6fd2807SJeff Garzik 			goto err_out;
1794c6fd2807SJeff Garzik 	}
1795c6fd2807SJeff Garzik 
1796169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1797169439c2SMark Lord 		tried_spinup = 1;
1798169439c2SMark Lord 		/*
1799169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1800169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1801169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1802169439c2SMark Lord 		 */
1803218f3d30SJeff Garzik 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1804fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
1805169439c2SMark Lord 			rc = -EIO;
1806169439c2SMark Lord 			reason = "SPINUP failed";
1807169439c2SMark Lord 			goto err_out;
1808169439c2SMark Lord 		}
1809169439c2SMark Lord 		/*
1810169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1811169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1812169439c2SMark Lord 		 */
1813169439c2SMark Lord 		if (id[2] == 0x37c8)
1814169439c2SMark Lord 			goto retry;
1815169439c2SMark Lord 	}
1816169439c2SMark Lord 
1817bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1818c6fd2807SJeff Garzik 		/*
1819c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1820c6fd2807SJeff Garzik 		 * SRST RESET
182150a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
182250a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1823c6fd2807SJeff Garzik 		 * anything else..
1824c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
182550a99018SAlan Cox 		 *
182650a99018SAlan Cox 		 * Note that ATA4 says lba is mandatory so the second check
182750a99018SAlan Cox 		 * should never trigger.
1828c6fd2807SJeff Garzik 		 */
1829c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1830c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1831c6fd2807SJeff Garzik 			if (err_mask) {
1832c6fd2807SJeff Garzik 				rc = -EIO;
1833c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
1834c6fd2807SJeff Garzik 				goto err_out;
1835c6fd2807SJeff Garzik 			}
1836c6fd2807SJeff Garzik 
1837c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
1838c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
1839c6fd2807SJeff Garzik 			 */
1840bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
1841c6fd2807SJeff Garzik 			goto retry;
1842c6fd2807SJeff Garzik 		}
1843c6fd2807SJeff Garzik 	}
1844c6fd2807SJeff Garzik 
1845c6fd2807SJeff Garzik 	*p_class = class;
1846c6fd2807SJeff Garzik 
1847c6fd2807SJeff Garzik 	return 0;
1848c6fd2807SJeff Garzik 
1849c6fd2807SJeff Garzik  err_out:
1850c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
1851c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1852c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
1853c6fd2807SJeff Garzik 	return rc;
1854c6fd2807SJeff Garzik }
1855c6fd2807SJeff Garzik 
1856c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
1857c6fd2807SJeff Garzik {
18589af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
18599af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1860c6fd2807SJeff Garzik }
1861c6fd2807SJeff Garzik 
1862c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
1863c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
1864c6fd2807SJeff Garzik {
18659af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1866c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1867c6fd2807SJeff Garzik 
1868c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
1869c6fd2807SJeff Garzik 		desc[0] = '\0';
1870c6fd2807SJeff Garzik 		return;
1871c6fd2807SJeff Garzik 	}
187275683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
18736919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
18746919a0a6SAlan Cox 		return;
18756919a0a6SAlan Cox 	}
1876c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
1877cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1878c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
1879c6fd2807SJeff Garzik 	}
1880c6fd2807SJeff Garzik 
1881c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
1882c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1883c6fd2807SJeff Garzik 	else
1884c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1885c6fd2807SJeff Garzik }
1886c6fd2807SJeff Garzik 
1887c6fd2807SJeff Garzik /**
1888c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
1889c6fd2807SJeff Garzik  *	@dev: Target device to configure
1890c6fd2807SJeff Garzik  *
1891c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
1892c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
1893c6fd2807SJeff Garzik  *
1894c6fd2807SJeff Garzik  *	LOCKING:
1895c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1896c6fd2807SJeff Garzik  *
1897c6fd2807SJeff Garzik  *	RETURNS:
1898c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
1899c6fd2807SJeff Garzik  */
1900efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
1901c6fd2807SJeff Garzik {
19029af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
19039af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
19046746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1905c6fd2807SJeff Garzik 	const u16 *id = dev->id;
1906c6fd2807SJeff Garzik 	unsigned int xfer_mask;
1907b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
19083f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
19093f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
1910c6fd2807SJeff Garzik 	int rc;
1911c6fd2807SJeff Garzik 
1912c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
191344877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
191444877b4eSTejun Heo 			       __FUNCTION__);
1915c6fd2807SJeff Garzik 		return 0;
1916c6fd2807SJeff Garzik 	}
1917c6fd2807SJeff Garzik 
1918c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
191944877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1920c6fd2807SJeff Garzik 
192175683fe7STejun Heo 	/* set horkage */
192275683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
192375683fe7STejun Heo 
19246746544cSTejun Heo 	/* let ACPI work its magic */
19256746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
19266746544cSTejun Heo 	if (rc)
19276746544cSTejun Heo 		return rc;
192808573a86SKristen Carlson Accardi 
192905027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
193005027adcSTejun Heo 	rc = ata_hpa_resize(dev);
193105027adcSTejun Heo 	if (rc)
193205027adcSTejun Heo 		return rc;
193305027adcSTejun Heo 
1934c6fd2807SJeff Garzik 	/* print device capabilities */
1935c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1936c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
1937c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1938c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
1939c6fd2807SJeff Garzik 			       __FUNCTION__,
1940c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
1941c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
1942c6fd2807SJeff Garzik 
1943c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
1944c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
1945c6fd2807SJeff Garzik 	dev->max_sectors = 0;
1946c6fd2807SJeff Garzik 	dev->cdb_len = 0;
1947c6fd2807SJeff Garzik 	dev->n_sectors = 0;
1948c6fd2807SJeff Garzik 	dev->cylinders = 0;
1949c6fd2807SJeff Garzik 	dev->heads = 0;
1950c6fd2807SJeff Garzik 	dev->sectors = 0;
1951c6fd2807SJeff Garzik 
1952c6fd2807SJeff Garzik 	/*
1953c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
1954c6fd2807SJeff Garzik 	 */
1955c6fd2807SJeff Garzik 
1956c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
1957c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
1958c6fd2807SJeff Garzik 
1959c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
1960c6fd2807SJeff Garzik 		ata_dump_id(id);
1961c6fd2807SJeff Garzik 
1962ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1963ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1964ef143d57SAlbert Lee 			sizeof(fwrevbuf));
1965ef143d57SAlbert Lee 
1966ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1967ef143d57SAlbert Lee 			sizeof(modelbuf));
1968ef143d57SAlbert Lee 
1969c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
1970c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
1971b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
1972b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
197344877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
197444877b4eSTejun Heo 					       "supports DRM functions and may "
197544877b4eSTejun Heo 					       "not be fully accessible.\n");
1976b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
19772dcb407eSJeff Garzik 		} else
1978b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1979b352e57dSAlan Cox 
1980c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
1981c6fd2807SJeff Garzik 
19823f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
19833f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
19843f64f565SEric D. Mudama 
1985c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
1986c6fd2807SJeff Garzik 			const char *lba_desc;
1987c6fd2807SJeff Garzik 			char ncq_desc[20];
1988c6fd2807SJeff Garzik 
1989c6fd2807SJeff Garzik 			lba_desc = "LBA";
1990c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
1991c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
1992c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
1993c6fd2807SJeff Garzik 				lba_desc = "LBA48";
19946fc49adbSTejun Heo 
19956fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
19966fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
19976fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
1998c6fd2807SJeff Garzik 			}
1999c6fd2807SJeff Garzik 
2000c6fd2807SJeff Garzik 			/* config NCQ */
2001c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2002c6fd2807SJeff Garzik 
2003c6fd2807SJeff Garzik 			/* print device info to dmesg */
20043f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
20053f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
20063f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
20073f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
20083f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
20093f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
20103f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
2011c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
20123f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
20133f64f565SEric D. Mudama 			}
2014c6fd2807SJeff Garzik 		} else {
2015c6fd2807SJeff Garzik 			/* CHS */
2016c6fd2807SJeff Garzik 
2017c6fd2807SJeff Garzik 			/* Default translation */
2018c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
2019c6fd2807SJeff Garzik 			dev->heads	= id[3];
2020c6fd2807SJeff Garzik 			dev->sectors	= id[6];
2021c6fd2807SJeff Garzik 
2022c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
2023c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
2024c6fd2807SJeff Garzik 				dev->cylinders = id[54];
2025c6fd2807SJeff Garzik 				dev->heads     = id[55];
2026c6fd2807SJeff Garzik 				dev->sectors   = id[56];
2027c6fd2807SJeff Garzik 			}
2028c6fd2807SJeff Garzik 
2029c6fd2807SJeff Garzik 			/* print device info to dmesg */
20303f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2031c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
20323f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
20333f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
20343f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
20353f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
20363f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
20373f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
20383f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
20393f64f565SEric D. Mudama 					dev->heads, dev->sectors);
20403f64f565SEric D. Mudama 			}
2041c6fd2807SJeff Garzik 		}
2042c6fd2807SJeff Garzik 
2043c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2044c6fd2807SJeff Garzik 	}
2045c6fd2807SJeff Garzik 
2046c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2047c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2048854c73a2STejun Heo 		const char *cdb_intr_string = "";
2049854c73a2STejun Heo 		const char *atapi_an_string = "";
20507d77b247STejun Heo 		u32 sntf;
2051c6fd2807SJeff Garzik 
2052c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2053c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2054c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2055c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2056c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2057c6fd2807SJeff Garzik 			rc = -EINVAL;
2058c6fd2807SJeff Garzik 			goto err_out_nosup;
2059c6fd2807SJeff Garzik 		}
2060c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2061c6fd2807SJeff Garzik 
20627d77b247STejun Heo 		/* Enable ATAPI AN if both the host and device have
20637d77b247STejun Heo 		 * the support.  If PMP is attached, SNTF is required
20647d77b247STejun Heo 		 * to enable ATAPI AN to discern between PHY status
20637d77b247STejun Heo 		 * to enable ATAPI AN, so that PHY status change
20647d77b247STejun Heo 		 * notifications can be told apart from ATAPI ANs.
20677d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
20687d77b247STejun Heo 		    (!ap->nr_pmp_links ||
20697d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2070854c73a2STejun Heo 			unsigned int err_mask;
2071854c73a2STejun Heo 
20729f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2073218f3d30SJeff Garzik 			err_mask = ata_dev_set_feature(dev,
2074218f3d30SJeff Garzik 					SETFEATURES_SATA_ENABLE, SATA_AN);
2075854c73a2STejun Heo 			if (err_mask)
20769f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2077854c73a2STejun Heo 					"failed to enable ATAPI AN "
2078854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2079854c73a2STejun Heo 			else {
20809f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2081854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2082854c73a2STejun Heo 			}
20839f45cbd3SKristen Carlson Accardi 		}
20849f45cbd3SKristen Carlson Accardi 
2085c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2086c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2087c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2088c6fd2807SJeff Garzik 		}
2089c6fd2807SJeff Garzik 
2090c6fd2807SJeff Garzik 		/* print device info to dmesg */
2091c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2092ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2093854c73a2STejun Heo 				       "ATAPI: %s, %s, max %s%s%s\n",
2094ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2095c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2096854c73a2STejun Heo 				       cdb_intr_string, atapi_an_string);
2097c6fd2807SJeff Garzik 	}
2098c6fd2807SJeff Garzik 
2099914ed354STejun Heo 	/* determine max_sectors */
2100914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2101914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2102914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2103914ed354STejun Heo 
210493590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
210593590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
210693590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
210793590859SAlan Cox 		   idiot */
210893590859SAlan Cox 		if (print_info) {
210993590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
211093590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
211193590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
211293590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
211393590859SAlan Cox 		}
211493590859SAlan Cox 	}
211593590859SAlan Cox 
2116c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2117c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2118c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2119c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2120c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2121c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2122c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2123c6fd2807SJeff Garzik 	}
2124c6fd2807SJeff Garzik 
212575683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
212603ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
212703ec52deSTejun Heo 					 dev->max_sectors);
212818d6e9d5SAlbert Lee 
2129c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2130cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2131c6fd2807SJeff Garzik 
2132c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2133c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2134c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2135c6fd2807SJeff Garzik 	return 0;
2136c6fd2807SJeff Garzik 
2137c6fd2807SJeff Garzik err_out_nosup:
2138c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2139c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2140c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2141c6fd2807SJeff Garzik 	return rc;
2142c6fd2807SJeff Garzik }
2143c6fd2807SJeff Garzik 
2144c6fd2807SJeff Garzik /**
21452e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2146be0d18dfSAlan Cox  *	@ap: port
2147be0d18dfSAlan Cox  *
21482e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2149be0d18dfSAlan Cox  *	detection.
2150be0d18dfSAlan Cox  */
2151be0d18dfSAlan Cox 
2152be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2153be0d18dfSAlan Cox {
2154be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2155be0d18dfSAlan Cox }
2156be0d18dfSAlan Cox 
2157be0d18dfSAlan Cox /**
21582e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2159be0d18dfSAlan Cox  *	@ap: port
2160be0d18dfSAlan Cox  *
21612e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2162be0d18dfSAlan Cox  *	detection.
2163be0d18dfSAlan Cox  */
2164be0d18dfSAlan Cox 
2165be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2166be0d18dfSAlan Cox {
2167be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2168be0d18dfSAlan Cox }
2169be0d18dfSAlan Cox 
2170be0d18dfSAlan Cox /**
2171be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2172be0d18dfSAlan Cox  *	@ap: port
2173be0d18dfSAlan Cox  *
2174be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2175be0d18dfSAlan Cox  */
2176be0d18dfSAlan Cox 
2177be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2178be0d18dfSAlan Cox {
2179be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2180be0d18dfSAlan Cox }
2181be0d18dfSAlan Cox 
2182be0d18dfSAlan Cox /**
2183be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2184be0d18dfSAlan Cox  *	@ap: port
2185be0d18dfSAlan Cox  *
2186be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2187be0d18dfSAlan Cox  */
2188be0d18dfSAlan Cox 
2189be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2190be0d18dfSAlan Cox {
2191be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2192be0d18dfSAlan Cox }
2193be0d18dfSAlan Cox 
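/*
 * Illustrative sketch only: the cable helpers above are intended to be
 * plugged straight into a driver's ata_port_operations so that
 * ata_bus_probe() below can ask for the cable type via ->cable_detect();
 * 'my_pata_ops' is a hypothetical name.
 *
 *	static const struct ata_port_operations my_pata_ops = {
 *		...
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */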
2194be0d18dfSAlan Cox /**
2195c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2196c6fd2807SJeff Garzik  *	@ap: Bus to probe
2197c6fd2807SJeff Garzik  *
2198c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2199c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2200c6fd2807SJeff Garzik  *	the bus.
2201c6fd2807SJeff Garzik  *
2202c6fd2807SJeff Garzik  *	LOCKING:
2203c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2204c6fd2807SJeff Garzik  *
2205c6fd2807SJeff Garzik  *	RETURNS:
2206c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2207c6fd2807SJeff Garzik  */
2208c6fd2807SJeff Garzik 
2209c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2210c6fd2807SJeff Garzik {
2211c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2212c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2213f58229f8STejun Heo 	int rc;
2214c6fd2807SJeff Garzik 	struct ata_device *dev;
2215c6fd2807SJeff Garzik 
2216c6fd2807SJeff Garzik 	ata_port_probe(ap);
2217c6fd2807SJeff Garzik 
2218f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2219f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2220c6fd2807SJeff Garzik 
2221c6fd2807SJeff Garzik  retry:
2222c6fd2807SJeff Garzik 	/* reset and determine device classes */
2223c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2224c6fd2807SJeff Garzik 
2225f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2226c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2227c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2228c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2229c6fd2807SJeff Garzik 		else
2230c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2231c6fd2807SJeff Garzik 
2232c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2233c6fd2807SJeff Garzik 	}
2234c6fd2807SJeff Garzik 
2235c6fd2807SJeff Garzik 	ata_port_probe(ap);
2236c6fd2807SJeff Garzik 
2237c6fd2807SJeff Garzik 	/* after the reset the device state is PIO 0 and the controller
2238c6fd2807SJeff Garzik 	   state is undefined. Record the mode */
2239c6fd2807SJeff Garzik 
2240f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2241f58229f8STejun Heo 		dev->pio_mode = XFER_PIO_0;
2242c6fd2807SJeff Garzik 
2243f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2244f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2245f31f0cc2SJeff Garzik 	   the slave device */
2246f31f0cc2SJeff Garzik 
2247f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2248f58229f8STejun Heo 		if (tries[dev->devno])
2249f58229f8STejun Heo 			dev->class = classes[dev->devno];
2250c6fd2807SJeff Garzik 
2251c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2252c6fd2807SJeff Garzik 			continue;
2253c6fd2807SJeff Garzik 
2254bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2255bff04647STejun Heo 				     dev->id);
2256c6fd2807SJeff Garzik 		if (rc)
2257c6fd2807SJeff Garzik 			goto fail;
2258f31f0cc2SJeff Garzik 	}
2259f31f0cc2SJeff Garzik 
2260be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2261be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2262be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2263be0d18dfSAlan Cox 
2264614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2265614fe29bSAlan Cox 	   reported cable types and sensed types */
2266614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2267614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2268614fe29bSAlan Cox 			continue;
2269614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2270614fe29bSAlan Cox 		   end of the link the bridge is on, which is a problem */
2271614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2272614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2273614fe29bSAlan Cox 	}
2274614fe29bSAlan Cox 
2275f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2276f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2277f31f0cc2SJeff Garzik 
2278f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2279f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2280f31f0cc2SJeff Garzik 			continue;
2281c6fd2807SJeff Garzik 
22829af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2283efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
22849af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2285c6fd2807SJeff Garzik 		if (rc)
2286c6fd2807SJeff Garzik 			goto fail;
2287c6fd2807SJeff Garzik 	}
2288c6fd2807SJeff Garzik 
2289c6fd2807SJeff Garzik 	/* configure transfer mode */
22900260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
22914ae72a1eSTejun Heo 	if (rc)
2292c6fd2807SJeff Garzik 		goto fail;
2293c6fd2807SJeff Garzik 
2294f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2295f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2296c6fd2807SJeff Garzik 			return 0;
2297c6fd2807SJeff Garzik 
2298c6fd2807SJeff Garzik 	/* no device present, disable port */
2299c6fd2807SJeff Garzik 	ata_port_disable(ap);
2300c6fd2807SJeff Garzik 	return -ENODEV;
2301c6fd2807SJeff Garzik 
2302c6fd2807SJeff Garzik  fail:
23034ae72a1eSTejun Heo 	tries[dev->devno]--;
23044ae72a1eSTejun Heo 
2305c6fd2807SJeff Garzik 	switch (rc) {
2306c6fd2807SJeff Garzik 	case -EINVAL:
23074ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2308c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2309c6fd2807SJeff Garzik 		break;
23104ae72a1eSTejun Heo 
23114ae72a1eSTejun Heo 	case -ENODEV:
23124ae72a1eSTejun Heo 		/* give it just one more chance */
23134ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2314c6fd2807SJeff Garzik 	case -EIO:
23154ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
23164ae72a1eSTejun Heo 			/* This is the last chance, better to slow
23174ae72a1eSTejun Heo 			 * down than lose it.
23184ae72a1eSTejun Heo 			 */
2319936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
23204ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
23214ae72a1eSTejun Heo 		}
2322c6fd2807SJeff Garzik 	}
2323c6fd2807SJeff Garzik 
23244ae72a1eSTejun Heo 	if (!tries[dev->devno])
2325c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2326c6fd2807SJeff Garzik 
2327c6fd2807SJeff Garzik 	goto retry;
2328c6fd2807SJeff Garzik }
2329c6fd2807SJeff Garzik 
2330c6fd2807SJeff Garzik /**
2331c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2332c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2333c6fd2807SJeff Garzik  *
2334c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2335c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2336c6fd2807SJeff Garzik  *
2337cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2338c6fd2807SJeff Garzik  *	serialization.
2339c6fd2807SJeff Garzik  */
2340c6fd2807SJeff Garzik 
2341c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2342c6fd2807SJeff Garzik {
2343c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2344c6fd2807SJeff Garzik }
2345c6fd2807SJeff Garzik 
2346c6fd2807SJeff Garzik /**
2347c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2348936fd732STejun Heo  *	@link: SATA link to printk link status about
2349c6fd2807SJeff Garzik  *
2350c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2351c6fd2807SJeff Garzik  *
2352c6fd2807SJeff Garzik  *	LOCKING:
2353c6fd2807SJeff Garzik  *	None.
2354c6fd2807SJeff Garzik  */
2355936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2356c6fd2807SJeff Garzik {
2357c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2358c6fd2807SJeff Garzik 
2359936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2360c6fd2807SJeff Garzik 		return;
2361936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2362c6fd2807SJeff Garzik 
2363936fd732STejun Heo 	if (ata_link_online(link)) {
2364c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2365936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2366c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2367c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2368c6fd2807SJeff Garzik 	} else {
2369936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2370c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2371c6fd2807SJeff Garzik 				sstatus, scontrol);
2372c6fd2807SJeff Garzik 	}
2373c6fd2807SJeff Garzik }
2374c6fd2807SJeff Garzik 
2375c6fd2807SJeff Garzik /**
2376c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2377c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2378c6fd2807SJeff Garzik  *
2379c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2380c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2381c6fd2807SJeff Garzik  *	clear any reset condition.
2382c6fd2807SJeff Garzik  *
2383c6fd2807SJeff Garzik  *	LOCKING:
2384c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2385c6fd2807SJeff Garzik  *
2386c6fd2807SJeff Garzik  */
2387c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2388c6fd2807SJeff Garzik {
2389936fd732STejun Heo 	struct ata_link *link = &ap->link;
2390c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2391936fd732STejun Heo 	u32 sstatus;
2392c6fd2807SJeff Garzik 
2393c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2394c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2395936fd732STejun Heo 		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2396c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2397c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2398c6fd2807SJeff Garzik 		mdelay(1);
2399c6fd2807SJeff Garzik 	}
2400c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2401936fd732STejun Heo 	sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2402c6fd2807SJeff Garzik 
2403c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2404c6fd2807SJeff Garzik 	do {
2405c6fd2807SJeff Garzik 		msleep(200);
2406936fd732STejun Heo 		sata_scr_read(link, SCR_STATUS, &sstatus);
2407c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2408c6fd2807SJeff Garzik 			break;
2409c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2410c6fd2807SJeff Garzik 
2411c6fd2807SJeff Garzik 	/* print link status */
2412936fd732STejun Heo 	sata_print_link_status(link);
2413c6fd2807SJeff Garzik 
2414c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2415936fd732STejun Heo 	if (!ata_link_offline(link))
2416c6fd2807SJeff Garzik 		ata_port_probe(ap);
2417c6fd2807SJeff Garzik 	else
2418c6fd2807SJeff Garzik 		ata_port_disable(ap);
2419c6fd2807SJeff Garzik 
2420c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2421c6fd2807SJeff Garzik 		return;
2422c6fd2807SJeff Garzik 
2423c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2424c6fd2807SJeff Garzik 		ata_port_disable(ap);
2425c6fd2807SJeff Garzik 		return;
2426c6fd2807SJeff Garzik 	}
2427c6fd2807SJeff Garzik 
2428c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2429c6fd2807SJeff Garzik }
2430c6fd2807SJeff Garzik 
2431c6fd2807SJeff Garzik /**
2432c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2433c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2434c6fd2807SJeff Garzik  *
2435c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2436c6fd2807SJeff Garzik  *	the bus for devices.
2437c6fd2807SJeff Garzik  *
2438c6fd2807SJeff Garzik  *	LOCKING:
2439c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2440c6fd2807SJeff Garzik  *
2441c6fd2807SJeff Garzik  */
2442c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2443c6fd2807SJeff Garzik {
2444c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2445c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2446c6fd2807SJeff Garzik 		return;
2447c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2448c6fd2807SJeff Garzik }
2449c6fd2807SJeff Garzik 
2450c6fd2807SJeff Garzik /**
2451c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2452c6fd2807SJeff Garzik  *	@adev: device
2453c6fd2807SJeff Garzik  *
2454c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or NULL if no
2455c6fd2807SJeff Garzik  *	other device is present.
2456c6fd2807SJeff Garzik  */
2457c6fd2807SJeff Garzik 
2458c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2459c6fd2807SJeff Garzik {
24609af5c9c9STejun Heo 	struct ata_link *link = adev->link;
24619af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2462c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2463c6fd2807SJeff Garzik 		return NULL;
2464c6fd2807SJeff Garzik 	return pair;
2465c6fd2807SJeff Garzik }
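
/* Editorial note: for the master device (devno 0) this returns
 * link->device[1], the slave, and vice versa; NULL is returned when
 * the other device is not enabled.
 */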
2466c6fd2807SJeff Garzik 
2467c6fd2807SJeff Garzik /**
2468c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2469c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2470c6fd2807SJeff Garzik  *
2471c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2472c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2473c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2474c6fd2807SJeff Garzik  *	on this port.
2475c6fd2807SJeff Garzik  *
2476cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2477c6fd2807SJeff Garzik  *	serialization.
2478c6fd2807SJeff Garzik  */
2479c6fd2807SJeff Garzik 
2480c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2481c6fd2807SJeff Garzik {
24829af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
24839af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2484c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2485c6fd2807SJeff Garzik }
2486c6fd2807SJeff Garzik 
2487c6fd2807SJeff Garzik /**
2488c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2489936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2490c6fd2807SJeff Garzik  *
2491936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2492c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2493c6fd2807SJeff Garzik  *	using sata_set_spd().
2494c6fd2807SJeff Garzik  *
2495c6fd2807SJeff Garzik  *	LOCKING:
2496c6fd2807SJeff Garzik  *	Inherited from caller.
2497c6fd2807SJeff Garzik  *
2498c6fd2807SJeff Garzik  *	RETURNS:
2499c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2500c6fd2807SJeff Garzik  */
2501936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2502c6fd2807SJeff Garzik {
2503c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2504c6fd2807SJeff Garzik 	int rc, highbit;
2505c6fd2807SJeff Garzik 
2506936fd732STejun Heo 	if (!sata_scr_valid(link))
2507008a7896STejun Heo 		return -EOPNOTSUPP;
2508008a7896STejun Heo 
2509008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2510936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2511008a7896STejun Heo 	 */
2512936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2513008a7896STejun Heo 	if (rc == 0)
2514008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2515008a7896STejun Heo 	else
2516936fd732STejun Heo 		spd = link->sata_spd;
2517c6fd2807SJeff Garzik 
2518936fd732STejun Heo 	mask = link->sata_spd_limit;
2519c6fd2807SJeff Garzik 	if (mask <= 1)
2520c6fd2807SJeff Garzik 		return -EINVAL;
2521008a7896STejun Heo 
2522008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2523c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2524c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2525c6fd2807SJeff Garzik 
2526008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2527008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2528008a7896STejun Heo 	 */
2529008a7896STejun Heo 	if (spd > 1)
2530008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2531008a7896STejun Heo 	else
2532008a7896STejun Heo 		mask &= 1;
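
	/* Worked example (editorial): with link->sata_spd_limit == 0x3
	 * (1.5 and 3.0 Gbps allowed) and the current spd == 2 (3.0 Gbps),
	 * clearing the highest bit leaves mask == 0x1, which survives the
	 * spd masking below, so the link ends up limited to 1.5 Gbps.
	 */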
2533008a7896STejun Heo 
2534008a7896STejun Heo 	/* were we already at the bottom? */
2535c6fd2807SJeff Garzik 	if (!mask)
2536c6fd2807SJeff Garzik 		return -EINVAL;
2537c6fd2807SJeff Garzik 
2538936fd732STejun Heo 	link->sata_spd_limit = mask;
2539c6fd2807SJeff Garzik 
2540936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2541c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2542c6fd2807SJeff Garzik 
2543c6fd2807SJeff Garzik 	return 0;
2544c6fd2807SJeff Garzik }
2545c6fd2807SJeff Garzik 
2546936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2547c6fd2807SJeff Garzik {
2548c6fd2807SJeff Garzik 	u32 spd, limit;
2549c6fd2807SJeff Garzik 
2550936fd732STejun Heo 	if (link->sata_spd_limit == UINT_MAX)
2551c6fd2807SJeff Garzik 		limit = 0;
2552c6fd2807SJeff Garzik 	else
2553936fd732STejun Heo 		limit = fls(link->sata_spd_limit);
2554c6fd2807SJeff Garzik 
2555c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
2556c6fd2807SJeff Garzik 	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2557c6fd2807SJeff Garzik 
2558c6fd2807SJeff Garzik 	return spd != limit;
2559c6fd2807SJeff Garzik }
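
/* Editorial illustration: SControl bits 7:4 carry the speed limit.  With
 * scontrol == 0x300 (no limit) and link->sata_spd_limit == 0x1, limit
 * becomes fls(0x1) == 1, *scontrol is rewritten to 0x310 (limit to
 * 1.5 Gbps) and the function returns 1, i.e. a change is needed.
 */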
2560c6fd2807SJeff Garzik 
2561c6fd2807SJeff Garzik /**
2562c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2563936fd732STejun Heo  *	@link: Link in question
2564c6fd2807SJeff Garzik  *
2565c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2566936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2567c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2568c6fd2807SJeff Garzik  *	configuration.
2569c6fd2807SJeff Garzik  *
2570c6fd2807SJeff Garzik  *	LOCKING:
2571c6fd2807SJeff Garzik  *	Inherited from caller.
2572c6fd2807SJeff Garzik  *
2573c6fd2807SJeff Garzik  *	RETURNS:
2574c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2575c6fd2807SJeff Garzik  */
2576936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2577c6fd2807SJeff Garzik {
2578c6fd2807SJeff Garzik 	u32 scontrol;
2579c6fd2807SJeff Garzik 
2580936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2581c6fd2807SJeff Garzik 		return 0;
2582c6fd2807SJeff Garzik 
2583936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2584c6fd2807SJeff Garzik }
2585c6fd2807SJeff Garzik 
2586c6fd2807SJeff Garzik /**
2587c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2588936fd732STejun Heo  *	@link: Link to set SATA spd for
2589c6fd2807SJeff Garzik  *
2590936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2591c6fd2807SJeff Garzik  *
2592c6fd2807SJeff Garzik  *	LOCKING:
2593c6fd2807SJeff Garzik  *	Inherited from caller.
2594c6fd2807SJeff Garzik  *
2595c6fd2807SJeff Garzik  *	RETURNS:
2596c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2597c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2598c6fd2807SJeff Garzik  */
2599936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2600c6fd2807SJeff Garzik {
2601c6fd2807SJeff Garzik 	u32 scontrol;
2602c6fd2807SJeff Garzik 	int rc;
2603c6fd2807SJeff Garzik 
2604936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2605c6fd2807SJeff Garzik 		return rc;
2606c6fd2807SJeff Garzik 
2607936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2608c6fd2807SJeff Garzik 		return 0;
2609c6fd2807SJeff Garzik 
2610936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2611c6fd2807SJeff Garzik 		return rc;
2612c6fd2807SJeff Garzik 
2613c6fd2807SJeff Garzik 	return 1;
2614c6fd2807SJeff Garzik }
2615c6fd2807SJeff Garzik 
2616c6fd2807SJeff Garzik /*
2617c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2618c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2619c6fd2807SJeff Garzik  */
2620c6fd2807SJeff Garzik /*
2621b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2622c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2623b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2624b352e57dSAlan Cox  *
2625b352e57dSAlan Cox  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2626c6fd2807SJeff Garzik  */
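
/* Editorial note: each row below presumably lists, in order,
 * { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma },
 * matching the fields that ata_timing_quantize() converts to bus clock
 * counts further down.
 */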
2627c6fd2807SJeff Garzik 
2628c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2629c6fd2807SJeff Garzik 
2630c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2631c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2632c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2633c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2634c6fd2807SJeff Garzik 
2635b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2636b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2637c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2638c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2639c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2640c6fd2807SJeff Garzik 
2641c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2642c6fd2807SJeff Garzik 
2643c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2644c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2645c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2646c6fd2807SJeff Garzik 
2647c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2648c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2649c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2650c6fd2807SJeff Garzik 
2651b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2652b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2653c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2654c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2655c6fd2807SJeff Garzik 
2656c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2657c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2658c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2659c6fd2807SJeff Garzik 
2660c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2661c6fd2807SJeff Garzik 
2662c6fd2807SJeff Garzik 	{ 0xFF }
2663c6fd2807SJeff Garzik };
2664c6fd2807SJeff Garzik 
2665c6fd2807SJeff Garzik #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2666c6fd2807SJeff Garzik #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2667c6fd2807SJeff Garzik 
2668c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2669c6fd2807SJeff Garzik {
2670c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2671c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2672c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2673c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2674c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2675c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2676c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2677c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2678c6fd2807SJeff Garzik }
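
/* Editorial illustration, assuming T and UT are the PIO and UDMA bus
 * clock periods in picoseconds as passed by typical host drivers: a
 * 25 ns setup value becomes EZ(25 * 1000, 30000) == 1 clock on a
 * 33 MHz bus (T == 30000 ps), while a 120 ns cycle becomes 4 clocks.
 */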
2679c6fd2807SJeff Garzik 
2680c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2681c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2682c6fd2807SJeff Garzik {
2683c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2684c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2685c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2686c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2687c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2688c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2689c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2690c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2691c6fd2807SJeff Garzik }
2692c6fd2807SJeff Garzik 
2693c6fd2807SJeff Garzik static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
2694c6fd2807SJeff Garzik {
2695c6fd2807SJeff Garzik 	const struct ata_timing *t;
2696c6fd2807SJeff Garzik 
2697c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2698c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2699c6fd2807SJeff Garzik 			return NULL;
2700c6fd2807SJeff Garzik 	return t;
2701c6fd2807SJeff Garzik }
2702c6fd2807SJeff Garzik 
2703c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2704c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2705c6fd2807SJeff Garzik {
2706c6fd2807SJeff Garzik 	const struct ata_timing *s;
2707c6fd2807SJeff Garzik 	struct ata_timing p;
2708c6fd2807SJeff Garzik 
2709c6fd2807SJeff Garzik 	/*
2710c6fd2807SJeff Garzik 	 * Find the mode.
2711c6fd2807SJeff Garzik 	 */
2712c6fd2807SJeff Garzik 
2713c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2714c6fd2807SJeff Garzik 		return -EINVAL;
2715c6fd2807SJeff Garzik 
2716c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2717c6fd2807SJeff Garzik 
2718c6fd2807SJeff Garzik 	/*
2719c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2720c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2721c6fd2807SJeff Garzik 	 */
2722c6fd2807SJeff Garzik 
2723c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2724c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2725c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2726c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2727c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2728c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2729c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2730c6fd2807SJeff Garzik 		}
2731c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2732c6fd2807SJeff Garzik 	}
2733c6fd2807SJeff Garzik 
2734c6fd2807SJeff Garzik 	/*
2735c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2736c6fd2807SJeff Garzik 	 */
2737c6fd2807SJeff Garzik 
2738c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2739c6fd2807SJeff Garzik 
2740c6fd2807SJeff Garzik 	/*
2741c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2742c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2743c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2744c6fd2807SJeff Garzik 	 */
2745c6fd2807SJeff Garzik 
2746fd3367afSAlan 	if (speed > XFER_PIO_6) {
2747c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2748c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2749c6fd2807SJeff Garzik 	}
2750c6fd2807SJeff Garzik 
2751c6fd2807SJeff Garzik 	/*
2752c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2753c6fd2807SJeff Garzik 	 */
2754c6fd2807SJeff Garzik 
2755c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2756c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2757c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2758c6fd2807SJeff Garzik 	}
2759c6fd2807SJeff Garzik 
2760c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2761c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2762c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2763c6fd2807SJeff Garzik 	}
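
	/* Editorial illustration: with active == 2, recover == 3 and
	 * cycle == 7 clocks, the shortfall of 2 is split, so active becomes
	 * 3 and recover becomes cycle - active == 4, restoring
	 * active + recover == cycle.
	 */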
27644f701d1eSAlan Cox 
27654f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
27664f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
27674f701d1eSAlan Cox 	   if so, we must correct this. */
27684f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
27694f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2770c6fd2807SJeff Garzik 
2771c6fd2807SJeff Garzik 	return 0;
2772c6fd2807SJeff Garzik }
2773c6fd2807SJeff Garzik 
2774c6fd2807SJeff Garzik /**
2775c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2776c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2777458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2778c6fd2807SJeff Garzik  *
2779c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2780c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2781c6fd2807SJeff Garzik  *	will apply the limit.
2782c6fd2807SJeff Garzik  *
2783c6fd2807SJeff Garzik  *	LOCKING:
2784c6fd2807SJeff Garzik  *	Inherited from caller.
2785c6fd2807SJeff Garzik  *
2786c6fd2807SJeff Garzik  *	RETURNS:
2787c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2788c6fd2807SJeff Garzik  */
2789458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2790c6fd2807SJeff Garzik {
2791458337dbSTejun Heo 	char buf[32];
2792458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2793458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2794458337dbSTejun Heo 	int quiet, highbit;
2795c6fd2807SJeff Garzik 
2796458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2797458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2798458337dbSTejun Heo 
2799458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2800458337dbSTejun Heo 						  dev->mwdma_mask,
2801c6fd2807SJeff Garzik 						  dev->udma_mask);
2802458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2803c6fd2807SJeff Garzik 
2804458337dbSTejun Heo 	switch (sel) {
2805458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2806458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2807458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2808458337dbSTejun Heo 		break;
2809458337dbSTejun Heo 
2810458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2811458337dbSTejun Heo 		if (udma_mask) {
2812458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2813458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2814458337dbSTejun Heo 			if (!udma_mask)
2815458337dbSTejun Heo 				return -ENOENT;
2816458337dbSTejun Heo 		} else if (mwdma_mask) {
2817458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2818458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2819458337dbSTejun Heo 			if (!mwdma_mask)
2820458337dbSTejun Heo 				return -ENOENT;
2821458337dbSTejun Heo 		}
2822458337dbSTejun Heo 		break;
2823458337dbSTejun Heo 
2824458337dbSTejun Heo 	case ATA_DNXFER_40C:
2825458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2826458337dbSTejun Heo 		break;
2827458337dbSTejun Heo 
2828458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2829458337dbSTejun Heo 		pio_mask &= 1;
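		/* fall through */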
2830458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2831458337dbSTejun Heo 		mwdma_mask = 0;
2832458337dbSTejun Heo 		udma_mask = 0;
2833458337dbSTejun Heo 		break;
2834458337dbSTejun Heo 
2835458337dbSTejun Heo 	default:
2836458337dbSTejun Heo 		BUG();
2837458337dbSTejun Heo 	}
2838458337dbSTejun Heo 
2839458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2840458337dbSTejun Heo 
2841458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2842458337dbSTejun Heo 		return -ENOENT;
2843458337dbSTejun Heo 
2844458337dbSTejun Heo 	if (!quiet) {
2845458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2846458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
2847458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
2848458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2849458337dbSTejun Heo 		else
2850458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
2851458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
2852458337dbSTejun Heo 
2853458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
2854458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
2855458337dbSTejun Heo 	}
2856c6fd2807SJeff Garzik 
2857c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2858c6fd2807SJeff Garzik 			    &dev->udma_mask);
2859c6fd2807SJeff Garzik 
2860c6fd2807SJeff Garzik 	return 0;
2861c6fd2807SJeff Garzik }
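
/* Editorial illustration: called with ATA_DNXFER_PIO, a pio_mask of
 * 0x1f (PIO0-4) loses its highest bit and becomes 0x0f (PIO0-3); as the
 * comment above notes, the new limit only takes effect once
 * ata_set_mode() is invoked.
 */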
2862c6fd2807SJeff Garzik 
2863c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
2864c6fd2807SJeff Garzik {
28659af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
2866c6fd2807SJeff Garzik 	unsigned int err_mask;
2867c6fd2807SJeff Garzik 	int rc;
2868c6fd2807SJeff Garzik 
2869c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
2870c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
2871c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
2872c6fd2807SJeff Garzik 
2873c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
28742dcb407eSJeff Garzik 
287511750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
287611750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
287711750a40SAlan 		err_mask &= ~AC_ERR_DEV;
28782dcb407eSJeff Garzik 
28790bc2a79aSAlan Cox 	/* Some very old devices and some bad newer ones fail any kind of
28800bc2a79aSAlan Cox 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
28810bc2a79aSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
28820bc2a79aSAlan Cox 			dev->pio_mode <= XFER_PIO_2)
28830bc2a79aSAlan Cox 		err_mask &= ~AC_ERR_DEV;
28842dcb407eSJeff Garzik 
28853acaf94bSAlan Cox 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
28863acaf94bSAlan Cox 	   Don't fail an MWDMA0 set if the device indicates it is in MWDMA0 */
28873acaf94bSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
28883acaf94bSAlan Cox 	    dev->dma_mode == XFER_MW_DMA_0 &&
28893acaf94bSAlan Cox 	    (dev->id[63] >> 8) & 1)
28903acaf94bSAlan Cox 		err_mask &= ~AC_ERR_DEV;
28913acaf94bSAlan Cox 
2892c6fd2807SJeff Garzik 	if (err_mask) {
2893c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2894c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
2895c6fd2807SJeff Garzik 		return -EIO;
2896c6fd2807SJeff Garzik 	}
2897c6fd2807SJeff Garzik 
2898baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2899422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
2900baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2901c6fd2807SJeff Garzik 	if (rc)
2902c6fd2807SJeff Garzik 		return rc;
2903c6fd2807SJeff Garzik 
2904c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2905c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
2906c6fd2807SJeff Garzik 
2907c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2908c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2909c6fd2807SJeff Garzik 	return 0;
2910c6fd2807SJeff Garzik }
2911c6fd2807SJeff Garzik 
2912c6fd2807SJeff Garzik /**
291304351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
29140260731fSTejun Heo  *	@link: link on which timings will be programmed
2915c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
2916c6fd2807SJeff Garzik  *
291704351821SAlan  *	Standard implementation of the function used to tune and set
291804351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
291904351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
2920c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
2921c6fd2807SJeff Garzik  *
2922c6fd2807SJeff Garzik  *	LOCKING:
2923c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2924c6fd2807SJeff Garzik  *
2925c6fd2807SJeff Garzik  *	RETURNS:
2926c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
2927c6fd2807SJeff Garzik  */
292804351821SAlan 
29290260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2930c6fd2807SJeff Garzik {
29310260731fSTejun Heo 	struct ata_port *ap = link->ap;
2932c6fd2807SJeff Garzik 	struct ata_device *dev;
2933f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
2934c6fd2807SJeff Garzik 
2935c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
2936f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2937c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
2938b3a70601SAlan Cox 		unsigned int mode_mask;
2939c6fd2807SJeff Garzik 
2940c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2941c6fd2807SJeff Garzik 			continue;
2942c6fd2807SJeff Garzik 
2943b3a70601SAlan Cox 		mode_mask = ATA_DMA_MASK_ATA;
2944b3a70601SAlan Cox 		if (dev->class == ATA_DEV_ATAPI)
2945b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_ATAPI;
2946b3a70601SAlan Cox 		else if (ata_id_is_cfa(dev->id))
2947b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_CFA;
2948b3a70601SAlan Cox 
2949c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
2950c6fd2807SJeff Garzik 
2951c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2952c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2953b3a70601SAlan Cox 
2954b3a70601SAlan Cox 		if (libata_dma_mask & mode_mask)
2955b3a70601SAlan Cox 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2956b3a70601SAlan Cox 		else
2957b3a70601SAlan Cox 			dma_mask = 0;
2958b3a70601SAlan Cox 
2959c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2960c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2961c6fd2807SJeff Garzik 
2962c6fd2807SJeff Garzik 		found = 1;
2963c6fd2807SJeff Garzik 		if (dev->dma_mode)
2964c6fd2807SJeff Garzik 			used_dma = 1;
2965c6fd2807SJeff Garzik 	}
2966c6fd2807SJeff Garzik 	if (!found)
2967c6fd2807SJeff Garzik 		goto out;
2968c6fd2807SJeff Garzik 
2969c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
2970f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2971c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2972c6fd2807SJeff Garzik 			continue;
2973c6fd2807SJeff Garzik 
2974c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
2975c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2976c6fd2807SJeff Garzik 			rc = -EINVAL;
2977c6fd2807SJeff Garzik 			goto out;
2978c6fd2807SJeff Garzik 		}
2979c6fd2807SJeff Garzik 
2980c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
2981c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
2982c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
2983c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
2984c6fd2807SJeff Garzik 	}
2985c6fd2807SJeff Garzik 
2986c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
2987f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
2988c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2989c6fd2807SJeff Garzik 			continue;
2990c6fd2807SJeff Garzik 
2991c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
2992c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2993c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
2994c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
2995c6fd2807SJeff Garzik 	}
2996c6fd2807SJeff Garzik 
2997c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
2998f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
299918d90debSAlan 		/* don't update suspended devices' xfer mode */
30009666f400STejun Heo 		if (!ata_dev_enabled(dev))
3001c6fd2807SJeff Garzik 			continue;
3002c6fd2807SJeff Garzik 
3003c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
3004c6fd2807SJeff Garzik 		if (rc)
3005c6fd2807SJeff Garzik 			goto out;
3006c6fd2807SJeff Garzik 	}
3007c6fd2807SJeff Garzik 
3008c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
3009c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
3010c6fd2807SJeff Garzik 	 */
3011cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3012032af1ceSAlan 		ap->host->simplex_claimed = ap;
3013c6fd2807SJeff Garzik 
3014c6fd2807SJeff Garzik  out:
3015c6fd2807SJeff Garzik 	if (rc)
3016c6fd2807SJeff Garzik 		*r_failed_dev = dev;
3017c6fd2807SJeff Garzik 	return rc;
3018c6fd2807SJeff Garzik }
3019c6fd2807SJeff Garzik 
3020c6fd2807SJeff Garzik /**
302104351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
30220260731fSTejun Heo  *	@link: link on which timings will be programmed
302304351821SAlan  *	@r_failed_dev: out parameter for failed device
302404351821SAlan  *
302504351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
302604351821SAlan  *	ata_set_mode() fails, pointer to the failing device is
302704351821SAlan  *	returned in @r_failed_dev.
302804351821SAlan  *
302904351821SAlan  *	LOCKING:
303004351821SAlan  *	PCI/etc. bus probe sem.
303104351821SAlan  *
303204351821SAlan  *	RETURNS:
303304351821SAlan  *	0 on success, negative errno otherwise
303404351821SAlan  */
30350260731fSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
303604351821SAlan {
30370260731fSTejun Heo 	struct ata_port *ap = link->ap;
30380260731fSTejun Heo 
303904351821SAlan 	/* has private set_mode? */
304004351821SAlan 	if (ap->ops->set_mode)
30410260731fSTejun Heo 		return ap->ops->set_mode(link, r_failed_dev);
30420260731fSTejun Heo 	return ata_do_set_mode(link, r_failed_dev);
304304351821SAlan }
304404351821SAlan 
304504351821SAlan /**
3046c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
3047c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
3048c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
3049c6fd2807SJeff Garzik  *
3050c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
3051c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3052c6fd2807SJeff Garzik  *	other threads.
3053c6fd2807SJeff Garzik  *
3054c6fd2807SJeff Garzik  *	LOCKING:
3055cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3056c6fd2807SJeff Garzik  */
3057c6fd2807SJeff Garzik 
3058c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3059c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3060c6fd2807SJeff Garzik {
3061c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3062c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3063c6fd2807SJeff Garzik }
3064c6fd2807SJeff Garzik 
3065c6fd2807SJeff Garzik /**
3066c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3067c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3068c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3069c6fd2807SJeff Garzik  *	@tmout: overall timeout
3070c6fd2807SJeff Garzik  *
3071c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3072c6fd2807SJeff Garzik  *	or a timeout occurs.
3073c6fd2807SJeff Garzik  *
3074d1adc1bbSTejun Heo  *	LOCKING:
3075d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3076d1adc1bbSTejun Heo  *
3077d1adc1bbSTejun Heo  *	RETURNS:
3078d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3079c6fd2807SJeff Garzik  */
3080d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3081c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3082c6fd2807SJeff Garzik {
3083c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3084c6fd2807SJeff Garzik 	u8 status;
3085c6fd2807SJeff Garzik 
3086c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3087c6fd2807SJeff Garzik 	timer_start = jiffies;
3088c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3089d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3090d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3091c6fd2807SJeff Garzik 		msleep(50);
3092c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3093c6fd2807SJeff Garzik 	}
3094c6fd2807SJeff Garzik 
3095d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3096c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
309735aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
309835aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3099c6fd2807SJeff Garzik 
3100c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3101d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3102d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3103c6fd2807SJeff Garzik 		msleep(50);
3104c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3105c6fd2807SJeff Garzik 	}
3106c6fd2807SJeff Garzik 
3107d1adc1bbSTejun Heo 	if (status == 0xff)
3108d1adc1bbSTejun Heo 		return -ENODEV;
3109d1adc1bbSTejun Heo 
3110c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3111c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
311235aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
311335aa7a43SJeff Garzik 				tmout / HZ, status);
3114d1adc1bbSTejun Heo 		return -EBUSY;
3115c6fd2807SJeff Garzik 	}
3116c6fd2807SJeff Garzik 
3117c6fd2807SJeff Garzik 	return 0;
3118c6fd2807SJeff Garzik }
3119c6fd2807SJeff Garzik 
3120d4b2bab4STejun Heo /**
3121d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3122d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3123d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3124d4b2bab4STejun Heo  *
3125d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3126d4b2bab4STejun Heo  *	occurs.
3127d4b2bab4STejun Heo  *
3128d4b2bab4STejun Heo  *	LOCKING:
3129d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3130d4b2bab4STejun Heo  *
3131d4b2bab4STejun Heo  *	RETURNS:
3132d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3133d4b2bab4STejun Heo  */
3134d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3135d4b2bab4STejun Heo {
3136d4b2bab4STejun Heo 	unsigned long start = jiffies;
3137d4b2bab4STejun Heo 	int warned = 0;
3138d4b2bab4STejun Heo 
3139d4b2bab4STejun Heo 	while (1) {
3140d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3141d4b2bab4STejun Heo 		unsigned long now = jiffies;
3142d4b2bab4STejun Heo 
3143d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3144d4b2bab4STejun Heo 			return 0;
3145936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3146d4b2bab4STejun Heo 			return -ENODEV;
3147d4b2bab4STejun Heo 		if (time_after(now, deadline))
3148d4b2bab4STejun Heo 			return -EBUSY;
3149d4b2bab4STejun Heo 
3150d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3151d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3152d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3153d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3154d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3155d4b2bab4STejun Heo 			warned = 1;
3156d4b2bab4STejun Heo 		}
3157d4b2bab4STejun Heo 
3158d4b2bab4STejun Heo 		msleep(50);
3159d4b2bab4STejun Heo 	}
3160d4b2bab4STejun Heo }
3161d4b2bab4STejun Heo 
3162d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3163d4b2bab4STejun Heo 			      unsigned long deadline)
3164c6fd2807SJeff Garzik {
3165c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3166c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3167c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
31689b89391cSTejun Heo 	int rc, ret = 0;
3169c6fd2807SJeff Garzik 
3170c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3171c6fd2807SJeff Garzik 	 * BSY bit to clear
3172c6fd2807SJeff Garzik 	 */
3173d4b2bab4STejun Heo 	if (dev0) {
3174d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
31759b89391cSTejun Heo 		if (rc) {
31769b89391cSTejun Heo 			if (rc != -ENODEV)
3177d4b2bab4STejun Heo 				return rc;
31789b89391cSTejun Heo 			ret = rc;
31799b89391cSTejun Heo 		}
3180d4b2bab4STejun Heo 	}
3181c6fd2807SJeff Garzik 
3182e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3183e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3184c6fd2807SJeff Garzik 	 */
3185e141d999STejun Heo 	if (dev1) {
3186e141d999STejun Heo 		int i;
3187c6fd2807SJeff Garzik 
3188c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3189e141d999STejun Heo 
3190e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3191e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3192e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3193e141d999STejun Heo 		 */
3194e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3195e141d999STejun Heo 			u8 nsect, lbal;
3196e141d999STejun Heo 
31970d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
31980d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3199c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3200c6fd2807SJeff Garzik 				break;
3201c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3202c6fd2807SJeff Garzik 		}
3203e141d999STejun Heo 
3204d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
32059b89391cSTejun Heo 		if (rc) {
32069b89391cSTejun Heo 			if (rc != -ENODEV)
3207d4b2bab4STejun Heo 				return rc;
32089b89391cSTejun Heo 			ret = rc;
32099b89391cSTejun Heo 		}
3210d4b2bab4STejun Heo 	}
3211c6fd2807SJeff Garzik 
3212c6fd2807SJeff Garzik 	/* is all this really necessary? */
3213c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3214c6fd2807SJeff Garzik 	if (dev1)
3215c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3216c6fd2807SJeff Garzik 	if (dev0)
3217c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3218d4b2bab4STejun Heo 
32199b89391cSTejun Heo 	return ret;
3220c6fd2807SJeff Garzik }
3221c6fd2807SJeff Garzik 
3222d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3223d4b2bab4STejun Heo 			     unsigned long deadline)
3224c6fd2807SJeff Garzik {
3225c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3226681c80b5SAlan Cox 	struct ata_device *dev;
3227681c80b5SAlan Cox 	int i = 0;
3228c6fd2807SJeff Garzik 
322944877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3230c6fd2807SJeff Garzik 
3231c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
32320d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3233c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
32340d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3235c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
32360d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3237c6fd2807SJeff Garzik 
3238681c80b5SAlan Cox 	/* If we issued an SRST then an ATA drive (not ATAPI)
3239681c80b5SAlan Cox 	 * may have changed configuration and be in PIO0 timing. If
3240681c80b5SAlan Cox 	 * we did a hard reset (or are coming from power on) this is
3241681c80b5SAlan Cox 	 * true for ATA or ATAPI. Until we've set a suitable controller
3242681c80b5SAlan Cox 	 * mode we should not touch the bus as we may be talking too fast.
3243681c80b5SAlan Cox 	 */
3244681c80b5SAlan Cox 
3245681c80b5SAlan Cox 	ata_link_for_each_dev(dev, &ap->link)
3246681c80b5SAlan Cox 		dev->pio_mode = XFER_PIO_0;
3247681c80b5SAlan Cox 
3248681c80b5SAlan Cox 	/* If the controller has a pio mode setup function then use
3249681c80b5SAlan Cox 	   it to set the chipset to rights. Don't touch the DMA setup
3250681c80b5SAlan Cox 	   as that will be dealt with when revalidating */
3251681c80b5SAlan Cox 	if (ap->ops->set_piomode) {
3252681c80b5SAlan Cox 		ata_link_for_each_dev(dev, &ap->link)
3253681c80b5SAlan Cox 			if (devmask & (1 << i++))
3254681c80b5SAlan Cox 				ap->ops->set_piomode(ap, dev);
3255681c80b5SAlan Cox 	}
3256681c80b5SAlan Cox 
3257c6fd2807SJeff Garzik 	/* spec mandates ">= 2ms" before checking status.
3258c6fd2807SJeff Garzik 	 * We wait 150ms, because that was the magic delay used for
3259c6fd2807SJeff Garzik 	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3260c6fd2807SJeff Garzik 	 * between when the ATA command register is written, and then
3261c6fd2807SJeff Garzik 	 * status is checked.  Because waiting for "a while" before
3262c6fd2807SJeff Garzik 	 * checking status is fine, post SRST, we perform this magic
3263c6fd2807SJeff Garzik 	 * delay here as well.
3264c6fd2807SJeff Garzik 	 *
3265c6fd2807SJeff Garzik 	 * Old drivers/ide uses the 2mS rule and then waits for ready
3266c6fd2807SJeff Garzik 	 */
3267c6fd2807SJeff Garzik 	msleep(150);
3268c6fd2807SJeff Garzik 
3269c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3270c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3271c6fd2807SJeff Garzik 	 * pulldown resistor.
3272c6fd2807SJeff Garzik 	 */
3273150981b0SAlan Cox 	if (ata_chk_status(ap) == 0xFF)
32749b89391cSTejun Heo 		return -ENODEV;
3275c6fd2807SJeff Garzik 
3276d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3277c6fd2807SJeff Garzik }
3278c6fd2807SJeff Garzik 
3279c6fd2807SJeff Garzik /**
3280c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3281c6fd2807SJeff Garzik  *	@ap: port to reset
3282c6fd2807SJeff Garzik  *
3283c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3284c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3285c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3286c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3287c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3288c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3289c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3290c6fd2807SJeff Garzik  *
3291c6fd2807SJeff Garzik  *	LOCKING:
3292c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3293cca3974eSJeff Garzik  *	Obtains host lock.
3294c6fd2807SJeff Garzik  *
3295c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3296c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3297c6fd2807SJeff Garzik  */
3298c6fd2807SJeff Garzik 
3299c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3300c6fd2807SJeff Garzik {
33019af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3302c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3303c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3304c6fd2807SJeff Garzik 	u8 err;
3305c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
33069b89391cSTejun Heo 	int rc;
3307c6fd2807SJeff Garzik 
330844877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3309c6fd2807SJeff Garzik 
3310c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3311c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3312c6fd2807SJeff Garzik 		dev0 = 1;
3313c6fd2807SJeff Garzik 	else {
3314c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3315c6fd2807SJeff Garzik 		if (slave_possible)
3316c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3317c6fd2807SJeff Garzik 	}
3318c6fd2807SJeff Garzik 
3319c6fd2807SJeff Garzik 	if (dev0)
3320c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3321c6fd2807SJeff Garzik 	if (dev1)
3322c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3323c6fd2807SJeff Garzik 
3324c6fd2807SJeff Garzik 	/* select device 0 again */
3325c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3326c6fd2807SJeff Garzik 
3327c6fd2807SJeff Garzik 	/* issue bus reset */
33289b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
33299b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
33309b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3331c6fd2807SJeff Garzik 			goto err_out;
33329b89391cSTejun Heo 	}
3333c6fd2807SJeff Garzik 
3334c6fd2807SJeff Garzik 	/*
3335c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3336c6fd2807SJeff Garzik 	 */
33373f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3338c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
33393f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3340c6fd2807SJeff Garzik 
3341c6fd2807SJeff Garzik 	/* is double-select really necessary? */
33429af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3343c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
33449af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3345c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3346c6fd2807SJeff Garzik 
3347c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
33489af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
33499af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3350c6fd2807SJeff Garzik 		goto err_out;
3351c6fd2807SJeff Garzik 
3352c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3353c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
33540d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3355c6fd2807SJeff Garzik 	}
3356c6fd2807SJeff Garzik 
3357c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3358c6fd2807SJeff Garzik 	return;
3359c6fd2807SJeff Garzik 
3360c6fd2807SJeff Garzik err_out:
3361c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3362ac8869d5SJeff Garzik 	ata_port_disable(ap);
3363c6fd2807SJeff Garzik 
3364c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3365c6fd2807SJeff Garzik }
3366c6fd2807SJeff Garzik 
3367c6fd2807SJeff Garzik /**
3368936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3369936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3370c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3371d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3372c6fd2807SJeff Garzik  *
3373936fd732STejun Heo  *	Make sure SStatus of @link reaches a stable state, determined by
3374c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration, polled
3375c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3376d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3377d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3378c6fd2807SJeff Garzik  *	until the timeout and then returns 0 if DET is stable at 1.
3379c6fd2807SJeff Garzik  *
3380d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3381d4b2bab4STejun Heo  *	two is used.
3382d4b2bab4STejun Heo  *
3383c6fd2807SJeff Garzik  *	LOCKING:
3384c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3385c6fd2807SJeff Garzik  *
3386c6fd2807SJeff Garzik  *	RETURNS:
3387c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3388c6fd2807SJeff Garzik  */
3389936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3390d4b2bab4STejun Heo 		       unsigned long deadline)
3391c6fd2807SJeff Garzik {
3392c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3393d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3394d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3395c6fd2807SJeff Garzik 	u32 last, cur;
3396c6fd2807SJeff Garzik 	int rc;
3397c6fd2807SJeff Garzik 
3398d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3399d4b2bab4STejun Heo 	if (time_before(t, deadline))
3400d4b2bab4STejun Heo 		deadline = t;
3401d4b2bab4STejun Heo 
3402936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3403c6fd2807SJeff Garzik 		return rc;
3404c6fd2807SJeff Garzik 	cur &= 0xf;
3405c6fd2807SJeff Garzik 
3406c6fd2807SJeff Garzik 	last = cur;
3407c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3408c6fd2807SJeff Garzik 
3409c6fd2807SJeff Garzik 	while (1) {
3410c6fd2807SJeff Garzik 		msleep(interval_msec);
3411936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3412c6fd2807SJeff Garzik 			return rc;
3413c6fd2807SJeff Garzik 		cur &= 0xf;
3414c6fd2807SJeff Garzik 
3415c6fd2807SJeff Garzik 		/* DET stable? */
3416c6fd2807SJeff Garzik 		if (cur == last) {
3417d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3418c6fd2807SJeff Garzik 				continue;
3419c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3420c6fd2807SJeff Garzik 				return 0;
3421c6fd2807SJeff Garzik 			continue;
3422c6fd2807SJeff Garzik 		}
3423c6fd2807SJeff Garzik 
3424c6fd2807SJeff Garzik 		/* unstable, start over */
3425c6fd2807SJeff Garzik 		last = cur;
3426c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3427c6fd2807SJeff Garzik 
3428f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3429f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3430f1545154STejun Heo 		 */
3431d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3432f1545154STejun Heo 			return -EPIPE;
3433c6fd2807SJeff Garzik 	}
3434c6fd2807SJeff Garzik }
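
/* Editorial illustration with hypothetical numbers: params of
 * { 10, 100, 1000 } would poll SStatus every 10 ms, require DET to hold
 * a stable value for 100 ms, and cap the wait at 1000 ms (further
 * limited by @deadline).
 */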
3435c6fd2807SJeff Garzik 
3436c6fd2807SJeff Garzik /**
3437936fd732STejun Heo  *	sata_link_resume - resume SATA link
3438936fd732STejun Heo  *	@link: ATA link to resume SATA
3439c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3440d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3441c6fd2807SJeff Garzik  *
3442936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3443c6fd2807SJeff Garzik  *
3444c6fd2807SJeff Garzik  *	LOCKING:
3445c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3446c6fd2807SJeff Garzik  *
3447c6fd2807SJeff Garzik  *	RETURNS:
3448c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3449c6fd2807SJeff Garzik  */
3450936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3451d4b2bab4STejun Heo 		     unsigned long deadline)
3452c6fd2807SJeff Garzik {
3453c6fd2807SJeff Garzik 	u32 scontrol;
3454c6fd2807SJeff Garzik 	int rc;
3455c6fd2807SJeff Garzik 
3456936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3457c6fd2807SJeff Garzik 		return rc;
3458c6fd2807SJeff Garzik 
3459c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3460c6fd2807SJeff Garzik 
3461936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3462c6fd2807SJeff Garzik 		return rc;
3463c6fd2807SJeff Garzik 
3464c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3465c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3466c6fd2807SJeff Garzik 	 */
3467c6fd2807SJeff Garzik 	msleep(200);
3468c6fd2807SJeff Garzik 
3469936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3470c6fd2807SJeff Garzik }
3471c6fd2807SJeff Garzik 
3472c6fd2807SJeff Garzik /**
3473c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3474cc0680a5STejun Heo  *	@link: ATA link to be reset
3475d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3476c6fd2807SJeff Garzik  *
3477cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  Failure from
3478b8cffc6aSTejun Heo  *	prereset makes libata abort the whole reset sequence and give
3479b8cffc6aSTejun Heo  *	up that port, so prereset should be best-effort.  It does its
3480b8cffc6aSTejun Heo  *	best to prepare for the reset sequence, but if things go wrong
3481b8cffc6aSTejun Heo  *	it should just whine, not fail.
3482c6fd2807SJeff Garzik  *
3483c6fd2807SJeff Garzik  *	LOCKING:
3484c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3485c6fd2807SJeff Garzik  *
3486c6fd2807SJeff Garzik  *	RETURNS:
3487c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3488c6fd2807SJeff Garzik  */
3489cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3490c6fd2807SJeff Garzik {
3491cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3492936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3493c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3494c6fd2807SJeff Garzik 	int rc;
3495c6fd2807SJeff Garzik 
349631daabdaSTejun Heo 	/* handle link resume */
3497c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
34980c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3499c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3500c6fd2807SJeff Garzik 
3501633273a3STejun Heo 	/* Some PMPs don't work with only SRST; force hardreset if PMP
3502633273a3STejun Heo 	 * is supported.
3503633273a3STejun Heo 	 */
3504633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP)
3505633273a3STejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
3506633273a3STejun Heo 
3507c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3508c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3509c6fd2807SJeff Garzik 		return 0;
3510c6fd2807SJeff Garzik 
3511936fd732STejun Heo 	/* if SATA, resume link */
3512a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3513936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3514b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3515b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3516cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3517c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3518c6fd2807SJeff Garzik 	}
3519c6fd2807SJeff Garzik 
3520c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3521c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3522c6fd2807SJeff Garzik 	 */
35230c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3524b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
35256dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3526cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3527b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3528b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3529b8cffc6aSTejun Heo 		}
3530b8cffc6aSTejun Heo 	}
3531c6fd2807SJeff Garzik 
3532c6fd2807SJeff Garzik 	return 0;
3533c6fd2807SJeff Garzik }
3534c6fd2807SJeff Garzik 
3535c6fd2807SJeff Garzik /**
3536c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3537cc0680a5STejun Heo  *	@link: ATA link to reset
3538c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3539d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3540c6fd2807SJeff Garzik  *
3541c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3542c6fd2807SJeff Garzik  *
3543c6fd2807SJeff Garzik  *	LOCKING:
3544c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3545c6fd2807SJeff Garzik  *
3546c6fd2807SJeff Garzik  *	RETURNS:
3547c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3548c6fd2807SJeff Garzik  */
3549cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3550d4b2bab4STejun Heo 		      unsigned long deadline)
3551c6fd2807SJeff Garzik {
3552cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3553c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3554d4b2bab4STejun Heo 	unsigned int devmask = 0;
3555d4b2bab4STejun Heo 	int rc;
3556c6fd2807SJeff Garzik 	u8 err;
3557c6fd2807SJeff Garzik 
3558c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3559c6fd2807SJeff Garzik 
3560936fd732STejun Heo 	if (ata_link_offline(link)) {
3561c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3562c6fd2807SJeff Garzik 		goto out;
3563c6fd2807SJeff Garzik 	}
3564c6fd2807SJeff Garzik 
3565c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3566c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3567c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3568c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3569c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3570c6fd2807SJeff Garzik 
3571c6fd2807SJeff Garzik 	/* select device 0 again */
3572c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3573c6fd2807SJeff Garzik 
3574c6fd2807SJeff Garzik 	/* issue bus reset */
3575c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3576d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
35779b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
3578936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3579cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3580d4b2bab4STejun Heo 		return rc;
3581c6fd2807SJeff Garzik 	}
3582c6fd2807SJeff Garzik 
3583c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
35843f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
35853f19859eSTejun Heo 					  devmask & (1 << 0), &err);
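	/* A diagnostic result of 0x81 means device 0 passed but device 1
	 * failed or is absent, so the slave is not classified in that case.
	 */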
3586c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
35873f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
35883f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3589c6fd2807SJeff Garzik 
3590c6fd2807SJeff Garzik  out:
3591c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3592c6fd2807SJeff Garzik 	return 0;
3593c6fd2807SJeff Garzik }
3594c6fd2807SJeff Garzik 
3595c6fd2807SJeff Garzik /**
3596cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
3597cc0680a5STejun Heo  *	@link: link to reset
3598b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3599d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3600c6fd2807SJeff Garzik  *
3601cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
3602c6fd2807SJeff Garzik  *
3603c6fd2807SJeff Garzik  *	LOCKING:
3604c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3605c6fd2807SJeff Garzik  *
3606c6fd2807SJeff Garzik  *	RETURNS:
3607c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3608c6fd2807SJeff Garzik  */
3609cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3610d4b2bab4STejun Heo 			unsigned long deadline)
3611c6fd2807SJeff Garzik {
3612c6fd2807SJeff Garzik 	u32 scontrol;
3613c6fd2807SJeff Garzik 	int rc;
3614c6fd2807SJeff Garzik 
3615c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3616c6fd2807SJeff Garzik 
3617936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
3618c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3619c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3620c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3621c6fd2807SJeff Garzik 		 * and Sil3124.
3622c6fd2807SJeff Garzik 		 */
3623936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3624b6103f6dSTejun Heo 			goto out;
3625c6fd2807SJeff Garzik 
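		/* SControl: keep SPD (bits 7:4), set DET (bits 3:0) to 4 to
		 * take the phy offline while the speed limit is changed.
		 */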
3626cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3627c6fd2807SJeff Garzik 
3628936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3629b6103f6dSTejun Heo 			goto out;
3630c6fd2807SJeff Garzik 
3631936fd732STejun Heo 		sata_set_spd(link);
3632c6fd2807SJeff Garzik 	}
3633c6fd2807SJeff Garzik 
3634c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3635936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3636b6103f6dSTejun Heo 		goto out;
3637c6fd2807SJeff Garzik 
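	/* SControl: keep SPD, set DET to 1 to request COMRESET and IPM
	 * (bits 11:8) to 3 to disable partial/slumber transitions.
	 */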
3638c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3639c6fd2807SJeff Garzik 
3640936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3641b6103f6dSTejun Heo 		goto out;
3642c6fd2807SJeff Garzik 
3643c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3644c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3645c6fd2807SJeff Garzik 	 */
3646c6fd2807SJeff Garzik 	msleep(1);
3647c6fd2807SJeff Garzik 
3648936fd732STejun Heo 	/* bring link back */
3649936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
3650b6103f6dSTejun Heo  out:
3651b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3652b6103f6dSTejun Heo 	return rc;
3653b6103f6dSTejun Heo }
3654b6103f6dSTejun Heo 
3655b6103f6dSTejun Heo /**
3656b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3657cc0680a5STejun Heo  *	@link: link to reset
3658b6103f6dSTejun Heo  *	@class: resulting class of attached device
3659d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3660b6103f6dSTejun Heo  *
3661b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3662b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3663b6103f6dSTejun Heo  *
3664b6103f6dSTejun Heo  *	LOCKING:
3665b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3666b6103f6dSTejun Heo  *
3667b6103f6dSTejun Heo  *	RETURNS:
3668b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3669b6103f6dSTejun Heo  */
3670cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3671d4b2bab4STejun Heo 		       unsigned long deadline)
3672b6103f6dSTejun Heo {
3673cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3674936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3675b6103f6dSTejun Heo 	int rc;
3676b6103f6dSTejun Heo 
3677b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3678b6103f6dSTejun Heo 
3679b6103f6dSTejun Heo 	/* do hardreset */
3680cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
3681b6103f6dSTejun Heo 	if (rc) {
3682cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3683b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3684b6103f6dSTejun Heo 		return rc;
3685b6103f6dSTejun Heo 	}
3686c6fd2807SJeff Garzik 
3687c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3688936fd732STejun Heo 	if (ata_link_offline(link)) {
3689c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3690c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3691c6fd2807SJeff Garzik 		return 0;
3692c6fd2807SJeff Garzik 	}
3693c6fd2807SJeff Garzik 
369434fee227STejun Heo 	/* wait a while before checking status, see SRST for more info */
369534fee227STejun Heo 	msleep(150);
369634fee227STejun Heo 
3697633273a3STejun Heo 	/* If PMP is supported, we have to do follow-up SRST.  Note
3698633273a3STejun Heo 	 * that some PMPs don't send D2H Reg FIS after hardreset at
3699633273a3STejun Heo 	 * all if the first port is empty.  Wait for it just for a
3700633273a3STejun Heo 	 * second and request follow-up SRST.
3701633273a3STejun Heo 	 */
3702633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP) {
3703633273a3STejun Heo 		ata_wait_ready(ap, jiffies + HZ);
3704633273a3STejun Heo 		return -EAGAIN;
3705633273a3STejun Heo 	}
3706633273a3STejun Heo 
3707d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
37089b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
37099b89391cSTejun Heo 	if (rc) {
3710cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3711d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3712d4b2bab4STejun Heo 		return rc;
3713c6fd2807SJeff Garzik 	}
3714c6fd2807SJeff Garzik 
3715c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3716c6fd2807SJeff Garzik 
37173f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
3718c6fd2807SJeff Garzik 
3719c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3720c6fd2807SJeff Garzik 	return 0;
3721c6fd2807SJeff Garzik }
3722c6fd2807SJeff Garzik 
3723c6fd2807SJeff Garzik /**
3724c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3725cc0680a5STejun Heo  *	@link: the target ata_link
3726c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3727c6fd2807SJeff Garzik  *
3728c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3729c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3730c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3731c6fd2807SJeff Garzik  *
3732c6fd2807SJeff Garzik  *	LOCKING:
3733c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3734c6fd2807SJeff Garzik  */
3735cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3736c6fd2807SJeff Garzik {
3737cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3738c6fd2807SJeff Garzik 	u32 serror;
3739c6fd2807SJeff Garzik 
3740c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3741c6fd2807SJeff Garzik 
3742c6fd2807SJeff Garzik 	/* print link status */
3743936fd732STejun Heo 	sata_print_link_status(link);
3744c6fd2807SJeff Garzik 
3745c6fd2807SJeff Garzik 	/* clear SError */
3746936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3747936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
3748c6fd2807SJeff Garzik 
3749c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3750c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3751c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3752c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3753c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3754c6fd2807SJeff Garzik 
3755c6fd2807SJeff Garzik 	/* bail out if no device is present */
3756c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3757c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3758c6fd2807SJeff Garzik 		return;
3759c6fd2807SJeff Garzik 	}
3760c6fd2807SJeff Garzik 
3761c6fd2807SJeff Garzik 	/* set up device control */
37620d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
37630d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3764c6fd2807SJeff Garzik 
3765c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3766c6fd2807SJeff Garzik }
3767c6fd2807SJeff Garzik 
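/*
 * Illustrative sketch (not part of libata): how a low-level driver of
 * this era might plug the standard reset callbacks above into its EH
 * path.  The driver function name is hypothetical and the ata_do_eh()
 * signature is assumed; consult the EH core for the exact interface.
 *
 *	static void example_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 */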
3768c6fd2807SJeff Garzik /**
3769c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3770c6fd2807SJeff Garzik  *	@dev: device to compare against
3771c6fd2807SJeff Garzik  *	@new_class: class of the new device
3772c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3773c6fd2807SJeff Garzik  *
3774c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3775c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3776c6fd2807SJeff Garzik  *	@new_id.
3777c6fd2807SJeff Garzik  *
3778c6fd2807SJeff Garzik  *	LOCKING:
3779c6fd2807SJeff Garzik  *	None.
3780c6fd2807SJeff Garzik  *
3781c6fd2807SJeff Garzik  *	RETURNS:
3782c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3783c6fd2807SJeff Garzik  */
3784c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3785c6fd2807SJeff Garzik 			       const u16 *new_id)
3786c6fd2807SJeff Garzik {
3787c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3788a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3789a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3790c6fd2807SJeff Garzik 
3791c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3792c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3793c6fd2807SJeff Garzik 			       dev->class, new_class);
3794c6fd2807SJeff Garzik 		return 0;
3795c6fd2807SJeff Garzik 	}
3796c6fd2807SJeff Garzik 
3797a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3798a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3799a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3800a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3801c6fd2807SJeff Garzik 
3802c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3803c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3804c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3805c6fd2807SJeff Garzik 		return 0;
3806c6fd2807SJeff Garzik 	}
3807c6fd2807SJeff Garzik 
3808c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3809c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3810c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3811c6fd2807SJeff Garzik 		return 0;
3812c6fd2807SJeff Garzik 	}
3813c6fd2807SJeff Garzik 
3814c6fd2807SJeff Garzik 	return 1;
3815c6fd2807SJeff Garzik }
3816c6fd2807SJeff Garzik 
3817c6fd2807SJeff Garzik /**
3818fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
38193fae450cSHenrik Kretzschmar  *	@dev: target ATA device
3820bff04647STejun Heo  *	@readid_flags: read ID flags
3821c6fd2807SJeff Garzik  *
3822c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
3823c6fd2807SJeff Garzik  *	the port.
3824c6fd2807SJeff Garzik  *
3825c6fd2807SJeff Garzik  *	LOCKING:
3826c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3827c6fd2807SJeff Garzik  *
3828c6fd2807SJeff Garzik  *	RETURNS:
3829c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3830c6fd2807SJeff Garzik  */
3831fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3832c6fd2807SJeff Garzik {
3833c6fd2807SJeff Garzik 	unsigned int class = dev->class;
38349af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
3835c6fd2807SJeff Garzik 	int rc;
3836c6fd2807SJeff Garzik 
3837c6fd2807SJeff Garzik 	/* read ID data */
3838bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3839c6fd2807SJeff Garzik 	if (rc)
3840fe30911bSTejun Heo 		return rc;
3841c6fd2807SJeff Garzik 
3842c6fd2807SJeff Garzik 	/* is the device still there? */
3843fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
3844fe30911bSTejun Heo 		return -ENODEV;
3845c6fd2807SJeff Garzik 
3846c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3847fe30911bSTejun Heo 	return 0;
3848fe30911bSTejun Heo }
3849fe30911bSTejun Heo 
3850fe30911bSTejun Heo /**
3851fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
3852fe30911bSTejun Heo  *	@dev: device to revalidate
3853422c9daaSTejun Heo  *	@new_class: new class code
3854fe30911bSTejun Heo  *	@readid_flags: read ID flags
3855fe30911bSTejun Heo  *
3856fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3857fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
3858fe30911bSTejun Heo  *
3859fe30911bSTejun Heo  *	LOCKING:
3860fe30911bSTejun Heo  *	Kernel thread context (may sleep)
3861fe30911bSTejun Heo  *
3862fe30911bSTejun Heo  *	RETURNS:
3863fe30911bSTejun Heo  *	0 on success, negative errno otherwise
3864fe30911bSTejun Heo  */
3865422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3866422c9daaSTejun Heo 		       unsigned int readid_flags)
3867fe30911bSTejun Heo {
38686ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
3869fe30911bSTejun Heo 	int rc;
3870fe30911bSTejun Heo 
3871fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
3872fe30911bSTejun Heo 		return -ENODEV;
3873fe30911bSTejun Heo 
3874422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3875422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
3876422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3877422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3878422c9daaSTejun Heo 			       dev->class, new_class);
3879422c9daaSTejun Heo 		rc = -ENODEV;
3880422c9daaSTejun Heo 		goto fail;
3881422c9daaSTejun Heo 	}
3882422c9daaSTejun Heo 
3883fe30911bSTejun Heo 	/* re-read ID */
3884fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
3885fe30911bSTejun Heo 	if (rc)
3886fe30911bSTejun Heo 		goto fail;
3887c6fd2807SJeff Garzik 
3888c6fd2807SJeff Garzik 	/* configure device according to the new ID */
3889efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
38906ddcd3b0STejun Heo 	if (rc)
38916ddcd3b0STejun Heo 		goto fail;
38926ddcd3b0STejun Heo 
38936ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
3894b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3895b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
38966ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
38976ddcd3b0STejun Heo 			       "%llu != %llu\n",
38986ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
38996ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
39008270bec4STejun Heo 
39018270bec4STejun Heo 		/* restore original n_sectors */
39028270bec4STejun Heo 		dev->n_sectors = n_sectors;
39038270bec4STejun Heo 
39046ddcd3b0STejun Heo 		rc = -ENODEV;
39056ddcd3b0STejun Heo 		goto fail;
39066ddcd3b0STejun Heo 	}
39076ddcd3b0STejun Heo 
3908c6fd2807SJeff Garzik 	return 0;
3909c6fd2807SJeff Garzik 
3910c6fd2807SJeff Garzik  fail:
3911c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3912c6fd2807SJeff Garzik 	return rc;
3913c6fd2807SJeff Garzik }
3914c6fd2807SJeff Garzik 
39156919a0a6SAlan Cox struct ata_blacklist_entry {
39166919a0a6SAlan Cox 	const char *model_num;
39176919a0a6SAlan Cox 	const char *model_rev;
39186919a0a6SAlan Cox 	unsigned long horkage;
39196919a0a6SAlan Cox };
39206919a0a6SAlan Cox 
39216919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
39226919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
39236919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
39246919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
39256919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
39266919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
39276919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
39286919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
39296919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
39306919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
39316919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
39326919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
39336919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
39346919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
39356919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
39366919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
39376919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
39386919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
39396919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
39406919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
39416919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
39426919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
39436919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
39446919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
39456919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
39466919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
39476919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
39486919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
39496919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
39506919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
39516919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
395239f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
39533af9a77aSTejun Heo 	/* Odd clown on sil3726/4726 PMPs */
39543af9a77aSTejun Heo 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
39553af9a77aSTejun Heo 						ATA_HORKAGE_SKIP_PM },
39566919a0a6SAlan Cox 
395718d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
395840a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
395918d6e9d5SAlbert Lee 
39606919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
39616919a0a6SAlan Cox 
39626919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
39636919a0a6SAlan Cox 	/* NCQ is slow */
39646919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
396509125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
396609125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
39677acfaf30SPaul Rolland 	/* NCQ is broken */
3968539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
39690e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
39700b0a43e0SDavid Milburn 	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
39710b0a43e0SDavid Milburn 	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
3972da6f0ec2SPaolo Ornati 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
3973539cc7c7SJeff Garzik 
397436e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
397536e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
397636e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
397736e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
397836e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3979bd9c5a39STejun Heo 	/* Drives which do spurious command completion */
3980bd9c5a39STejun Heo 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
39812f8fcebbSTejun Heo 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
398270edb185STejun Heo 	{ "HDT722516DLA380",	"V43OA96A",	ATA_HORKAGE_NONCQ, },
3983e14cbfa6STejun Heo 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
39840c173174STejun Heo 	{ "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
39852f8fcebbSTejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
39867f567620STejun Heo 	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
3987a520f261STejun Heo 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
39887f567620STejun Heo 	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
39893fb6589cSTejun Heo 	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
3990954bb005STejun Heo 	{ "ST9160821AS",	"3.ALD",	ATA_HORKAGE_NONCQ, },
399113587960STejun Heo 	{ "ST9160821AS",	"3.CCD",	ATA_HORKAGE_NONCQ, },
39927f567620STejun Heo 	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
39937f567620STejun Heo 	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
39945d6aca8dSTejun Heo 	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
39956919a0a6SAlan Cox 
399616c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
399716c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
399816c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
399916c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
400016c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
40016919a0a6SAlan Cox 
400293328e11SAlan Cox 	/* Devices which report 1 sector over size HPA */
400393328e11SAlan Cox 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
400493328e11SAlan Cox 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
400593328e11SAlan Cox 
40066919a0a6SAlan Cox 	/* End Marker */
40076919a0a6SAlan Cox 	{ }
4008c6fd2807SJeff Garzik };
4009c6fd2807SJeff Garzik 
4010741b7763SAdrian Bunk static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4011539cc7c7SJeff Garzik {
4012539cc7c7SJeff Garzik 	const char *p;
4013539cc7c7SJeff Garzik 	int len;
4014539cc7c7SJeff Garzik 
4015539cc7c7SJeff Garzik 	/*
4016539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
4017539cc7c7SJeff Garzik 	 */
4018539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
4019539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
4020539cc7c7SJeff Garzik 		len = p - patt;
4021317b50b8SAndrew Paprocki 	else {
4022539cc7c7SJeff Garzik 		len = strlen(name);
4023317b50b8SAndrew Paprocki 		if (!len) {
4024317b50b8SAndrew Paprocki 			if (!*patt)
4025317b50b8SAndrew Paprocki 				return 0;
4026317b50b8SAndrew Paprocki 			return -1;
4027317b50b8SAndrew Paprocki 		}
4028317b50b8SAndrew Paprocki 	}
4029539cc7c7SJeff Garzik 
4030539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
4031539cc7c7SJeff Garzik }
4032539cc7c7SJeff Garzik 
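/*
 * Matching note: strn_pattern_cmp() honors only a single trailing
 * wildcard.  A blacklist pattern such as "Maxtor *" compares just the
 * leading "Maxtor " and therefore matches any Maxtor model string,
 * while a pattern without '*' is compared over the full length of
 * @name.
 */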
403375683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4034c6fd2807SJeff Garzik {
40358bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
40368bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
40376919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4038c6fd2807SJeff Garzik 
40398bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
40408bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4041c6fd2807SJeff Garzik 
40426919a0a6SAlan Cox 	while (ad->model_num) {
4043539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
40446919a0a6SAlan Cox 			if (ad->model_rev == NULL)
40456919a0a6SAlan Cox 				return ad->horkage;
4046539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
40476919a0a6SAlan Cox 				return ad->horkage;
4048c6fd2807SJeff Garzik 		}
40496919a0a6SAlan Cox 		ad++;
4050c6fd2807SJeff Garzik 	}
4051c6fd2807SJeff Garzik 	return 0;
4052c6fd2807SJeff Garzik }
4053c6fd2807SJeff Garzik 
40546919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
40556919a0a6SAlan Cox {
40566919a0a6SAlan Cox 	/* We don't support polling DMA.
40576919a0a6SAlan Cox 	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
40586919a0a6SAlan Cox 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
40596919a0a6SAlan Cox 	 */
40609af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
40616919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
40626919a0a6SAlan Cox 		return 1;
406375683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
40646919a0a6SAlan Cox }
40656919a0a6SAlan Cox 
4066c6fd2807SJeff Garzik /**
4067c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
4068c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
4069c6fd2807SJeff Garzik  *
4070c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
4071c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
4072c6fd2807SJeff Garzik  *	known limits including host controller limits, device
4073c6fd2807SJeff Garzik  *	blacklist, etc...
4074c6fd2807SJeff Garzik  *
4075c6fd2807SJeff Garzik  *	LOCKING:
4076c6fd2807SJeff Garzik  *	None.
4077c6fd2807SJeff Garzik  */
4078c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
4079c6fd2807SJeff Garzik {
40809af5c9c9STejun Heo 	struct ata_link *link = dev->link;
40819af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
4082cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
4083c6fd2807SJeff Garzik 	unsigned long xfer_mask;
4084c6fd2807SJeff Garzik 
4085c6fd2807SJeff Garzik 	/* controller modes available */
4086c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4087c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
4088c6fd2807SJeff Garzik 
40898343f889SRobert Hancock 	/* drive modes available */
4090c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4091c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
4092c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
4093c6fd2807SJeff Garzik 
4094b352e57dSAlan Cox 	/*
4095b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4096b352e57dSAlan Cox 	 *	cable
4097b352e57dSAlan Cox 	 */
4098b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
4099b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
4100b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4101b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA4 */
4102b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4103b352e57dSAlan Cox 	}
4104b352e57dSAlan Cox 
4105c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
4106c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4107c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4108c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4109c6fd2807SJeff Garzik 	}
4110c6fd2807SJeff Garzik 
411114d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
411214d66ab7SPetr Vandrovec 	    host->simplex_claimed && host->simplex_claimed != ap) {
4113c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4114c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4115c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4116c6fd2807SJeff Garzik 	}
4117c6fd2807SJeff Garzik 
4118e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4119e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4120e424675fSJeff Garzik 
4121c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4122a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4123c6fd2807SJeff Garzik 
41248343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
41258343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
41268343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
41278343f889SRobert Hancock 	 * solely limited by the cable.
41288343f889SRobert Hancock 	 * Unknown or 80-wire cables reported host side are checked
41298343f889SRobert Hancock 	 * drive side as well.  Cases where we know a 40-wire cable
41308343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
41318343f889SRobert Hancock 	 */
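	/* 0xF8 << ATA_SHIFT_UDMA covers UDMA3 and above; UDMA3 (UDMA/44)
	 * is the first mode that needs an 80-wire cable, so clearing those
	 * bits below caps the device at UDMA/33.
	 */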
41328343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
41338343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
41348343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
41358343f889SRobert Hancock 		    (ata_drive_40wire(dev->id) &&
41368343f889SRobert Hancock 		    (ap->cbl == ATA_CBL_PATA_UNK ||
41378343f889SRobert Hancock 		     ap->cbl == ATA_CBL_PATA80))) {
41388343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
41398343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
41408343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
41418343f889SRobert Hancock 		}
41428343f889SRobert Hancock 
4143c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4144c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4145c6fd2807SJeff Garzik }
4146c6fd2807SJeff Garzik 
4147c6fd2807SJeff Garzik /**
4148c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4149c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4150c6fd2807SJeff Garzik  *
4151c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4152c6fd2807SJeff Garzik  *	on the port it is attached to.
4153c6fd2807SJeff Garzik  *
4154c6fd2807SJeff Garzik  *	LOCKING:
4155c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4156c6fd2807SJeff Garzik  *
4157c6fd2807SJeff Garzik  *	RETURNS:
4158c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4159c6fd2807SJeff Garzik  */
4160c6fd2807SJeff Garzik 
4161c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4162c6fd2807SJeff Garzik {
4163c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4164c6fd2807SJeff Garzik 	unsigned int err_mask;
4165c6fd2807SJeff Garzik 
4166c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4167c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4168c6fd2807SJeff Garzik 
4169464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4170464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4171464cf177STejun Heo 	 */
4172c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4173c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4174c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4175464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4176c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4177c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
4178c6fd2807SJeff Garzik 
41792b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4180c6fd2807SJeff Garzik 
4181c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4182c6fd2807SJeff Garzik 	return err_mask;
4183c6fd2807SJeff Garzik }
4184c6fd2807SJeff Garzik /**
4185218f3d30SJeff Garzik  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
41869f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
41879f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
4188218f3d30SJeff Garzik  *	@feature: The feature value to place in the sector count register
41899f45cbd3SKristen Carlson Accardi  *
41909f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev,
4191218f3d30SJeff Garzik  *	passing @feature in the sector count register.
41929f45cbd3SKristen Carlson Accardi  *
41939f45cbd3SKristen Carlson Accardi  *	LOCKING:
41949f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
41959f45cbd3SKristen Carlson Accardi  *
41969f45cbd3SKristen Carlson Accardi  *	RETURNS:
41979f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
41989f45cbd3SKristen Carlson Accardi  */
4199218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4200218f3d30SJeff Garzik 					u8 feature)
42019f45cbd3SKristen Carlson Accardi {
42029f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
42039f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
42049f45cbd3SKristen Carlson Accardi 
42059f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
42069f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
42079f45cbd3SKristen Carlson Accardi 
42089f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
42099f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
42109f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
42119f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
42129f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
4213218f3d30SJeff Garzik 	tf.nsect = feature;
42149f45cbd3SKristen Carlson Accardi 
42152b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
42169f45cbd3SKristen Carlson Accardi 
42179f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
42189f45cbd3SKristen Carlson Accardi 	return err_mask;
42199f45cbd3SKristen Carlson Accardi }
42209f45cbd3SKristen Carlson Accardi 
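/*
 * Illustrative sketch (not part of libata): enabling a SATA feature
 * through the helper above.  SETFEATURES_SATA_ENABLE and SATA_AN are
 * assumed to be the <linux/ata.h> constants of this era; treat both
 * the constants and the function name as placeholders.
 *
 *	static int example_enable_an(struct ata_device *dev)
 *	{
 *		unsigned int err_mask;
 *
 *		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *					       SATA_AN);
 *		return err_mask ? -EIO : 0;
 *	}
 */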
42219f45cbd3SKristen Carlson Accardi /**
4222c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4223c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4224c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4225c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4226c6fd2807SJeff Garzik  *
4227c6fd2807SJeff Garzik  *	LOCKING:
4228c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4229c6fd2807SJeff Garzik  *
4230c6fd2807SJeff Garzik  *	RETURNS:
4231c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4232c6fd2807SJeff Garzik  */
4233c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4234c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4235c6fd2807SJeff Garzik {
4236c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4237c6fd2807SJeff Garzik 	unsigned int err_mask;
4238c6fd2807SJeff Garzik 
4239c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4240c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4241c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4242c6fd2807SJeff Garzik 
4243c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4244c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
4245c6fd2807SJeff Garzik 
4246c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4247c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4248c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4249c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4250c6fd2807SJeff Garzik 	tf.nsect = sectors;
4251c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4252c6fd2807SJeff Garzik 
42532b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
425418b2466cSAlan Cox 	/* A clean abort indicates an original or just-out-of-spec drive,
425518b2466cSAlan Cox 	   and we should continue as we issue the setup based on the
425618b2466cSAlan Cox 	   drive-reported working geometry */
425718b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
425818b2466cSAlan Cox 		err_mask = 0;
4259c6fd2807SJeff Garzik 
4260c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4261c6fd2807SJeff Garzik 	return err_mask;
4262c6fd2807SJeff Garzik }
4263c6fd2807SJeff Garzik 
4264c6fd2807SJeff Garzik /**
4265c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4266c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4267c6fd2807SJeff Garzik  *
4268c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4269c6fd2807SJeff Garzik  *
4270c6fd2807SJeff Garzik  *	LOCKING:
4271cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4272c6fd2807SJeff Garzik  */
427370e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4274c6fd2807SJeff Garzik {
4275c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4276c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4277c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4278c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4279c6fd2807SJeff Garzik 
4280c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4281c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4282c6fd2807SJeff Garzik 
4283c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4284c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4285c6fd2807SJeff Garzik 
4286c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4287c6fd2807SJeff Garzik 
4288c6fd2807SJeff Garzik 	/* if we padded the buffer out to a 32-bit boundary, and data
4289c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4290c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4291c6fd2807SJeff Garzik 	 */
4292c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4293c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4294c6fd2807SJeff Garzik 
4295c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4296c6fd2807SJeff Garzik 		if (qc->n_elem)
4297c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4298c6fd2807SJeff Garzik 		/* restore last sg */
429987260216SJens Axboe 		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4300c6fd2807SJeff Garzik 		if (pad_buf) {
4301c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
430245711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4303c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4304c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4305c6fd2807SJeff Garzik 		}
4306c6fd2807SJeff Garzik 	} else {
4307c6fd2807SJeff Garzik 		if (qc->n_elem)
4308c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4309c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4310c6fd2807SJeff Garzik 				dir);
4311c6fd2807SJeff Garzik 		/* restore sg */
4312c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4313c6fd2807SJeff Garzik 		if (pad_buf)
4314c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4315c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4316c6fd2807SJeff Garzik 	}
4317c6fd2807SJeff Garzik 
4318c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4319c6fd2807SJeff Garzik 	qc->__sg = NULL;
4320c6fd2807SJeff Garzik }
4321c6fd2807SJeff Garzik 
4322c6fd2807SJeff Garzik /**
4323c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4324c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4325c6fd2807SJeff Garzik  *
4326c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4327c6fd2807SJeff Garzik  *	associated with the current disk command.
4328c6fd2807SJeff Garzik  *
4329c6fd2807SJeff Garzik  *	LOCKING:
4330cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4331c6fd2807SJeff Garzik  *
4332c6fd2807SJeff Garzik  */
4333c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4334c6fd2807SJeff Garzik {
4335c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4336c6fd2807SJeff Garzik 	struct scatterlist *sg;
4337c6fd2807SJeff Garzik 	unsigned int idx;
4338c6fd2807SJeff Garzik 
4339c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4340c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4341c6fd2807SJeff Garzik 
4342c6fd2807SJeff Garzik 	idx = 0;
4343c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4344c6fd2807SJeff Garzik 		u32 addr, offset;
4345c6fd2807SJeff Garzik 		u32 sg_len, len;
4346c6fd2807SJeff Garzik 
4347c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans a 64K boundary.
4348c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4349c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4350c6fd2807SJeff Garzik 		 */
4351c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4352c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4353c6fd2807SJeff Garzik 
4354c6fd2807SJeff Garzik 		while (sg_len) {
4355c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4356c6fd2807SJeff Garzik 			len = sg_len;
4357c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4358c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4359c6fd2807SJeff Garzik 
4360c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4361c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4362c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4363c6fd2807SJeff Garzik 
4364c6fd2807SJeff Garzik 			idx++;
4365c6fd2807SJeff Garzik 			sg_len -= len;
4366c6fd2807SJeff Garzik 			addr += len;
4367c6fd2807SJeff Garzik 		}
4368c6fd2807SJeff Garzik 	}
4369c6fd2807SJeff Garzik 
4370c6fd2807SJeff Garzik 	if (idx)
4371c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4372c6fd2807SJeff Garzik }
4373b9a4197eSTejun Heo 
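/*
 * Resulting PRD layout, for reference: each entry carries a 32-bit DMA
 * address and a byte count in the low 16 bits of flags_len, segments
 * are split so that none crosses a 64K boundary, and ATA_PRD_EOT marks
 * the last entry.  For example, a single 3000-byte segment starting at
 * 0x1fff00 becomes two entries, (0x001fff00, 256) and (0x00200000,
 * 2744), with EOT set on the second.
 */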
4374c6fd2807SJeff Garzik /**
4375d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4376d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4377d26fc955SAlan Cox  *
4378d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4379d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4380d26fc955SAlan Cox  *	so that we avoid writing any 64K-length records for
4381d26fc955SAlan Cox  *	controllers that don't follow the spec.
4382d26fc955SAlan Cox  *
4383d26fc955SAlan Cox  *	LOCKING:
4384d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4385d26fc955SAlan Cox  *
4386d26fc955SAlan Cox  */
4387d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4388d26fc955SAlan Cox {
4389d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4390d26fc955SAlan Cox 	struct scatterlist *sg;
4391d26fc955SAlan Cox 	unsigned int idx;
4392d26fc955SAlan Cox 
4393d26fc955SAlan Cox 	WARN_ON(qc->__sg == NULL);
4394d26fc955SAlan Cox 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4395d26fc955SAlan Cox 
4396d26fc955SAlan Cox 	idx = 0;
4397d26fc955SAlan Cox 	ata_for_each_sg(sg, qc) {
4398d26fc955SAlan Cox 		u32 addr, offset;
4399d26fc955SAlan Cox 		u32 sg_len, len, blen;
4400d26fc955SAlan Cox 
4401d26fc955SAlan Cox 		/* determine if physical DMA addr spans a 64K boundary.
4402d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4403d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4404d26fc955SAlan Cox 		 */
4405d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4406d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4407d26fc955SAlan Cox 
4408d26fc955SAlan Cox 		while (sg_len) {
4409d26fc955SAlan Cox 			offset = addr & 0xffff;
4410d26fc955SAlan Cox 			len = sg_len;
4411d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4412d26fc955SAlan Cox 				len = 0x10000 - offset;
4413d26fc955SAlan Cox 
4414d26fc955SAlan Cox 			blen = len & 0xffff;
4415d26fc955SAlan Cox 			ap->prd[idx].addr = cpu_to_le32(addr);
4416d26fc955SAlan Cox 			if (blen == 0) {
4417d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4418d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4419d26fc955SAlan Cox 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4420d26fc955SAlan Cox 				blen = 0x8000;
4421d26fc955SAlan Cox 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4422d26fc955SAlan Cox 			}
4423d26fc955SAlan Cox 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4424d26fc955SAlan Cox 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4425d26fc955SAlan Cox 
4426d26fc955SAlan Cox 			idx++;
4427d26fc955SAlan Cox 			sg_len -= len;
4428d26fc955SAlan Cox 			addr += len;
4429d26fc955SAlan Cox 		}
4430d26fc955SAlan Cox 	}
4431d26fc955SAlan Cox 
4432d26fc955SAlan Cox 	if (idx)
4433d26fc955SAlan Cox 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4434d26fc955SAlan Cox }
4435d26fc955SAlan Cox 
4436d26fc955SAlan Cox /**
4437c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4438c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4439c6fd2807SJeff Garzik  *
4440c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4441c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4442c6fd2807SJeff Garzik  *	supplied PACKET command.
4443c6fd2807SJeff Garzik  *
4444c6fd2807SJeff Garzik  *	LOCKING:
4445cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4446c6fd2807SJeff Garzik  *
4447c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4448c6fd2807SJeff Garzik  *               nonzero otherwise
4449c6fd2807SJeff Garzik  */
4450c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4451c6fd2807SJeff Garzik {
4452c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4453c6fd2807SJeff Garzik 
4454b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4455b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4456b9a4197eSTejun Heo 	 */
4457b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
44586f23a31dSAlbert Lee 		return 1;
44596f23a31dSAlbert Lee 
4460c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4461b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4462c6fd2807SJeff Garzik 
4463b9a4197eSTejun Heo 	return 0;
4464c6fd2807SJeff Garzik }
4465b9a4197eSTejun Heo 
4466c6fd2807SJeff Garzik /**
446731cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
446831cc23b3STejun Heo  *	@qc: ATA command in question
446931cc23b3STejun Heo  *
447031cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
447131cc23b3STejun Heo  *	not.  As the upper layer only knows the queue depth, we are
447231cc23b3STejun Heo  *	responsible for maintaining exclusion.  This function checks
447331cc23b3STejun Heo  *	whether a new command @qc can be issued.
447431cc23b3STejun Heo  *
447531cc23b3STejun Heo  *	LOCKING:
447631cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
447731cc23b3STejun Heo  *
447831cc23b3STejun Heo  *	RETURNS:
447931cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
448031cc23b3STejun Heo  */
448131cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
448231cc23b3STejun Heo {
448331cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
448431cc23b3STejun Heo 
448531cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
448631cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
448731cc23b3STejun Heo 			return 0;
448831cc23b3STejun Heo 	} else {
448931cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
449031cc23b3STejun Heo 			return 0;
449131cc23b3STejun Heo 	}
449231cc23b3STejun Heo 
449331cc23b3STejun Heo 	return ATA_DEFER_LINK;
449431cc23b3STejun Heo }
449531cc23b3STejun Heo 
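/*
 * NCQ-capable drivers typically expose this helper as the .qc_defer
 * member of their ata_port_operations (an assumption about the ops
 * table of this era), so the core consults it before issuing each new
 * command.
 */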
449631cc23b3STejun Heo /**
4497c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4498c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4499c6fd2807SJeff Garzik  *
4500c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4501c6fd2807SJeff Garzik  *
4502c6fd2807SJeff Garzik  *	LOCKING:
4503cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4504c6fd2807SJeff Garzik  */
4505c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4506c6fd2807SJeff Garzik {
4507c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4508c6fd2807SJeff Garzik 		return;
4509c6fd2807SJeff Garzik 
4510c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4511c6fd2807SJeff Garzik }
4512c6fd2807SJeff Garzik 
4513d26fc955SAlan Cox /**
4514d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4515d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4516d26fc955SAlan Cox  *
4517d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4518d26fc955SAlan Cox  *
4519d26fc955SAlan Cox  *	LOCKING:
4520d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4521d26fc955SAlan Cox  */
4522d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4523d26fc955SAlan Cox {
4524d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4525d26fc955SAlan Cox 		return;
4526d26fc955SAlan Cox 
4527d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4528d26fc955SAlan Cox }
4529d26fc955SAlan Cox 
4530c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4531c6fd2807SJeff Garzik 
4532c6fd2807SJeff Garzik /**
4533c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4534c6fd2807SJeff Garzik  *	@qc: Command to be associated
4535c6fd2807SJeff Garzik  *	@buf: Memory buffer
4536c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4537c6fd2807SJeff Garzik  *
4538c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4539c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4540c6fd2807SJeff Garzik  *
4541c6fd2807SJeff Garzik  *	LOCKING:
4542cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4543c6fd2807SJeff Garzik  */
4544c6fd2807SJeff Garzik 
4545c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4546c6fd2807SJeff Garzik {
4547c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4548c6fd2807SJeff Garzik 
4549c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4550c6fd2807SJeff Garzik 	qc->n_elem = 1;
4551c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4552c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4553c6fd2807SJeff Garzik 	qc->nbytes = buflen;
455487260216SJens Axboe 	qc->cursg = qc->__sg;
4555c6fd2807SJeff Garzik 
455661c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4557c6fd2807SJeff Garzik }
4558c6fd2807SJeff Garzik 
4559c6fd2807SJeff Garzik /**
4560c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4561c6fd2807SJeff Garzik  *	@qc: Command to be associated
4562c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4563c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4564c6fd2807SJeff Garzik  *
4565c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4566c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4567c6fd2807SJeff Garzik  *	elements.
4568c6fd2807SJeff Garzik  *
4569c6fd2807SJeff Garzik  *	LOCKING:
4570cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4571c6fd2807SJeff Garzik  */
4572c6fd2807SJeff Garzik 
4573c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4574c6fd2807SJeff Garzik 		 unsigned int n_elem)
4575c6fd2807SJeff Garzik {
4576c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4577c6fd2807SJeff Garzik 	qc->__sg = sg;
4578c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4579c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
458087260216SJens Axboe 	qc->cursg = qc->__sg;
4581c6fd2807SJeff Garzik }
4582c6fd2807SJeff Garzik 
4583c6fd2807SJeff Garzik /**
4584c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4585c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4586c6fd2807SJeff Garzik  *
4587c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4588c6fd2807SJeff Garzik  *
4589c6fd2807SJeff Garzik  *	LOCKING:
4590cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4591c6fd2807SJeff Garzik  *
4592c6fd2807SJeff Garzik  *	RETURNS:
4593c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4594c6fd2807SJeff Garzik  */
4595c6fd2807SJeff Garzik 
4596c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4597c6fd2807SJeff Garzik {
4598c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4599c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4600c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4601c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4602c6fd2807SJeff Garzik 	int trim_sg = 0;
4603c6fd2807SJeff Garzik 
4604c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4605c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4606c6fd2807SJeff Garzik 	if (qc->pad_len) {
4607c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4608c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4609c6fd2807SJeff Garzik 
4610c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4611c6fd2807SJeff Garzik 
4612c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4613c6fd2807SJeff Garzik 
4614c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4615c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4616c6fd2807SJeff Garzik 			       qc->pad_len);
4617c6fd2807SJeff Garzik 
4618c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4619c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4620c6fd2807SJeff Garzik 		/* trim sg */
4621c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4622c6fd2807SJeff Garzik 		if (sg->length == 0)
4623c6fd2807SJeff Garzik 			trim_sg = 1;
4624c6fd2807SJeff Garzik 
4625c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4626c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4627c6fd2807SJeff Garzik 	}
4628c6fd2807SJeff Garzik 
4629c6fd2807SJeff Garzik 	if (trim_sg) {
4630c6fd2807SJeff Garzik 		qc->n_elem--;
4631c6fd2807SJeff Garzik 		goto skip_map;
4632c6fd2807SJeff Garzik 	}
4633c6fd2807SJeff Garzik 
4634c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4635c6fd2807SJeff Garzik 				     sg->length, dir);
4636c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4637c6fd2807SJeff Garzik 		/* restore sg */
4638c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4639c6fd2807SJeff Garzik 		return -1;
4640c6fd2807SJeff Garzik 	}
4641c6fd2807SJeff Garzik 
4642c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4643c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4644c6fd2807SJeff Garzik 
4645c6fd2807SJeff Garzik skip_map:
4646c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4647c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4648c6fd2807SJeff Garzik 
4649c6fd2807SJeff Garzik 	return 0;
4650c6fd2807SJeff Garzik }
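/*
 * The padding above keeps the DMA length a multiple of four bytes: with,
 * say, a 510-byte ATAPI buffer, sg->length & 3 gives pad_len = 2, the
 * trailing two bytes are carried in the per-tag pad buffer at
 * ap->pad + qc->tag * ATA_DMA_PAD_SZ, and the main segment shrinks to
 * 508 bytes.
 */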
4651c6fd2807SJeff Garzik 
4652c6fd2807SJeff Garzik /**
4653c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4654c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4655c6fd2807SJeff Garzik  *
4656c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4657c6fd2807SJeff Garzik  *
4658c6fd2807SJeff Garzik  *	LOCKING:
4659cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4660c6fd2807SJeff Garzik  *
4661c6fd2807SJeff Garzik  *	RETURNS:
4662c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4663c6fd2807SJeff Garzik  *
4664c6fd2807SJeff Garzik  */
4665c6fd2807SJeff Garzik 
4666c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4667c6fd2807SJeff Garzik {
4668c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4669c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
467087260216SJens Axboe 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4671c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4672c6fd2807SJeff Garzik 
467344877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4674c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4675c6fd2807SJeff Garzik 
4676c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4677c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4678c6fd2807SJeff Garzik 	if (qc->pad_len) {
4679c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4680c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4681c6fd2807SJeff Garzik 		unsigned int offset;
4682c6fd2807SJeff Garzik 
4683c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4684c6fd2807SJeff Garzik 
4685c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4686c6fd2807SJeff Garzik 
4687c6fd2807SJeff Garzik 		/*
4688c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4689c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4690c6fd2807SJeff Garzik 		 */
4691c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4692acd054a5SAnton Blanchard 		sg_init_table(psg, 1);
4693642f1490SJens Axboe 		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4694642f1490SJens Axboe 				qc->pad_len, offset_in_page(offset));
4695c6fd2807SJeff Garzik 
4696c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
469745711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4698c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4699c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4700c6fd2807SJeff Garzik 		}
4701c6fd2807SJeff Garzik 
4702c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4703c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4704c6fd2807SJeff Garzik 		/* trim last sg */
4705c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4706c6fd2807SJeff Garzik 		if (lsg->length == 0)
4707c6fd2807SJeff Garzik 			trim_sg = 1;
4708c6fd2807SJeff Garzik 
4709c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4710c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4711c6fd2807SJeff Garzik 	}
4712c6fd2807SJeff Garzik 
4713c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4714c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4715c6fd2807SJeff Garzik 		pre_n_elem--;
4716c6fd2807SJeff Garzik 
4717c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4718c6fd2807SJeff Garzik 		n_elem = 0;
4719c6fd2807SJeff Garzik 		goto skip_map;
4720c6fd2807SJeff Garzik 	}
4721c6fd2807SJeff Garzik 
4722c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4723c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4724c6fd2807SJeff Garzik 	if (n_elem < 1) {
4725c6fd2807SJeff Garzik 		/* restore last sg */
4726c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4727c6fd2807SJeff Garzik 		return -1;
4728c6fd2807SJeff Garzik 	}
4729c6fd2807SJeff Garzik 
4730c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4731c6fd2807SJeff Garzik 
4732c6fd2807SJeff Garzik skip_map:
4733c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4734c6fd2807SJeff Garzik 
4735c6fd2807SJeff Garzik 	return 0;
4736c6fd2807SJeff Garzik }
4737c6fd2807SJeff Garzik 
4738c6fd2807SJeff Garzik /**
4739c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4740c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4741c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4742c6fd2807SJeff Garzik  *
4743c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4744c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4745c6fd2807SJeff Garzik  *	vice-versa.
4746c6fd2807SJeff Garzik  *
4747c6fd2807SJeff Garzik  *	LOCKING:
4748c6fd2807SJeff Garzik  *	Inherited from caller.
4749c6fd2807SJeff Garzik  */
4750c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4751c6fd2807SJeff Garzik {
4752c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4753c6fd2807SJeff Garzik 	unsigned int i;
4754c6fd2807SJeff Garzik 
4755c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4756c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4757c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4758c6fd2807SJeff Garzik }
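/*
 * For example, on a big-endian CPU the raw IDENTIFY bytes { 0x34, 0x12 }
 * (the little-endian encoding of 0x1234) read back as 0x3412; the
 * le16_to_cpu() above turns that into the expected 0x1234.  On
 * little-endian machines the function body compiles away entirely.
 */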
4759c6fd2807SJeff Garzik 
4760c6fd2807SJeff Garzik /**
47610d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4762c6fd2807SJeff Garzik  *	@adev: device to target
4763c6fd2807SJeff Garzik  *	@buf: data buffer
4764c6fd2807SJeff Garzik  *	@buflen: buffer length
4765c6fd2807SJeff Garzik  *	@write_data: 1 to write to the device, 0 to read from it
4766c6fd2807SJeff Garzik  *
4767c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4768c6fd2807SJeff Garzik  *
4769c6fd2807SJeff Garzik  *	LOCKING:
4770c6fd2807SJeff Garzik  *	Inherited from caller.
4771c6fd2807SJeff Garzik  */
47720d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4773c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4774c6fd2807SJeff Garzik {
47759af5c9c9STejun Heo 	struct ata_port *ap = adev->link->ap;
4776c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4777c6fd2807SJeff Garzik 
4778c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4779c6fd2807SJeff Garzik 	if (write_data)
47800d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4781c6fd2807SJeff Garzik 	else
47820d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4783c6fd2807SJeff Garzik 
4784c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4785c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4786c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4787c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4788c6fd2807SJeff Garzik 
4789c6fd2807SJeff Garzik 		if (write_data) {
4790c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
47910d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4792c6fd2807SJeff Garzik 		} else {
47930d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4794c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4795c6fd2807SJeff Garzik 		}
4796c6fd2807SJeff Garzik 	}
4797c6fd2807SJeff Garzik }
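/*
 * A 7-byte transfer, for instance, is handled as three 16-bit
 * ioread16_rep()/iowrite16_rep() words plus one widened 16-bit access
 * for the trailing byte; only the valid byte of that last word is
 * copied to or from the caller's buffer.
 */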
4798c6fd2807SJeff Garzik 
4799c6fd2807SJeff Garzik /**
48000d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4801c6fd2807SJeff Garzik  *	@adev: device to target
4802c6fd2807SJeff Garzik  *	@buf: data buffer
4803c6fd2807SJeff Garzik  *	@buflen: buffer length
4804c6fd2807SJeff Garzik  *	@write_data: 1 to write to the device, 0 to read from it
4805c6fd2807SJeff Garzik  *
4806c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4807c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4808c6fd2807SJeff Garzik  *
4809c6fd2807SJeff Garzik  *	LOCKING:
4810c6fd2807SJeff Garzik  *	Inherited from caller.
4811c6fd2807SJeff Garzik  */
48120d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4813c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
4814c6fd2807SJeff Garzik {
4815c6fd2807SJeff Garzik 	unsigned long flags;
4816c6fd2807SJeff Garzik 	local_irq_save(flags);
48170d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
4818c6fd2807SJeff Garzik 	local_irq_restore(flags);
4819c6fd2807SJeff Garzik }
4820c6fd2807SJeff Garzik 
4821c6fd2807SJeff Garzik 
4822c6fd2807SJeff Garzik /**
48235a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
4824c6fd2807SJeff Garzik  *	@qc: Command on going
4825c6fd2807SJeff Garzik  *
48265a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
4827c6fd2807SJeff Garzik  *
4828c6fd2807SJeff Garzik  *	LOCKING:
4829c6fd2807SJeff Garzik  *	Inherited from caller.
4830c6fd2807SJeff Garzik  */
4831c6fd2807SJeff Garzik 
4832c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
4833c6fd2807SJeff Garzik {
4834c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4835c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4836c6fd2807SJeff Garzik 	struct page *page;
4837c6fd2807SJeff Garzik 	unsigned int offset;
4838c6fd2807SJeff Garzik 	unsigned char *buf;
4839c6fd2807SJeff Garzik 
48405a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
4841c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4842c6fd2807SJeff Garzik 
484345711f1aSJens Axboe 	page = sg_page(qc->cursg);
484487260216SJens Axboe 	offset = qc->cursg->offset + qc->cursg_ofs;
4845c6fd2807SJeff Garzik 
4846c6fd2807SJeff Garzik 	/* get the current page and offset */
4847c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
4848c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
4849c6fd2807SJeff Garzik 
4850c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4851c6fd2807SJeff Garzik 
4852c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
4853c6fd2807SJeff Garzik 		unsigned long flags;
4854c6fd2807SJeff Garzik 
4855c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
4856c6fd2807SJeff Garzik 		local_irq_save(flags);
4857c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
4858c6fd2807SJeff Garzik 
4859c6fd2807SJeff Garzik 		/* do the actual data transfer */
48605a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4861c6fd2807SJeff Garzik 
4862c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
4863c6fd2807SJeff Garzik 		local_irq_restore(flags);
4864c6fd2807SJeff Garzik 	} else {
4865c6fd2807SJeff Garzik 		buf = page_address(page);
48665a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4867c6fd2807SJeff Garzik 	}
4868c6fd2807SJeff Garzik 
48695a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
48705a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
4871c6fd2807SJeff Garzik 
487287260216SJens Axboe 	if (qc->cursg_ofs == qc->cursg->length) {
487387260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
4874c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
4875c6fd2807SJeff Garzik 	}
4876c6fd2807SJeff Garzik }
4877c6fd2807SJeff Garzik 
4878c6fd2807SJeff Garzik /**
48795a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
4880c6fd2807SJeff Garzik  *	@qc: Command on going
4881c6fd2807SJeff Garzik  *
48825a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
4883c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
4884c6fd2807SJeff Garzik  *
4885c6fd2807SJeff Garzik  *	LOCKING:
4886c6fd2807SJeff Garzik  *	Inherited from caller.
4887c6fd2807SJeff Garzik  */
4888c6fd2807SJeff Garzik 
4889c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
4890c6fd2807SJeff Garzik {
4891c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
4892c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
4893c6fd2807SJeff Garzik 		unsigned int nsect;
4894c6fd2807SJeff Garzik 
4895c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
4896c6fd2807SJeff Garzik 
48975a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4898726f0785STejun Heo 			    qc->dev->multi_count);
4899c6fd2807SJeff Garzik 		while (nsect--)
4900c6fd2807SJeff Garzik 			ata_pio_sector(qc);
4901c6fd2807SJeff Garzik 	} else
4902c6fd2807SJeff Garzik 		ata_pio_sector(qc);
49034cc980b3SAlbert Lee 
49044cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
4905c6fd2807SJeff Garzik }
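/*
 * For a READ/WRITE MULTIPLE command the DRQ block may span several
 * sectors: with multi_count of 8 and, say, two sectors left in the
 * request, min((nbytes - curbytes) / sect_size, multi_count) yields 2,
 * so both remaining sectors are moved before the status is flushed.
 */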
4906c6fd2807SJeff Garzik 
4907c6fd2807SJeff Garzik /**
4908c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
4909c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
4910c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
4911c6fd2807SJeff Garzik  *
4912c6fd2807SJeff Garzik  *	When the device has indicated its readiness to accept
4913c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
4914c6fd2807SJeff Garzik  *
4915c6fd2807SJeff Garzik  *	LOCKING:
4916c6fd2807SJeff Garzik  *	caller.
4917c6fd2807SJeff Garzik  */
4918c6fd2807SJeff Garzik 
4919c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4920c6fd2807SJeff Garzik {
4921c6fd2807SJeff Garzik 	/* send SCSI cdb */
4922c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
4923c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
4924c6fd2807SJeff Garzik 
4925c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4926c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
4927c6fd2807SJeff Garzik 
4928c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
4929c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
4930c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
4931c6fd2807SJeff Garzik 		break;
4932c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
4933c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4934c6fd2807SJeff Garzik 		break;
4935c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
4936c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4937c6fd2807SJeff Garzik 		/* initiate bmdma */
4938c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
4939c6fd2807SJeff Garzik 		break;
4940c6fd2807SJeff Garzik 	}
4941c6fd2807SJeff Garzik }
4942c6fd2807SJeff Garzik 
4943c6fd2807SJeff Garzik /**
4944c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
4945c6fd2807SJeff Garzik  *	@qc: Command on going
4946c6fd2807SJeff Garzik  *	@bytes: number of bytes
4947c6fd2807SJeff Garzik  *
4948c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
4949c6fd2807SJeff Garzik  *
4950c6fd2807SJeff Garzik  *	LOCKING:
4951c6fd2807SJeff Garzik  *	Inherited from caller.
4952c6fd2807SJeff Garzik  *
4953c6fd2807SJeff Garzik  */
4954c6fd2807SJeff Garzik 
4955c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4956c6fd2807SJeff Garzik {
4957c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4958c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
49590874ee76SFUJITA Tomonori 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4960c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4961c6fd2807SJeff Garzik 	struct page *page;
4962c6fd2807SJeff Garzik 	unsigned char *buf;
4963c6fd2807SJeff Garzik 	unsigned int offset, count;
49640874ee76SFUJITA Tomonori 	int no_more_sg = 0;
4965c6fd2807SJeff Garzik 
4966c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
4967c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4968c6fd2807SJeff Garzik 
4969c6fd2807SJeff Garzik next_sg:
49700874ee76SFUJITA Tomonori 	if (unlikely(no_more_sg)) {
4971c6fd2807SJeff Garzik 		/*
4972c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
4973c6fd2807SJeff Garzik 		 * more data to transfer.  In order not to overrun qc->sg and
4974c6fd2807SJeff Garzik 		 * to fulfill the length specified in the byte count register,
4975c6fd2807SJeff Garzik 		 *    - for the read case, discard trailing data from the device
4976c6fd2807SJeff Garzik 		 *    - for the write case, pad the device with zero data
4977c6fd2807SJeff Garzik 		 */
4978c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
4979c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
4980c6fd2807SJeff Garzik 		unsigned int i;
4981c6fd2807SJeff Garzik 
4982c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
4983c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
4984c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
4985c6fd2807SJeff Garzik 
4986c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
4987c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
4988c6fd2807SJeff Garzik 
4989c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
4990c6fd2807SJeff Garzik 		return;
4991c6fd2807SJeff Garzik 	}
4992c6fd2807SJeff Garzik 
499387260216SJens Axboe 	sg = qc->cursg;
4994c6fd2807SJeff Garzik 
499545711f1aSJens Axboe 	page = sg_page(sg);
4996c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
4997c6fd2807SJeff Garzik 
4998c6fd2807SJeff Garzik 	/* get the current page and offset */
4999c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5000c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5001c6fd2807SJeff Garzik 
5002c6fd2807SJeff Garzik 	/* don't overrun current sg */
5003c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
5004c6fd2807SJeff Garzik 
5005c6fd2807SJeff Garzik 	/* don't cross page boundaries */
5006c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
5007c6fd2807SJeff Garzik 
5008c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5009c6fd2807SJeff Garzik 
5010c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5011c6fd2807SJeff Garzik 		unsigned long flags;
5012c6fd2807SJeff Garzik 
5013c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
5014c6fd2807SJeff Garzik 		local_irq_save(flags);
5015c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5016c6fd2807SJeff Garzik 
5017c6fd2807SJeff Garzik 		/* do the actual data transfer */
5018c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5019c6fd2807SJeff Garzik 
5020c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5021c6fd2807SJeff Garzik 		local_irq_restore(flags);
5022c6fd2807SJeff Garzik 	} else {
5023c6fd2807SJeff Garzik 		buf = page_address(page);
5024c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5025c6fd2807SJeff Garzik 	}
5026c6fd2807SJeff Garzik 
5027c6fd2807SJeff Garzik 	bytes -= count;
5028c6fd2807SJeff Garzik 	qc->curbytes += count;
5029c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
5030c6fd2807SJeff Garzik 
5031c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
50320874ee76SFUJITA Tomonori 		if (qc->cursg == lsg)
50330874ee76SFUJITA Tomonori 			no_more_sg = 1;
50340874ee76SFUJITA Tomonori 
503587260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5036c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5037c6fd2807SJeff Garzik 	}
5038c6fd2807SJeff Garzik 
5039c6fd2807SJeff Garzik 	if (bytes)
5040c6fd2807SJeff Garzik 		goto next_sg;
5041c6fd2807SJeff Garzik }
5042c6fd2807SJeff Garzik 
5043c6fd2807SJeff Garzik /**
5044c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
5045c6fd2807SJeff Garzik  *	@qc: Command on going
5046c6fd2807SJeff Garzik  *
5047c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5048c6fd2807SJeff Garzik  *
5049c6fd2807SJeff Garzik  *	LOCKING:
5050c6fd2807SJeff Garzik  *	Inherited from caller.
5051c6fd2807SJeff Garzik  */
5052c6fd2807SJeff Garzik 
5053c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5054c6fd2807SJeff Garzik {
5055c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5056c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
5057c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
5058c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5059c6fd2807SJeff Garzik 
5060c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
5061c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
5062c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
5063c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
5064c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
5065c6fd2807SJeff Garzik 	 */
5066c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
5067c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
5068c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
5069c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
5070c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
5071c6fd2807SJeff Garzik 
5072c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
5073c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
5074c6fd2807SJeff Garzik 		goto err_out;
5075c6fd2807SJeff Garzik 
5076c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
5077c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5078c6fd2807SJeff Garzik 	if (do_write != i_write)
5079c6fd2807SJeff Garzik 		goto err_out;
5080c6fd2807SJeff Garzik 
508144877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5082c6fd2807SJeff Garzik 
5083c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
50844cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
5085c6fd2807SJeff Garzik 
5086c6fd2807SJeff Garzik 	return;
5087c6fd2807SJeff Garzik 
5088c6fd2807SJeff Garzik err_out:
5089c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5090c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
5091c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
5092c6fd2807SJeff Garzik }
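/*
 * The byte count for each DRQ interrupt comes from the LBA mid/high
 * (cylinder) registers: e.g. lbam = 0x00, lbah = 0x02 decodes to
 * (0x02 << 8) | 0x00 = 512 bytes for this round of __atapi_pio_bytes().
 */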
5093c6fd2807SJeff Garzik 
5094c6fd2807SJeff Garzik /**
5095c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5096c6fd2807SJeff Garzik  *	@ap: the target ata_port
5097c6fd2807SJeff Garzik  *	@qc: qc on going
5098c6fd2807SJeff Garzik  *
5099c6fd2807SJeff Garzik  *	RETURNS:
5100c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
5101c6fd2807SJeff Garzik  */
5102c6fd2807SJeff Garzik 
5103c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5104c6fd2807SJeff Garzik {
5105c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
5106c6fd2807SJeff Garzik 		return 1;
5107c6fd2807SJeff Garzik 
5108c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
5109c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
5110c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
5111c6fd2807SJeff Garzik 		    return 1;
5112c6fd2807SJeff Garzik 
5113c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
5114c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5115c6fd2807SJeff Garzik 			return 1;
5116c6fd2807SJeff Garzik 	}
5117c6fd2807SJeff Garzik 
5118c6fd2807SJeff Garzik 	return 0;
5119c6fd2807SJeff Garzik }
5120c6fd2807SJeff Garzik 
5121c6fd2807SJeff Garzik /**
5122c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5123c6fd2807SJeff Garzik  *	@qc: Command to complete
5124c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5125c6fd2807SJeff Garzik  *
5126c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5127c6fd2807SJeff Garzik  *
5128c6fd2807SJeff Garzik  *	LOCKING:
5129cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5130c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5131c6fd2807SJeff Garzik  */
5132c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5133c6fd2807SJeff Garzik {
5134c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5135c6fd2807SJeff Garzik 	unsigned long flags;
5136c6fd2807SJeff Garzik 
5137c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5138c6fd2807SJeff Garzik 		if (in_wq) {
5139c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5140c6fd2807SJeff Garzik 
5141cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5142cca3974eSJeff Garzik 			 * released.
5143c6fd2807SJeff Garzik 			 */
5144c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5145c6fd2807SJeff Garzik 			if (qc) {
5146c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
514783625006SAkira Iguchi 					ap->ops->irq_on(ap);
5148c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5149c6fd2807SJeff Garzik 				} else
5150c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5151c6fd2807SJeff Garzik 			}
5152c6fd2807SJeff Garzik 
5153c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5154c6fd2807SJeff Garzik 		} else {
5155c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5156c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5157c6fd2807SJeff Garzik 			else
5158c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5159c6fd2807SJeff Garzik 		}
5160c6fd2807SJeff Garzik 	} else {
5161c6fd2807SJeff Garzik 		if (in_wq) {
5162c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
516383625006SAkira Iguchi 			ap->ops->irq_on(ap);
5164c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5165c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5166c6fd2807SJeff Garzik 		} else
5167c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5168c6fd2807SJeff Garzik 	}
5169c6fd2807SJeff Garzik }
5170c6fd2807SJeff Garzik 
5171c6fd2807SJeff Garzik /**
5172c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5173c6fd2807SJeff Garzik  *	@ap: the target ata_port
5174c6fd2807SJeff Garzik  *	@qc: qc on going
5175c6fd2807SJeff Garzik  *	@status: current device status
5176c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5177c6fd2807SJeff Garzik  *
5178c6fd2807SJeff Garzik  *	RETURNS:
5179c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5180c6fd2807SJeff Garzik  */
5181c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5182c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5183c6fd2807SJeff Garzik {
5184c6fd2807SJeff Garzik 	unsigned long flags = 0;
5185c6fd2807SJeff Garzik 	int poll_next;
5186c6fd2807SJeff Garzik 
5187c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5188c6fd2807SJeff Garzik 
5189c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5190c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5191c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5192c6fd2807SJeff Garzik 	 */
5193c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5194c6fd2807SJeff Garzik 
5195c6fd2807SJeff Garzik fsm_start:
5196c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
519744877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5198c6fd2807SJeff Garzik 
5199c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5200c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5201c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5202c6fd2807SJeff Garzik 
5203c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5204c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5205c6fd2807SJeff Garzik 		 * takes over after sending the data.
5206c6fd2807SJeff Garzik 		 */
5207c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5208c6fd2807SJeff Garzik 
5209c6fd2807SJeff Garzik 		/* check device status */
5210c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5211c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5212c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5213c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5214c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5215c6fd2807SJeff Garzik 			else
5216c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5217c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5218c6fd2807SJeff Garzik 
5219c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5220c6fd2807SJeff Garzik 			goto fsm_start;
5221c6fd2807SJeff Garzik 		}
5222c6fd2807SJeff Garzik 
5223c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5224c6fd2807SJeff Garzik 		 * when it finds something wrong.
5225c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5226c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5227c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5228c6fd2807SJeff Garzik 		 */
5229c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
523044877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
523144877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
5232c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
5233c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5234c6fd2807SJeff Garzik 			goto fsm_start;
5235c6fd2807SJeff Garzik 		}
5236c6fd2807SJeff Garzik 
5237c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5238c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5239c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5240c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5241c6fd2807SJeff Garzik 		 */
5242c6fd2807SJeff Garzik 		if (in_wq)
5243c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5244c6fd2807SJeff Garzik 
5245c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5246c6fd2807SJeff Garzik 			/* PIO data out protocol.
5247c6fd2807SJeff Garzik 			 * send first data block.
5248c6fd2807SJeff Garzik 			 */
5249c6fd2807SJeff Garzik 
5250c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5251c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5252c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5253c6fd2807SJeff Garzik 			 */
5254c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5255c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5256c6fd2807SJeff Garzik 		} else
5257c6fd2807SJeff Garzik 			/* send CDB */
5258c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5259c6fd2807SJeff Garzik 
5260c6fd2807SJeff Garzik 		if (in_wq)
5261c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5262c6fd2807SJeff Garzik 
5263c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5264c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5265c6fd2807SJeff Garzik 		 */
5266c6fd2807SJeff Garzik 		break;
5267c6fd2807SJeff Garzik 
5268c6fd2807SJeff Garzik 	case HSM_ST:
5269c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
5270c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
5271c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5272c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5273c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5274c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5275c6fd2807SJeff Garzik 				 */
5276c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5277c6fd2807SJeff Garzik 				goto fsm_start;
5278c6fd2807SJeff Garzik 			}
5279c6fd2807SJeff Garzik 
5280c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5281c6fd2807SJeff Garzik 			 * when it finds something wrong.
5282c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5283c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5284c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5285c6fd2807SJeff Garzik 			 */
5286c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
528744877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
528844877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
528944877b4eSTejun Heo 						status);
5290c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5291c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5292c6fd2807SJeff Garzik 				goto fsm_start;
5293c6fd2807SJeff Garzik 			}
5294c6fd2807SJeff Garzik 
5295c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5296c6fd2807SJeff Garzik 
5297c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5298c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5299c6fd2807SJeff Garzik 				goto fsm_start;
5300c6fd2807SJeff Garzik 
5301c6fd2807SJeff Garzik 		} else {
5302c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5303c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5304c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5305c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5306c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5307c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5308c6fd2807SJeff Garzik 				else
530955a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
531055a8e2c8STejun Heo 					 * Phantom devices also trigger this
531155a8e2c8STejun Heo 					 * condition.  Mark hint.
531255a8e2c8STejun Heo 					 */
531355a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
531455a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5315c6fd2807SJeff Garzik 
5316c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5317c6fd2807SJeff Garzik 				goto fsm_start;
5318c6fd2807SJeff Garzik 			}
5319c6fd2807SJeff Garzik 
5320c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5321c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
5322c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5323c6fd2807SJeff Garzik 			 * block of junk data before changing the
5324c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5325c6fd2807SJeff Garzik 			 *
5326c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5327c6fd2807SJeff Garzik 			 * sense since the data block has been
5328c6fd2807SJeff Garzik 			 * transferred to the device.
5329c6fd2807SJeff Garzik 			 */
5330c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5331c6fd2807SJeff Garzik 				/* data might be corrupted */
5332c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5333c6fd2807SJeff Garzik 
5334c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5335c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5336c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5337c6fd2807SJeff Garzik 				}
5338c6fd2807SJeff Garzik 
5339c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5340c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5341c6fd2807SJeff Garzik 
5342c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5343c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5344c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5345c6fd2807SJeff Garzik 				 */
5346c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5347c6fd2807SJeff Garzik 				goto fsm_start;
5348c6fd2807SJeff Garzik 			}
5349c6fd2807SJeff Garzik 
5350c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5351c6fd2807SJeff Garzik 
5352c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5353c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5354c6fd2807SJeff Garzik 				/* all data read */
5355c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5356c6fd2807SJeff Garzik 				goto fsm_start;
5357c6fd2807SJeff Garzik 			}
5358c6fd2807SJeff Garzik 		}
5359c6fd2807SJeff Garzik 
5360c6fd2807SJeff Garzik 		poll_next = 1;
5361c6fd2807SJeff Garzik 		break;
5362c6fd2807SJeff Garzik 
5363c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5364c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5365c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5366c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5367c6fd2807SJeff Garzik 			goto fsm_start;
5368c6fd2807SJeff Garzik 		}
5369c6fd2807SJeff Garzik 
5370c6fd2807SJeff Garzik 		/* no more data to transfer */
5371c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
537244877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5373c6fd2807SJeff Garzik 
5374c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5375c6fd2807SJeff Garzik 
5376c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5377c6fd2807SJeff Garzik 
5378c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5379c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5380c6fd2807SJeff Garzik 
5381c6fd2807SJeff Garzik 		poll_next = 0;
5382c6fd2807SJeff Garzik 		break;
5383c6fd2807SJeff Garzik 
5384c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5385c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5386c6fd2807SJeff Garzik 		 * know what's wrong and recover
5387c6fd2807SJeff Garzik 		 */
5388c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5389c6fd2807SJeff Garzik 
5390c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5391c6fd2807SJeff Garzik 
5392c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5393c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5394c6fd2807SJeff Garzik 
5395c6fd2807SJeff Garzik 		poll_next = 0;
5396c6fd2807SJeff Garzik 		break;
5397c6fd2807SJeff Garzik 	default:
5398c6fd2807SJeff Garzik 		poll_next = 0;
5399c6fd2807SJeff Garzik 		BUG();
5400c6fd2807SJeff Garzik 	}
5401c6fd2807SJeff Garzik 
5402c6fd2807SJeff Garzik 	return poll_next;
5403c6fd2807SJeff Garzik }
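/*
 * Taken together, a command normally walks HSM_ST_FIRST (send the CDB or
 * the first data block, when one is needed) -> HSM_ST (per-DRQ data
 * transfer) -> HSM_ST_LAST (final status check) -> HSM_ST_IDLE, with any
 * failure diverting to HSM_ST_ERR before the qc is finished via
 * ata_hsm_qc_complete().
 */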
5404c6fd2807SJeff Garzik 
540565f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5406c6fd2807SJeff Garzik {
540765f27f38SDavid Howells 	struct ata_port *ap =
540865f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
540965f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5410c6fd2807SJeff Garzik 	u8 status;
5411c6fd2807SJeff Garzik 	int poll_next;
5412c6fd2807SJeff Garzik 
5413c6fd2807SJeff Garzik fsm_start:
5414c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5415c6fd2807SJeff Garzik 
5416c6fd2807SJeff Garzik 	/*
5417c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5418c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5419c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5420c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5421c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5422c6fd2807SJeff Garzik 	 */
5423c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5424c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5425c6fd2807SJeff Garzik 		msleep(2);
5426c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5427c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5428c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5429c6fd2807SJeff Garzik 			return;
5430c6fd2807SJeff Garzik 		}
5431c6fd2807SJeff Garzik 	}
5432c6fd2807SJeff Garzik 
5433c6fd2807SJeff Garzik 	/* move the HSM */
5434c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5435c6fd2807SJeff Garzik 
5436c6fd2807SJeff Garzik 	/* another command or interrupt handler
5437c6fd2807SJeff Garzik 	 * may be running at this point.
5438c6fd2807SJeff Garzik 	 */
5439c6fd2807SJeff Garzik 	if (poll_next)
5440c6fd2807SJeff Garzik 		goto fsm_start;
5441c6fd2807SJeff Garzik }
5442c6fd2807SJeff Garzik 
5443c6fd2807SJeff Garzik /**
5444c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5445c6fd2807SJeff Garzik  *	@ap: Port associated with device @dev
5446c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5447c6fd2807SJeff Garzik  *
5448c6fd2807SJeff Garzik  *	LOCKING:
5449c6fd2807SJeff Garzik  *	None.
5450c6fd2807SJeff Garzik  */
5451c6fd2807SJeff Garzik 
5452c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5453c6fd2807SJeff Garzik {
5454c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5455c6fd2807SJeff Garzik 	unsigned int i;
5456c6fd2807SJeff Garzik 
5457c6fd2807SJeff Garzik 	/* no command while frozen */
5458c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5459c6fd2807SJeff Garzik 		return NULL;
5460c6fd2807SJeff Garzik 
5461c6fd2807SJeff Garzik 	/* the last tag is reserved for the internal command */
5462c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5463c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5464c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5465c6fd2807SJeff Garzik 			break;
5466c6fd2807SJeff Garzik 		}
5467c6fd2807SJeff Garzik 
5468c6fd2807SJeff Garzik 	if (qc)
5469c6fd2807SJeff Garzik 		qc->tag = i;
5470c6fd2807SJeff Garzik 
5471c6fd2807SJeff Garzik 	return qc;
5472c6fd2807SJeff Garzik }
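/*
 * Tag allocation is a simple linear scan of ap->qc_allocated with
 * test_and_set_bit(): assuming the usual ATA_MAX_QUEUE of 32, tags 0-30
 * are handed out here and tag 31 is left for the internal command path.
 */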
5473c6fd2807SJeff Garzik 
5474c6fd2807SJeff Garzik /**
5475c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5476c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5477c6fd2807SJeff Garzik  *
5478c6fd2807SJeff Garzik  *	LOCKING:
5479c6fd2807SJeff Garzik  *	None.
5480c6fd2807SJeff Garzik  */
5481c6fd2807SJeff Garzik 
5482c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5483c6fd2807SJeff Garzik {
54849af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5485c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5486c6fd2807SJeff Garzik 
5487c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5488c6fd2807SJeff Garzik 	if (qc) {
5489c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5490c6fd2807SJeff Garzik 		qc->ap = ap;
5491c6fd2807SJeff Garzik 		qc->dev = dev;
5492c6fd2807SJeff Garzik 
5493c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5494c6fd2807SJeff Garzik 	}
5495c6fd2807SJeff Garzik 
5496c6fd2807SJeff Garzik 	return qc;
5497c6fd2807SJeff Garzik }
5498c6fd2807SJeff Garzik 
5499c6fd2807SJeff Garzik /**
5500c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5501c6fd2807SJeff Garzik  *	@qc: Command to complete
5502c6fd2807SJeff Garzik  *
5503c6fd2807SJeff Garzik  *	Designed to free unused ata_queued_cmd object
5504c6fd2807SJeff Garzik  *	in case something prevents using it.
5505c6fd2807SJeff Garzik  *
5506c6fd2807SJeff Garzik  *	LOCKING:
5507cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5508c6fd2807SJeff Garzik  */
5509c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5510c6fd2807SJeff Garzik {
5511c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5512c6fd2807SJeff Garzik 	unsigned int tag;
5513c6fd2807SJeff Garzik 
5514c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5515c6fd2807SJeff Garzik 
5516c6fd2807SJeff Garzik 	qc->flags = 0;
5517c6fd2807SJeff Garzik 	tag = qc->tag;
5518c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5519c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5520c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5521c6fd2807SJeff Garzik 	}
5522c6fd2807SJeff Garzik }
5523c6fd2807SJeff Garzik 
5524c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5525c6fd2807SJeff Garzik {
5526c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
55279af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5528c6fd2807SJeff Garzik 
5529c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5530c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5531c6fd2807SJeff Garzik 
5532c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5533c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5534c6fd2807SJeff Garzik 
5535c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5536da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
55379af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5538da917d69STejun Heo 		if (!link->sactive)
5539da917d69STejun Heo 			ap->nr_active_links--;
5540da917d69STejun Heo 	} else {
55419af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5542da917d69STejun Heo 		ap->nr_active_links--;
5543da917d69STejun Heo 	}
5544da917d69STejun Heo 
5545da917d69STejun Heo 	/* clear exclusive status */
5546da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5547da917d69STejun Heo 		     ap->excl_link == link))
5548da917d69STejun Heo 		ap->excl_link = NULL;
5549c6fd2807SJeff Garzik 
5550c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5551c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5552c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5553c6fd2807SJeff Garzik 	 */
5554c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5555c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5556c6fd2807SJeff Garzik 
5557c6fd2807SJeff Garzik 	/* call completion callback */
5558c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5559c6fd2807SJeff Garzik }
5560c6fd2807SJeff Garzik 
556139599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
556239599a53STejun Heo {
556339599a53STejun Heo 	struct ata_port *ap = qc->ap;
556439599a53STejun Heo 
556539599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
55664742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
556739599a53STejun Heo }
556839599a53STejun Heo 
5569c6fd2807SJeff Garzik /**
5570c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5571c6fd2807SJeff Garzik  *	@qc: Command to complete
5573c6fd2807SJeff Garzik  *
5574c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5575c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5576c6fd2807SJeff Garzik  *
5577c6fd2807SJeff Garzik  *	LOCKING:
5578cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5579c6fd2807SJeff Garzik  */
5580c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5581c6fd2807SJeff Garzik {
5582c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5583c6fd2807SJeff Garzik 
5584c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5585c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5586c6fd2807SJeff Garzik 	 *
5587c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5588c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5589c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5590c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5591c6fd2807SJeff Garzik 	 *
5592c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5593c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5594c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5595c6fd2807SJeff Garzik 	 * taken care of.
5596c6fd2807SJeff Garzik 	 */
5597c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5598c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5599c6fd2807SJeff Garzik 
5600c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5601c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5602c6fd2807SJeff Garzik 
5603c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5604c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5605c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
560639599a53STejun Heo 				fill_result_tf(qc);
5607c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5608c6fd2807SJeff Garzik 				return;
5609c6fd2807SJeff Garzik 			}
5610c6fd2807SJeff Garzik 		}
5611c6fd2807SJeff Garzik 
5612c6fd2807SJeff Garzik 		/* read result TF if requested */
5613c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
561439599a53STejun Heo 			fill_result_tf(qc);
5615c6fd2807SJeff Garzik 
5616c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5617c6fd2807SJeff Garzik 	} else {
5618c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5619c6fd2807SJeff Garzik 			return;
5620c6fd2807SJeff Garzik 
5621c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5622c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
562339599a53STejun Heo 			fill_result_tf(qc);
5624c6fd2807SJeff Garzik 
5625c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5626c6fd2807SJeff Garzik 	}
5627c6fd2807SJeff Garzik }
5628c6fd2807SJeff Garzik 
5629c6fd2807SJeff Garzik /**
5630c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5631c6fd2807SJeff Garzik  *	@ap: port in question
5632c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5633c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5634c6fd2807SJeff Garzik  *
5635c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5636c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to complete
5637c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5638c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5639c6fd2807SJeff Garzik  *
5640c6fd2807SJeff Garzik  *	LOCKING:
5641cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5642c6fd2807SJeff Garzik  *
5643c6fd2807SJeff Garzik  *	RETURNS:
5644c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5645c6fd2807SJeff Garzik  */
5646c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5647c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5648c6fd2807SJeff Garzik {
5649c6fd2807SJeff Garzik 	int nr_done = 0;
5650c6fd2807SJeff Garzik 	u32 done_mask;
5651c6fd2807SJeff Garzik 	int i;
5652c6fd2807SJeff Garzik 
5653c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5654c6fd2807SJeff Garzik 
5655c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5656c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5657c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5658c6fd2807SJeff Garzik 		return -EINVAL;
5659c6fd2807SJeff Garzik 	}
5660c6fd2807SJeff Garzik 
5661c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5662c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5663c6fd2807SJeff Garzik 
5664c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5665c6fd2807SJeff Garzik 			continue;
5666c6fd2807SJeff Garzik 
5667c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5668c6fd2807SJeff Garzik 			if (finish_qc)
5669c6fd2807SJeff Garzik 				finish_qc(qc);
5670c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5671c6fd2807SJeff Garzik 			nr_done++;
5672c6fd2807SJeff Garzik 		}
5673c6fd2807SJeff Garzik 	}
5674c6fd2807SJeff Garzik 
5675c6fd2807SJeff Garzik 	return nr_done;
5676c6fd2807SJeff Garzik }
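/*
 * A hypothetical NCQ interrupt handler would pass the hardware's view of
 * the active tags (e.g. the value read from its SActive register) as
 * @qc_active.  If ap->qc_active is 0x5 and the controller now reports
 * 0x4, done_mask is 0x5 ^ 0x4 = 0x1, so only the command with tag 0 is
 * completed.
 */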
5677c6fd2807SJeff Garzik 
5678c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5679c6fd2807SJeff Garzik {
5680c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5681c6fd2807SJeff Garzik 
5682c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5683c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5684c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5685c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5686c6fd2807SJeff Garzik 		return 1;
5687c6fd2807SJeff Garzik 
5688c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5689c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5690c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5691c6fd2807SJeff Garzik 			return 1;
5692c6fd2807SJeff Garzik 
5693c6fd2807SJeff Garzik 		/* fall through */
5694c6fd2807SJeff Garzik 
5695c6fd2807SJeff Garzik 	default:
5696c6fd2807SJeff Garzik 		return 0;
5697c6fd2807SJeff Garzik 	}
5698c6fd2807SJeff Garzik 
5699c6fd2807SJeff Garzik 	/* never reached */
5700c6fd2807SJeff Garzik }
5701c6fd2807SJeff Garzik 
5702c6fd2807SJeff Garzik /**
5703c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5704c6fd2807SJeff Garzik  *	@qc: command to issue to device
5705c6fd2807SJeff Garzik  *
5706c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5707c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5708c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5709c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5710c6fd2807SJeff Garzik  *
5711c6fd2807SJeff Garzik  *	LOCKING:
5712cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5713c6fd2807SJeff Garzik  */
5714c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5715c6fd2807SJeff Garzik {
5716c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
57179af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5718c6fd2807SJeff Garzik 
5719c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5720c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5721c6fd2807SJeff Garzik 	 * request ATAPI sense.
5722c6fd2807SJeff Garzik 	 */
57239af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5724c6fd2807SJeff Garzik 
5725c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
57269af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
5727da917d69STejun Heo 
5728da917d69STejun Heo 		if (!link->sactive)
5729da917d69STejun Heo 			ap->nr_active_links++;
57309af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
5731c6fd2807SJeff Garzik 	} else {
57329af5c9c9STejun Heo 		WARN_ON(link->sactive);
5733da917d69STejun Heo 
5734da917d69STejun Heo 		ap->nr_active_links++;
57359af5c9c9STejun Heo 		link->active_tag = qc->tag;
5736c6fd2807SJeff Garzik 	}
5737c6fd2807SJeff Garzik 
5738c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5739c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5740c6fd2807SJeff Garzik 
5741c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5742c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5743c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5744c6fd2807SJeff Garzik 				goto sg_err;
5745c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5746c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5747c6fd2807SJeff Garzik 				goto sg_err;
5748c6fd2807SJeff Garzik 		}
5749c6fd2807SJeff Garzik 	} else {
5750c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5751c6fd2807SJeff Garzik 	}
5752c6fd2807SJeff Garzik 
5753c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5754c6fd2807SJeff Garzik 
5755c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5756c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5757c6fd2807SJeff Garzik 		goto err;
5758c6fd2807SJeff Garzik 	return;
5759c6fd2807SJeff Garzik 
5760c6fd2807SJeff Garzik sg_err:
5761c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5762c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5763c6fd2807SJeff Garzik err:
5764c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5765c6fd2807SJeff Garzik }
5766c6fd2807SJeff Garzik 
5767c6fd2807SJeff Garzik /**
5768c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5769c6fd2807SJeff Garzik  *	@qc: command to issue to device
5770c6fd2807SJeff Garzik  *
5771c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5772c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5773c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
5774c6fd2807SJeff Garzik  *	is slightly different.
5775c6fd2807SJeff Garzik  *
5776c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
5777c6fd2807SJeff Garzik  *
5778c6fd2807SJeff Garzik  *	LOCKING:
5779cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5780c6fd2807SJeff Garzik  *
5781c6fd2807SJeff Garzik  *	RETURNS:
5782c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
5783c6fd2807SJeff Garzik  */
5784c6fd2807SJeff Garzik 
5785c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5786c6fd2807SJeff Garzik {
5787c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5788c6fd2807SJeff Garzik 
5789c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
5790c6fd2807SJeff Garzik 	 * interrupt-driven PIO and ATAPI CDB interrupts.
5791c6fd2807SJeff Garzik 	 */
5792c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5793c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
5794c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
5795e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
5796c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
5797c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
5798c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
5799c6fd2807SJeff Garzik 			break;
5800c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
5801c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5802c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
5803c6fd2807SJeff Garzik 				BUG();
5804c6fd2807SJeff Garzik 			break;
5805c6fd2807SJeff Garzik 		default:
5806c6fd2807SJeff Garzik 			break;
5807c6fd2807SJeff Garzik 		}
5808c6fd2807SJeff Garzik 	}
5809c6fd2807SJeff Garzik 
5810c6fd2807SJeff Garzik 	/* select the device */
5811c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
5812c6fd2807SJeff Garzik 
5813c6fd2807SJeff Garzik 	/* start the command */
5814c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5815c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
5816c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5817c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5818c6fd2807SJeff Garzik 
5819c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5820c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5821c6fd2807SJeff Garzik 
5822c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5823c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5824c6fd2807SJeff Garzik 
5825c6fd2807SJeff Garzik 		break;
5826c6fd2807SJeff Garzik 
5827c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5828c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5829c6fd2807SJeff Garzik 
5830c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5831c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5832c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5833c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5834c6fd2807SJeff Garzik 		break;
5835c6fd2807SJeff Garzik 
5836c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5837c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5838c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5839c6fd2807SJeff Garzik 
5840c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5841c6fd2807SJeff Garzik 
5842c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5843c6fd2807SJeff Garzik 			/* PIO data out protocol */
5844c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
5845c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5846c6fd2807SJeff Garzik 
5847c6fd2807SJeff Garzik 			/* always send the first data block using
5848c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
5849c6fd2807SJeff Garzik 			 */
5850c6fd2807SJeff Garzik 		} else {
5851c6fd2807SJeff Garzik 			/* PIO data in protocol */
5852c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5853c6fd2807SJeff Garzik 
5854c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
5855c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5856c6fd2807SJeff Garzik 
5857c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest;
5858c6fd2807SJeff Garzik 			 * otherwise, the interrupt handler takes over from here.
5859c6fd2807SJeff Garzik 			 */
5860c6fd2807SJeff Garzik 		}
5861c6fd2807SJeff Garzik 
5862c6fd2807SJeff Garzik 		break;
5863c6fd2807SJeff Garzik 
5864c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5865c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5866c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
5867c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
5868c6fd2807SJeff Garzik 
5869c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
5870c6fd2807SJeff Garzik 
5871c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5872c6fd2807SJeff Garzik 
5873c6fd2807SJeff Garzik 		/* send CDB by polling if there is no CDB interrupt */
5874c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5875c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
5876c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5877c6fd2807SJeff Garzik 		break;
5878c6fd2807SJeff Garzik 
5879c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5880c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5881c6fd2807SJeff Garzik 
5882c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5883c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5884c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
5885c6fd2807SJeff Garzik 
5886c6fd2807SJeff Garzik 		/* send CDB by polling if there is no CDB interrupt */
5887c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5888c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5889c6fd2807SJeff Garzik 		break;
5890c6fd2807SJeff Garzik 
5891c6fd2807SJeff Garzik 	default:
5892c6fd2807SJeff Garzik 		WARN_ON(1);
5893c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
5894c6fd2807SJeff Garzik 	}
5895c6fd2807SJeff Garzik 
5896c6fd2807SJeff Garzik 	return 0;
5897c6fd2807SJeff Garzik }
5898c6fd2807SJeff Garzik 
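/*
 * Illustrative sketch only: a conventional taskfile/BMDMA LLD can plug
 * the generic helpers straight into its port operations (all other
 * mandatory ops are omitted from this sketch).
 */
#if 0
static const struct ata_port_operations example_port_ops = {
	.qc_prep	= ata_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
};
#endif
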
5899c6fd2807SJeff Garzik /**
5900c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
5901c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
5902c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
5903c6fd2807SJeff Garzik  *
5904c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
5905c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
5906c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
5907c6fd2807SJeff Garzik  *
5908c6fd2807SJeff Garzik  *	LOCKING:
5909cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5910c6fd2807SJeff Garzik  *
5911c6fd2807SJeff Garzik  *	RETURNS:
5912c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
5913c6fd2807SJeff Garzik  */
5914c6fd2807SJeff Garzik 
5915c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap,
5916c6fd2807SJeff Garzik 				  struct ata_queued_cmd *qc)
5917c6fd2807SJeff Garzik {
59189af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
5919c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
5920c6fd2807SJeff Garzik 
5921c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
592244877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5923c6fd2807SJeff Garzik 
5924c6fd2807SJeff Garzik 	/* Check whether we are expecting an interrupt in this state */
5925c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5926c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5927c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
5928c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
5929c6fd2807SJeff Garzik 		 */
5930c6fd2807SJeff Garzik 
5931c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5932c6fd2807SJeff Garzik 		 * The flag is set only for ATAPI devices, so there is
5933c6fd2807SJeff Garzik 		 * no need to check is_atapi_taskfile(&qc->tf) again.
5934c6fd2807SJeff Garzik 		 */
5935c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5936c6fd2807SJeff Garzik 			goto idle_irq;
5937c6fd2807SJeff Garzik 		break;
5938c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5939c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
5940c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5941c6fd2807SJeff Garzik 			/* check status of DMA engine */
5942c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
594344877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
594444877b4eSTejun Heo 				ap->print_id, host_stat);
5945c6fd2807SJeff Garzik 
5946c6fd2807SJeff Garzik 			/* if it's not our irq... */
5947c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
5948c6fd2807SJeff Garzik 				goto idle_irq;
5949c6fd2807SJeff Garzik 
5950c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
5951c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
5952c6fd2807SJeff Garzik 
5953c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
5954c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
5955c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
5956c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5957c6fd2807SJeff Garzik 			}
5958c6fd2807SJeff Garzik 		}
5959c6fd2807SJeff Garzik 		break;
5960c6fd2807SJeff Garzik 	case HSM_ST:
5961c6fd2807SJeff Garzik 		break;
5962c6fd2807SJeff Garzik 	default:
5963c6fd2807SJeff Garzik 		goto idle_irq;
5964c6fd2807SJeff Garzik 	}
5965c6fd2807SJeff Garzik 
5966c6fd2807SJeff Garzik 	/* check altstatus */
5967c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
5968c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
5969c6fd2807SJeff Garzik 		goto idle_irq;
5970c6fd2807SJeff Garzik 
5971c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
5972c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
5973c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
5974c6fd2807SJeff Garzik 		goto idle_irq;
5975c6fd2807SJeff Garzik 
5976c6fd2807SJeff Garzik 	/* ack bmdma irq events */
5977c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
5978c6fd2807SJeff Garzik 
5979c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
5980ea54763fSTejun Heo 
5981ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5982ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5983ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5984ea54763fSTejun Heo 
5985c6fd2807SJeff Garzik 	return 1;	/* irq handled */
5986c6fd2807SJeff Garzik 
5987c6fd2807SJeff Garzik idle_irq:
5988c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
5989c6fd2807SJeff Garzik 
5990c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
5991c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
59926d32d30fSJeff Garzik 		ata_chk_status(ap);
59936d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
5994c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5995c6fd2807SJeff Garzik 		return 1;
5996c6fd2807SJeff Garzik 	}
5997c6fd2807SJeff Garzik #endif
5998c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
5999c6fd2807SJeff Garzik }
6000c6fd2807SJeff Garzik 
6001c6fd2807SJeff Garzik /**
6002c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
6003c6fd2807SJeff Garzik  *	@irq: irq line (unused)
6004cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
6005c6fd2807SJeff Garzik  *
6006c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
6007c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
6008c6fd2807SJeff Garzik  *
6009c6fd2807SJeff Garzik  *	LOCKING:
6010cca3974eSJeff Garzik  *	Obtains host lock during operation.
6011c6fd2807SJeff Garzik  *
6012c6fd2807SJeff Garzik  *	RETURNS:
6013c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
6014c6fd2807SJeff Garzik  */
6015c6fd2807SJeff Garzik 
60167d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance)
6017c6fd2807SJeff Garzik {
6018cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
6019c6fd2807SJeff Garzik 	unsigned int i;
6020c6fd2807SJeff Garzik 	unsigned int handled = 0;
6021c6fd2807SJeff Garzik 	unsigned long flags;
6022c6fd2807SJeff Garzik 
6023c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6024cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
6025c6fd2807SJeff Garzik 
6026cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6027c6fd2807SJeff Garzik 		struct ata_port *ap;
6028c6fd2807SJeff Garzik 
6029cca3974eSJeff Garzik 		ap = host->ports[i];
6030c6fd2807SJeff Garzik 		if (ap &&
6031c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
6032c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
6033c6fd2807SJeff Garzik 
60349af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
6035c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6036c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
6037c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
6038c6fd2807SJeff Garzik 		}
6039c6fd2807SJeff Garzik 	}
6040c6fd2807SJeff Garzik 
6041cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
6042c6fd2807SJeff Garzik 
6043c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
6044c6fd2807SJeff Garzik }
6045c6fd2807SJeff Garzik 
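/*
 * Illustrative sketch only: drivers that rely on the generic handler
 * simply pass ata_interrupt when activating the host.  "example_sht"
 * is a placeholder for the LLD's scsi_host_template.
 */
#if 0
	rc = ata_host_activate(host, pdev->irq, ata_interrupt,
			       IRQF_SHARED, &example_sht);
#endif
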
6046c6fd2807SJeff Garzik /**
6047c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
6048936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
6049c6fd2807SJeff Garzik  *
6050936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
6051c6fd2807SJeff Garzik  *
6052c6fd2807SJeff Garzik  *	LOCKING:
6053c6fd2807SJeff Garzik  *	None.
6054c6fd2807SJeff Garzik  *
6055c6fd2807SJeff Garzik  *	RETURNS:
6056c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
6057c6fd2807SJeff Garzik  */
6058936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
6059c6fd2807SJeff Garzik {
6060936fd732STejun Heo 	struct ata_port *ap = link->ap;
6061936fd732STejun Heo 
6062a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6063c6fd2807SJeff Garzik }
6064c6fd2807SJeff Garzik 
6065c6fd2807SJeff Garzik /**
6066c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
6067936fd732STejun Heo  *	@link: ATA link to read SCR for
6068c6fd2807SJeff Garzik  *	@reg: SCR to read
6069c6fd2807SJeff Garzik  *	@val: Place to store read value
6070c6fd2807SJeff Garzik  *
6071936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
6072633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6073633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6074c6fd2807SJeff Garzik  *
6075c6fd2807SJeff Garzik  *	LOCKING:
6076633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6077c6fd2807SJeff Garzik  *
6078c6fd2807SJeff Garzik  *	RETURNS:
6079c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6080c6fd2807SJeff Garzik  */
6081936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6082c6fd2807SJeff Garzik {
6083633273a3STejun Heo 	if (ata_is_host_link(link)) {
6084936fd732STejun Heo 		struct ata_port *ap = link->ap;
6085936fd732STejun Heo 
6086936fd732STejun Heo 		if (sata_scr_valid(link))
6087da3dbb17STejun Heo 			return ap->ops->scr_read(ap, reg, val);
6088c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6089c6fd2807SJeff Garzik 	}
6090c6fd2807SJeff Garzik 
6091633273a3STejun Heo 	return sata_pmp_scr_read(link, reg, val);
6092633273a3STejun Heo }
6093633273a3STejun Heo 
6094c6fd2807SJeff Garzik /**
6095c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
6096936fd732STejun Heo  *	@link: ATA link to write SCR for
6097c6fd2807SJeff Garzik  *	@reg: SCR to write
6098c6fd2807SJeff Garzik  *	@val: value to write
6099c6fd2807SJeff Garzik  *
6100936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
6101633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6102633273a3STejun Heo  *	the port is SATA and the port implements ->scr_write.
6103c6fd2807SJeff Garzik  *
6104c6fd2807SJeff Garzik  *	LOCKING:
6105633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6106c6fd2807SJeff Garzik  *
6107c6fd2807SJeff Garzik  *	RETURNS:
6108c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6109c6fd2807SJeff Garzik  */
6110936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
6111c6fd2807SJeff Garzik {
6112633273a3STejun Heo 	if (ata_is_host_link(link)) {
6113936fd732STejun Heo 		struct ata_port *ap = link->ap;
6114936fd732STejun Heo 
6115936fd732STejun Heo 		if (sata_scr_valid(link))
6116da3dbb17STejun Heo 			return ap->ops->scr_write(ap, reg, val);
6117c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6118c6fd2807SJeff Garzik 	}
6119c6fd2807SJeff Garzik 
6120633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6121633273a3STejun Heo }
6122633273a3STejun Heo 
6123c6fd2807SJeff Garzik /**
6124c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6125936fd732STejun Heo  *	@link: ATA link to write SCR for
6126c6fd2807SJeff Garzik  *	@reg: SCR to write
6127c6fd2807SJeff Garzik  *	@val: value to write
6128c6fd2807SJeff Garzik  *
6129c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that it
6130c6fd2807SJeff Garzik  *	flushes the write by reading the register back afterwards.
6131c6fd2807SJeff Garzik  *
6132c6fd2807SJeff Garzik  *	LOCKING:
6133633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6134c6fd2807SJeff Garzik  *
6135c6fd2807SJeff Garzik  *	RETURNS:
6136c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6137c6fd2807SJeff Garzik  */
6138936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6139c6fd2807SJeff Garzik {
6140633273a3STejun Heo 	if (ata_is_host_link(link)) {
6141936fd732STejun Heo 		struct ata_port *ap = link->ap;
6142da3dbb17STejun Heo 		int rc;
6143da3dbb17STejun Heo 
6144936fd732STejun Heo 		if (sata_scr_valid(link)) {
6145da3dbb17STejun Heo 			rc = ap->ops->scr_write(ap, reg, val);
6146da3dbb17STejun Heo 			if (rc == 0)
6147da3dbb17STejun Heo 				rc = ap->ops->scr_read(ap, reg, &val);
6148da3dbb17STejun Heo 			return rc;
6149c6fd2807SJeff Garzik 		}
6150c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6151c6fd2807SJeff Garzik 	}
6152c6fd2807SJeff Garzik 
6153633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6154633273a3STejun Heo }
6155633273a3STejun Heo 
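/*
 * Illustrative sketch only: a typical use of the SCR accessors is
 * rewriting the SPD field of SControl, e.g. to cap the link at 1.5Gbps
 * before a hardreset.
 */
#if 0
	u32 scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol) == 0) {
		scontrol = (scontrol & ~0xf0) | (1 << 4);	/* SPD = Gen1 */
		sata_scr_write_flush(link, SCR_CONTROL, scontrol);
	}
#endif
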
6156c6fd2807SJeff Garzik /**
6157936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6158936fd732STejun Heo  *	@link: ATA link to test
6159c6fd2807SJeff Garzik  *
6160936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6161936fd732STejun Heo  *	0 if the online status of @link cannot be obtained, so
6162936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6163c6fd2807SJeff Garzik  *
6164c6fd2807SJeff Garzik  *	LOCKING:
6165c6fd2807SJeff Garzik  *	None.
6166c6fd2807SJeff Garzik  *
6167c6fd2807SJeff Garzik  *	RETURNS:
6168c6fd2807SJeff Garzik  *	1 if the online status of @link is available and the link is online.
6169c6fd2807SJeff Garzik  */
6170936fd732STejun Heo int ata_link_online(struct ata_link *link)
6171c6fd2807SJeff Garzik {
6172c6fd2807SJeff Garzik 	u32 sstatus;
6173c6fd2807SJeff Garzik 
6174936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6175936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6176c6fd2807SJeff Garzik 		return 1;
6177c6fd2807SJeff Garzik 	return 0;
6178c6fd2807SJeff Garzik }
6179c6fd2807SJeff Garzik 
6180c6fd2807SJeff Garzik /**
6181936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6182936fd732STejun Heo  *	@link: ATA link to test
6183c6fd2807SJeff Garzik  *
6184936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6185936fd732STejun Heo  *	returns 0 if the offline status of @link cannot be obtained, so
6186936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6187c6fd2807SJeff Garzik  *
6188c6fd2807SJeff Garzik  *	LOCKING:
6189c6fd2807SJeff Garzik  *	None.
6190c6fd2807SJeff Garzik  *
6191c6fd2807SJeff Garzik  *	RETURNS:
6192c6fd2807SJeff Garzik  *	1 if the offline status of @link is available and the link is offline.
6193c6fd2807SJeff Garzik  */
6194936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6195c6fd2807SJeff Garzik {
6196c6fd2807SJeff Garzik 	u32 sstatus;
6197c6fd2807SJeff Garzik 
6198936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6199936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6200c6fd2807SJeff Garzik 		return 1;
6201c6fd2807SJeff Garzik 	return 0;
6202c6fd2807SJeff Garzik }
6203c6fd2807SJeff Garzik 
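/*
 * Illustrative sketch only: because both helpers return 0 when the SCRs
 * cannot be read (e.g. PATA), "not online" is not the same as "offline",
 * so callers generally test the case they care about explicitly.
 */
#if 0
	if (ata_link_online(link))
		printk(KERN_INFO "link reports a device present\n");
	else if (ata_link_offline(link))
		printk(KERN_INFO "link reports no device\n");
	else
		printk(KERN_INFO "link status unknown (no SCR access)\n");
#endif
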
6204c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6205c6fd2807SJeff Garzik {
6206c6fd2807SJeff Garzik 	unsigned int err_mask;
6207c6fd2807SJeff Garzik 	u8 cmd;
6208c6fd2807SJeff Garzik 
6209c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6210c6fd2807SJeff Garzik 		return 0;
6211c6fd2807SJeff Garzik 
62126fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6213c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6214c6fd2807SJeff Garzik 	else
6215c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6216c6fd2807SJeff Garzik 
62174f34337bSAlan Cox 	/* This is wrong. On a failed flush we get back the LBA of the lost
62184f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
62194f34337bSAlan Cox 	   a further flush command to continue the writeback until it
62204f34337bSAlan Cox 	   does not error */
6221c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6222c6fd2807SJeff Garzik 	if (err_mask) {
6223c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6224c6fd2807SJeff Garzik 		return -EIO;
6225c6fd2807SJeff Garzik 	}
6226c6fd2807SJeff Garzik 
6227c6fd2807SJeff Garzik 	return 0;
6228c6fd2807SJeff Garzik }
6229c6fd2807SJeff Garzik 
62306ffa01d8STejun Heo #ifdef CONFIG_PM
6231cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6232cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6233cca3974eSJeff Garzik 			       int wait)
6234c6fd2807SJeff Garzik {
6235c6fd2807SJeff Garzik 	unsigned long flags;
6236c6fd2807SJeff Garzik 	int i, rc;
6237c6fd2807SJeff Garzik 
6238cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6239cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6240e3667ebfSTejun Heo 		struct ata_link *link;
6241c6fd2807SJeff Garzik 
6242c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6243c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6244c6fd2807SJeff Garzik 		 */
6245c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6246c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6247c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6248c6fd2807SJeff Garzik 		}
6249c6fd2807SJeff Garzik 
6250c6fd2807SJeff Garzik 		/* request PM ops to EH */
6251c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6252c6fd2807SJeff Garzik 
6253c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6254c6fd2807SJeff Garzik 		if (wait) {
6255c6fd2807SJeff Garzik 			rc = 0;
6256c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6257c6fd2807SJeff Garzik 		}
6258c6fd2807SJeff Garzik 
6259c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6260e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6261e3667ebfSTejun Heo 			link->eh_info.action |= action;
6262e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6263e3667ebfSTejun Heo 		}
6264c6fd2807SJeff Garzik 
6265c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6266c6fd2807SJeff Garzik 
6267c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6268c6fd2807SJeff Garzik 
6269c6fd2807SJeff Garzik 		/* wait and check result */
6270c6fd2807SJeff Garzik 		if (wait) {
6271c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6272c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6273c6fd2807SJeff Garzik 			if (rc)
6274c6fd2807SJeff Garzik 				return rc;
6275c6fd2807SJeff Garzik 		}
6276c6fd2807SJeff Garzik 	}
6277c6fd2807SJeff Garzik 
6278c6fd2807SJeff Garzik 	return 0;
6279c6fd2807SJeff Garzik }
6280c6fd2807SJeff Garzik 
6281c6fd2807SJeff Garzik /**
6282cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6283cca3974eSJeff Garzik  *	@host: host to suspend
6284c6fd2807SJeff Garzik  *	@mesg: PM message
6285c6fd2807SJeff Garzik  *
6286cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6287c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6288c6fd2807SJeff Garzik  *	to finish.
6289c6fd2807SJeff Garzik  *
6290c6fd2807SJeff Garzik  *	LOCKING:
6291c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6292c6fd2807SJeff Garzik  *
6293c6fd2807SJeff Garzik  *	RETURNS:
6294c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6295c6fd2807SJeff Garzik  */
6296cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6297c6fd2807SJeff Garzik {
62989666f400STejun Heo 	int rc;
6299c6fd2807SJeff Garzik 
6300cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
63019666f400STejun Heo 	if (rc == 0)
6302cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
6303c6fd2807SJeff Garzik 	return rc;
6304c6fd2807SJeff Garzik }
6305c6fd2807SJeff Garzik 
6306c6fd2807SJeff Garzik /**
6307cca3974eSJeff Garzik  *	ata_host_resume - resume host
6308cca3974eSJeff Garzik  *	@host: host to resume
6309c6fd2807SJeff Garzik  *
6310cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6311c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6312c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
6313c6fd2807SJeff Garzik  *
6314c6fd2807SJeff Garzik  *	LOCKING:
6315c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6316c6fd2807SJeff Garzik  */
6317cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6318c6fd2807SJeff Garzik {
6319cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6320c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6321cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6322c6fd2807SJeff Garzik }
63236ffa01d8STejun Heo #endif
6324c6fd2807SJeff Garzik 
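/*
 * Illustrative sketch only: a PCI LLD's suspend hook usually suspends
 * the ATA host first and only then powers down the PCI device, much
 * like the generic ata_pci_device_suspend() helper.  The function name
 * below is a placeholder.
 */
#if 0
static int example_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);
	return 0;
}
#endif
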
6325c6fd2807SJeff Garzik /**
6326c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6327c6fd2807SJeff Garzik  *	@ap: Port to initialize
6328c6fd2807SJeff Garzik  *
6329c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6330c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
6331c6fd2807SJeff Garzik  *
6332c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6333c6fd2807SJeff Garzik  *
6334c6fd2807SJeff Garzik  *	LOCKING:
6335c6fd2807SJeff Garzik  *	Inherited from caller.
6336c6fd2807SJeff Garzik  */
6337c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6338c6fd2807SJeff Garzik {
6339c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6340c6fd2807SJeff Garzik 	int rc;
6341c6fd2807SJeff Garzik 
6342f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6343f0d36efdSTejun Heo 				      GFP_KERNEL);
6344c6fd2807SJeff Garzik 	if (!ap->prd)
6345c6fd2807SJeff Garzik 		return -ENOMEM;
6346c6fd2807SJeff Garzik 
6347c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
6348f0d36efdSTejun Heo 	if (rc)
6349c6fd2807SJeff Garzik 		return rc;
6350c6fd2807SJeff Garzik 
6351f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6352f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
6353c6fd2807SJeff Garzik 	return 0;
6354c6fd2807SJeff Garzik }
6355c6fd2807SJeff Garzik 
6356c6fd2807SJeff Garzik /**
6357c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6358c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6359c6fd2807SJeff Garzik  *
6360c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6361c6fd2807SJeff Garzik  *
6362c6fd2807SJeff Garzik  *	LOCKING:
6363c6fd2807SJeff Garzik  *	Inherited from caller.
6364c6fd2807SJeff Garzik  */
6365c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6366c6fd2807SJeff Garzik {
63679af5c9c9STejun Heo 	struct ata_link *link = dev->link;
63689af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6369c6fd2807SJeff Garzik 	unsigned long flags;
6370c6fd2807SJeff Garzik 
6371c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
63729af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
63739af5c9c9STejun Heo 	link->sata_spd = 0;
6374c6fd2807SJeff Garzik 
6375c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6376c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6377cca3974eSJeff Garzik 	 * host lock.
6378c6fd2807SJeff Garzik 	 */
6379c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6380c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
63813dcc323fSTejun Heo 	dev->horkage = 0;
6382c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6383c6fd2807SJeff Garzik 
6384c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6385c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6386c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6387c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6388c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6389c6fd2807SJeff Garzik }
6390c6fd2807SJeff Garzik 
6391c6fd2807SJeff Garzik /**
63924fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
63934fb37a25STejun Heo  *	@ap: ATA port link is attached to
63944fb37a25STejun Heo  *	@link: Link structure to initialize
63958989805dSTejun Heo  *	@pmp: Port multiplier port number
63964fb37a25STejun Heo  *
63974fb37a25STejun Heo  *	Initialize @link.
63984fb37a25STejun Heo  *
63994fb37a25STejun Heo  *	LOCKING:
64004fb37a25STejun Heo  *	Kernel thread context (may sleep)
64014fb37a25STejun Heo  */
6402fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
64034fb37a25STejun Heo {
64044fb37a25STejun Heo 	int i;
64054fb37a25STejun Heo 
64064fb37a25STejun Heo 	/* clear everything except for devices */
64074fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
64084fb37a25STejun Heo 
64094fb37a25STejun Heo 	link->ap = ap;
64108989805dSTejun Heo 	link->pmp = pmp;
64114fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
64124fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
64134fb37a25STejun Heo 
64144fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
64154fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
64164fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
64174fb37a25STejun Heo 
64184fb37a25STejun Heo 		dev->link = link;
64194fb37a25STejun Heo 		dev->devno = dev - link->device;
64204fb37a25STejun Heo 		ata_dev_init(dev);
64214fb37a25STejun Heo 	}
64224fb37a25STejun Heo }
64234fb37a25STejun Heo 
64244fb37a25STejun Heo /**
64254fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
64264fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
64274fb37a25STejun Heo  *
64284fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
64294fb37a25STejun Heo  *	configured value.
64304fb37a25STejun Heo  *
64314fb37a25STejun Heo  *	LOCKING:
64324fb37a25STejun Heo  *	Kernel thread context (may sleep).
64334fb37a25STejun Heo  *
64344fb37a25STejun Heo  *	RETURNS:
64354fb37a25STejun Heo  *	0 on success, -errno on failure.
64364fb37a25STejun Heo  */
6437fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
64384fb37a25STejun Heo {
64394fb37a25STejun Heo 	u32 scontrol, spd;
64404fb37a25STejun Heo 	int rc;
64414fb37a25STejun Heo 
64424fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
64434fb37a25STejun Heo 	if (rc)
64444fb37a25STejun Heo 		return rc;
64454fb37a25STejun Heo 
64464fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
64474fb37a25STejun Heo 	if (spd)
64484fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
64494fb37a25STejun Heo 
64504fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
64514fb37a25STejun Heo 
64524fb37a25STejun Heo 	return 0;
64534fb37a25STejun Heo }
64544fb37a25STejun Heo 
64554fb37a25STejun Heo /**
6456f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6457f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6458c6fd2807SJeff Garzik  *
6459f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6460f3187195STejun Heo  *
6461f3187195STejun Heo  *	RETURNS:
6462f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6463c6fd2807SJeff Garzik  *
6464c6fd2807SJeff Garzik  *	LOCKING:
6465f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6466c6fd2807SJeff Garzik  */
6467f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6468c6fd2807SJeff Garzik {
6469f3187195STejun Heo 	struct ata_port *ap;
6470c6fd2807SJeff Garzik 
6471f3187195STejun Heo 	DPRINTK("ENTER\n");
6472f3187195STejun Heo 
6473f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6474f3187195STejun Heo 	if (!ap)
6475f3187195STejun Heo 		return NULL;
6476f3187195STejun Heo 
6477f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6478cca3974eSJeff Garzik 	ap->lock = &host->lock;
6479c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6480f3187195STejun Heo 	ap->print_id = -1;
6481c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6482cca3974eSJeff Garzik 	ap->host = host;
6483f3187195STejun Heo 	ap->dev = host->dev;
6484c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6485c6fd2807SJeff Garzik 
6486c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6487c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6488c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6489c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6490c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6491c6fd2807SJeff Garzik #else
6492c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6493c6fd2807SJeff Garzik #endif
6494c6fd2807SJeff Garzik 
649565f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
649665f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
649765f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6498c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6499c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
65005ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
65015ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
65025ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6503c6fd2807SJeff Garzik 
6504c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6505c6fd2807SJeff Garzik 
65068989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6507c6fd2807SJeff Garzik 
6508c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6509c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6510c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6511c6fd2807SJeff Garzik #endif
6512c6fd2807SJeff Garzik 	return ap;
6513c6fd2807SJeff Garzik }
6514c6fd2807SJeff Garzik 
6515f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6516f0d36efdSTejun Heo {
6517f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6518f0d36efdSTejun Heo 	int i;
6519f0d36efdSTejun Heo 
6520f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6521f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6522f0d36efdSTejun Heo 
6523ecef7253STejun Heo 		if (!ap)
6524ecef7253STejun Heo 			continue;
6525ecef7253STejun Heo 
6526ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6527f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6528f0d36efdSTejun Heo 	}
6529f0d36efdSTejun Heo 
6530ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6531f0d36efdSTejun Heo 		host->ops->host_stop(host);
65321aa56ccaSTejun Heo 
65331aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
65341aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
65351aa506e4STejun Heo 
65364911487aSTejun Heo 		if (!ap)
65374911487aSTejun Heo 			continue;
65384911487aSTejun Heo 
65394911487aSTejun Heo 		if (ap->scsi_host)
65401aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
65411aa506e4STejun Heo 
6542633273a3STejun Heo 		kfree(ap->pmp_link);
65434911487aSTejun Heo 		kfree(ap);
65441aa506e4STejun Heo 		host->ports[i] = NULL;
65451aa506e4STejun Heo 	}
65461aa506e4STejun Heo 
65471aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6548f0d36efdSTejun Heo }
6549f0d36efdSTejun Heo 
6550c6fd2807SJeff Garzik /**
6551f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6552f3187195STejun Heo  *	@dev: generic device this host is associated with
6553f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6554f3187195STejun Heo  *
6555f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6556f3187195STejun Heo  *	this function to allocate a host, initializes it fully, and
6557f3187195STejun Heo  *	attaches it using ata_host_register().
6558f3187195STejun Heo  *
6559f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6560f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6561f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6562f3187195STejun Heo  *	ports will be automatically freed on registration.
6563f3187195STejun Heo  *
6564f3187195STejun Heo  *	RETURNS:
6565f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6566f3187195STejun Heo  *
6567f3187195STejun Heo  *	LOCKING:
6568f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6569f3187195STejun Heo  */
6570f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6571f3187195STejun Heo {
6572f3187195STejun Heo 	struct ata_host *host;
6573f3187195STejun Heo 	size_t sz;
6574f3187195STejun Heo 	int i;
6575f3187195STejun Heo 
6576f3187195STejun Heo 	DPRINTK("ENTER\n");
6577f3187195STejun Heo 
6578f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6579f3187195STejun Heo 		return NULL;
6580f3187195STejun Heo 
6581f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6582f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6584f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6585f3187195STejun Heo 	if (!host)
6586f3187195STejun Heo 		goto err_out;
6587f3187195STejun Heo 
6588f3187195STejun Heo 	devres_add(dev, host);
6589f3187195STejun Heo 	dev_set_drvdata(dev, host);
6590f3187195STejun Heo 
6591f3187195STejun Heo 	spin_lock_init(&host->lock);
6592f3187195STejun Heo 	host->dev = dev;
6593f3187195STejun Heo 	host->n_ports = max_ports;
6594f3187195STejun Heo 
6595f3187195STejun Heo 	/* allocate ports bound to this host */
6596f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6597f3187195STejun Heo 		struct ata_port *ap;
6598f3187195STejun Heo 
6599f3187195STejun Heo 		ap = ata_port_alloc(host);
6600f3187195STejun Heo 		if (!ap)
6601f3187195STejun Heo 			goto err_out;
6602f3187195STejun Heo 
6603f3187195STejun Heo 		ap->port_no = i;
6604f3187195STejun Heo 		host->ports[i] = ap;
6605f3187195STejun Heo 	}
6606f3187195STejun Heo 
6607f3187195STejun Heo 	devres_remove_group(dev, NULL);
6608f3187195STejun Heo 	return host;
6609f3187195STejun Heo 
6610f3187195STejun Heo  err_out:
6611f3187195STejun Heo 	devres_release_group(dev, NULL);
6612f3187195STejun Heo 	return NULL;
6613f3187195STejun Heo }
6614f3187195STejun Heo 
6615f3187195STejun Heo /**
6616f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6617f5cda257STejun Heo  *	@dev: generic device this host is associated with
6618f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6619f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6620f5cda257STejun Heo  *
6621f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6622f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6623f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6624f5cda257STejun Heo  *
6625f5cda257STejun Heo  *	RETURNS:
6626f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6627f5cda257STejun Heo  *
6628f5cda257STejun Heo  *	LOCKING:
6629f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6630f5cda257STejun Heo  */
6631f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6632f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6633f5cda257STejun Heo 				      int n_ports)
6634f5cda257STejun Heo {
6635f5cda257STejun Heo 	const struct ata_port_info *pi;
6636f5cda257STejun Heo 	struct ata_host *host;
6637f5cda257STejun Heo 	int i, j;
6638f5cda257STejun Heo 
6639f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6640f5cda257STejun Heo 	if (!host)
6641f5cda257STejun Heo 		return NULL;
6642f5cda257STejun Heo 
6643f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6644f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6645f5cda257STejun Heo 
6646f5cda257STejun Heo 		if (ppi[j])
6647f5cda257STejun Heo 			pi = ppi[j++];
6648f5cda257STejun Heo 
6649f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6650f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6651f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6652f5cda257STejun Heo 		ap->flags |= pi->flags;
66530c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6654f5cda257STejun Heo 		ap->ops = pi->port_ops;
6655f5cda257STejun Heo 
6656f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6657f5cda257STejun Heo 			host->ops = pi->port_ops;
6658f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6659f5cda257STejun Heo 			host->private_data = pi->private_data;
6660f5cda257STejun Heo 	}
6661f5cda257STejun Heo 
6662f5cda257STejun Heo 	return host;
6663f5cda257STejun Heo }
6664f5cda257STejun Heo 
6665f5cda257STejun Heo /**
6666ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6667ecef7253STejun Heo  *	@host: ATA host to start ports for
6668ecef7253STejun Heo  *
6669ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6670ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6671ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6672f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it is set to the
6673f3187195STejun Heo  *	first non-dummy port ops.
6674ecef7253STejun Heo  *
6675ecef7253STejun Heo  *	LOCKING:
6676ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6677ecef7253STejun Heo  *
6678ecef7253STejun Heo  *	RETURNS:
6679ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6680ecef7253STejun Heo  */
6681ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6682ecef7253STejun Heo {
6683ecef7253STejun Heo 	int i, rc;
6684ecef7253STejun Heo 
6685ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6686ecef7253STejun Heo 		return 0;
6687ecef7253STejun Heo 
6688ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6689ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6690ecef7253STejun Heo 
6691f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6692f3187195STejun Heo 			host->ops = ap->ops;
6693f3187195STejun Heo 
6694ecef7253STejun Heo 		if (ap->ops->port_start) {
6695ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6696ecef7253STejun Heo 			if (rc) {
6697ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
6698ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
6699ecef7253STejun Heo 				goto err_out;
6700ecef7253STejun Heo 			}
6701ecef7253STejun Heo 		}
6702ecef7253STejun Heo 
6703ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6704ecef7253STejun Heo 	}
6705ecef7253STejun Heo 
6706ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6707ecef7253STejun Heo 	return 0;
6708ecef7253STejun Heo 
6709ecef7253STejun Heo  err_out:
6710ecef7253STejun Heo 	while (--i >= 0) {
6711ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6712ecef7253STejun Heo 
6713ecef7253STejun Heo 		if (ap->ops->port_stop)
6714ecef7253STejun Heo 			ap->ops->port_stop(ap);
6715ecef7253STejun Heo 	}
6716ecef7253STejun Heo 	return rc;
6717ecef7253STejun Heo }
6718ecef7253STejun Heo 
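/*
 * Illustrative sketch only: an LLD that needs controller-global setup
 * between starting the ports and registering the host calls the three
 * steps separately instead of using ata_host_activate().  The handler
 * and template names are placeholders.
 */
#if 0
	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* ... program controller-wide registers here ... */

	rc = devm_request_irq(host->dev, irq, example_interrupt, 0,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	return ata_host_register(host, &example_sht);
#endif
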
6719ecef7253STejun Heo /**
6720cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
6721cca3974eSJeff Garzik  *	@host:	host to initialize
6722cca3974eSJeff Garzik  *	@dev:	device host is attached to
6723cca3974eSJeff Garzik  *	@flags:	host flags
6724c6fd2807SJeff Garzik  *	@ops:	port_ops
6725c6fd2807SJeff Garzik  *
6726c6fd2807SJeff Garzik  *	LOCKING:
6727c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6728c6fd2807SJeff Garzik  *
6729c6fd2807SJeff Garzik  */
6730f3187195STejun Heo /* KILLME - the only user left is ipr */
6731cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6732cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6733c6fd2807SJeff Garzik {
6734cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6735cca3974eSJeff Garzik 	host->dev = dev;
6736cca3974eSJeff Garzik 	host->flags = flags;
6737cca3974eSJeff Garzik 	host->ops = ops;
6738c6fd2807SJeff Garzik }
6739c6fd2807SJeff Garzik 
6740c6fd2807SJeff Garzik /**
6741f3187195STejun Heo  *	ata_host_register - register initialized ATA host
6742f3187195STejun Heo  *	@host: ATA host to register
6743f3187195STejun Heo  *	@sht: template for SCSI host
6744c6fd2807SJeff Garzik  *
6745f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
6746f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
6747f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
6748f3187195STejun Heo  *	probes registered devices.
6749c6fd2807SJeff Garzik  *
6750c6fd2807SJeff Garzik  *	LOCKING:
6751f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6752c6fd2807SJeff Garzik  *
6753c6fd2807SJeff Garzik  *	RETURNS:
6754f3187195STejun Heo  *	0 on success, -errno otherwise.
6755c6fd2807SJeff Garzik  */
6756f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6757c6fd2807SJeff Garzik {
6758f3187195STejun Heo 	int i, rc;
6759c6fd2807SJeff Garzik 
6760f3187195STejun Heo 	/* host must have been started */
6761f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
6762f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
6763f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
6764f3187195STejun Heo 		WARN_ON(1);
6765f3187195STejun Heo 		return -EINVAL;
676602f076aaSAlan Cox 	}
6767f0d36efdSTejun Heo 
6768f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
6769f3187195STejun Heo 	 * determine the exact number of ports to allocate at
6770f3187195STejun Heo 	 * allocation time.
6771f3187195STejun Heo 	 */
6772f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
6773f3187195STejun Heo 		kfree(host->ports[i]);
6774f0d36efdSTejun Heo 
6775f3187195STejun Heo 	/* give ports names and add SCSI hosts */
6776f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
6777f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
6778c6fd2807SJeff Garzik 
6779f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
6780ecef7253STejun Heo 	if (rc)
6781f3187195STejun Heo 		return rc;
6782ecef7253STejun Heo 
6783fafbae87STejun Heo 	/* associate with ACPI nodes */
6784fafbae87STejun Heo 	ata_acpi_associate(host);
6785fafbae87STejun Heo 
6786f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
6787cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6788cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6789f3187195STejun Heo 		unsigned long xfer_mask;
6790f3187195STejun Heo 
6791f3187195STejun Heo 		/* set SATA cable type if still unset */
6792f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6793f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
6794c6fd2807SJeff Garzik 
6795c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
67964fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
6797c6fd2807SJeff Garzik 
6798cbcdd875STejun Heo 		/* print per-port info to dmesg */
6799f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6800f3187195STejun Heo 					      ap->udma_mask);
6801f3187195STejun Heo 
6802abf6e8edSTejun Heo 		if (!ata_port_is_dummy(ap)) {
6803cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
6804cbcdd875STejun Heo 					"%cATA max %s %s\n",
6805a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6806f3187195STejun Heo 					ata_mode_string(xfer_mask),
6807cbcdd875STejun Heo 					ap->link.eh_info.desc);
6808abf6e8edSTejun Heo 			ata_ehi_clear_desc(&ap->link.eh_info);
6809abf6e8edSTejun Heo 		} else
6810f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6811c6fd2807SJeff Garzik 	}
6812c6fd2807SJeff Garzik 
6813f3187195STejun Heo 	/* perform each probe synchronously */
6814f3187195STejun Heo 	DPRINTK("probe begin\n");
6815f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6816f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
6817f3187195STejun Heo 		int rc;
6818f3187195STejun Heo 
6819f3187195STejun Heo 		/* probe */
6820c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
68219af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
6822c6fd2807SJeff Garzik 			unsigned long flags;
6823c6fd2807SJeff Garzik 
6824c6fd2807SJeff Garzik 			ata_port_probe(ap);
6825c6fd2807SJeff Garzik 
6826c6fd2807SJeff Garzik 			/* kick EH for boot probing */
6827c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
6828c6fd2807SJeff Garzik 
6829f58229f8STejun Heo 			ehi->probe_mask =
6830f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
6831c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
6832c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6833c6fd2807SJeff Garzik 
6834f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6835c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
6836c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
6837c6fd2807SJeff Garzik 
6838c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
6839c6fd2807SJeff Garzik 
6840c6fd2807SJeff Garzik 			/* wait for EH to finish */
6841c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6842c6fd2807SJeff Garzik 		} else {
684344877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6844c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
684544877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
6846c6fd2807SJeff Garzik 
6847c6fd2807SJeff Garzik 			if (rc) {
6848c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
6849c6fd2807SJeff Garzik 				 * Current libata behavior will
6850c6fd2807SJeff Garzik 				 * tear down everything when
6851c6fd2807SJeff Garzik 				 * the module is removed
6852c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
6853c6fd2807SJeff Garzik 				 */
6854c6fd2807SJeff Garzik 			}
6855c6fd2807SJeff Garzik 		}
6856c6fd2807SJeff Garzik 	}
6857c6fd2807SJeff Garzik 
6858c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
6859c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
6860cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6861cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6862c6fd2807SJeff Garzik 
68631ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
6864c6fd2807SJeff Garzik 	}
6865c6fd2807SJeff Garzik 
6866f3187195STejun Heo 	return 0;
6867f3187195STejun Heo }
6868f3187195STejun Heo 
6869f3187195STejun Heo /**
6870f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
6871f5cda257STejun Heo  *	@host: target ATA host
6872f5cda257STejun Heo  *	@irq: IRQ to request
6873f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
6874f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
6875f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
6876f5cda257STejun Heo  *
6877f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
6878f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
6879f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
6880f5cda257STejun Heo  *	arguments and performs the three steps in one go.
6881f5cda257STejun Heo  *
6882f5cda257STejun Heo  *	LOCKING:
6883f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6884f5cda257STejun Heo  *
6885f5cda257STejun Heo  *	RETURNS:
6886f5cda257STejun Heo  *	0 on success, -errno otherwise.
6887f5cda257STejun Heo  */
6888f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
6889f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
6890f5cda257STejun Heo 		      struct scsi_host_template *sht)
6891f5cda257STejun Heo {
6892cbcdd875STejun Heo 	int i, rc;
6893f5cda257STejun Heo 
6894f5cda257STejun Heo 	rc = ata_host_start(host);
6895f5cda257STejun Heo 	if (rc)
6896f5cda257STejun Heo 		return rc;
6897f5cda257STejun Heo 
6898f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6899f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
6900f5cda257STejun Heo 	if (rc)
6901f5cda257STejun Heo 		return rc;
6902f5cda257STejun Heo 
6903cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
6904cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
69054031826bSTejun Heo 
6906f5cda257STejun Heo 	rc = ata_host_register(host, sht);
6907f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
6908f5cda257STejun Heo 	if (rc)
6909f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
6910f5cda257STejun Heo 
6911f5cda257STejun Heo 	return rc;
6912f5cda257STejun Heo }
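/*
 * Example (illustrative sketch only, not part of libata): a typical PCI
 * LLD probe path built around ata_host_activate().  The names my_probe,
 * my_port_info and my_sht are hypothetical placeholders; a real driver
 * would also iomap its BARs and fill in each port's ioaddr before the
 * final call.
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		// ... iomap BARs and set up host->ports[i]->ioaddr here ...
 *
 *		return ata_host_activate(host, pdev->irq, ata_interrupt,
 *					 IRQF_SHARED, &my_sht);
 *	}
 */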
6913f5cda257STejun Heo 
6914f5cda257STejun Heo /**
6915c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
6916c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
6917c6fd2807SJeff Garzik  *
6918c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
6919c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
6920c6fd2807SJeff Garzik  *	be quiescent on return from this function.
6921c6fd2807SJeff Garzik  *
6922c6fd2807SJeff Garzik  *	LOCKING:
6923c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6924c6fd2807SJeff Garzik  */
6925741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap)
6926c6fd2807SJeff Garzik {
6927c6fd2807SJeff Garzik 	unsigned long flags;
692841bda9c9STejun Heo 	struct ata_link *link;
6929f58229f8STejun Heo 	struct ata_device *dev;
6930c6fd2807SJeff Garzik 
6931c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
6932c6fd2807SJeff Garzik 		goto skip_eh;
6933c6fd2807SJeff Garzik 
6934c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
6935c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6936c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
6937c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6938c6fd2807SJeff Garzik 
6939c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
6940c6fd2807SJeff Garzik 
6941c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
6942c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
6943c6fd2807SJeff Garzik 	 */
6944c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6945c6fd2807SJeff Garzik 
694641bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
694741bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
6948f58229f8STejun Heo 			ata_dev_disable(dev);
694941bda9c9STejun Heo 	}
6950c6fd2807SJeff Garzik 
6951c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6952c6fd2807SJeff Garzik 
6953c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
6954c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
6955c6fd2807SJeff Garzik 	 * target.
6956c6fd2807SJeff Garzik 	 */
6957c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6958c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
6959c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6960c6fd2807SJeff Garzik 
6961c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
696245a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
6963c6fd2807SJeff Garzik 
6964c6fd2807SJeff Garzik  skip_eh:
6965c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
6966cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
6967c6fd2807SJeff Garzik }
6968c6fd2807SJeff Garzik 
6969c6fd2807SJeff Garzik /**
69700529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
69710529c159STejun Heo  *	@host: Host to detach
69720529c159STejun Heo  *
69730529c159STejun Heo  *	Detach all ports of @host.
69740529c159STejun Heo  *
69750529c159STejun Heo  *	LOCKING:
69760529c159STejun Heo  *	Kernel thread context (may sleep).
69770529c159STejun Heo  */
69780529c159STejun Heo void ata_host_detach(struct ata_host *host)
69790529c159STejun Heo {
69800529c159STejun Heo 	int i;
69810529c159STejun Heo 
69820529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
69830529c159STejun Heo 		ata_port_detach(host->ports[i]);
69840529c159STejun Heo }
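/*
 * Example (illustrative sketch only): non-PCI LLDs, e.g. platform device
 * drivers, normally call ata_host_detach() from their remove hook and let
 * devres release the remaining resources.  my_remove is hypothetical.
 *
 *	static int __devexit my_remove(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_detach(host);
 *		return 0;
 *	}
 */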
69850529c159STejun Heo 
6986c6fd2807SJeff Garzik /**
6987c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
6988c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
6989c6fd2807SJeff Garzik  *
6990c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
6991c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6992c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
6993c6fd2807SJeff Garzik  *	relative to cmd_addr.
6994c6fd2807SJeff Garzik  *
6995c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6996c6fd2807SJeff Garzik  */
6997c6fd2807SJeff Garzik 
6998c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
6999c6fd2807SJeff Garzik {
7000c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7001c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7002c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7003c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7004c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7005c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7006c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7007c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7008c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7009c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7010c6fd2807SJeff Garzik }
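/*
 * Example (illustrative sketch only): an LLD typically fills in cmd_addr
 * (and the addresses ata_std_ports() does not touch) first, then lets
 * ata_std_ports() derive the individual taskfile register addresses.
 * "cmd_base" and "ctl_base" stand for iomapped addresses obtained by the
 * driver and are hypothetical here.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->altstatus_addr = ctl_base;	// not set by ata_std_ports()
 *	ioaddr->ctl_addr = ctl_base;
 *	ata_std_ports(ioaddr);			// data_addr .. command_addr
 */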
7011c6fd2807SJeff Garzik 
7012c6fd2807SJeff Garzik 
7013c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7014c6fd2807SJeff Garzik 
7015c6fd2807SJeff Garzik /**
7016c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
7017c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
7018c6fd2807SJeff Garzik  *
7019b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
7020b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
7021b878ca5dSTejun Heo  *	release is handled via devres.
7022c6fd2807SJeff Garzik  *
7023c6fd2807SJeff Garzik  *	LOCKING:
7024c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
7025c6fd2807SJeff Garzik  */
7026c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
7027c6fd2807SJeff Garzik {
70282855568bSJeff Garzik 	struct device *dev = &pdev->dev;
7029cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
7030c6fd2807SJeff Garzik 
7031f0d36efdSTejun Heo 	ata_host_detach(host);
7032c6fd2807SJeff Garzik }
7033c6fd2807SJeff Garzik 
7034c6fd2807SJeff Garzik /* move to PCI subsystem */
7035c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7036c6fd2807SJeff Garzik {
7037c6fd2807SJeff Garzik 	unsigned long tmp = 0;
7038c6fd2807SJeff Garzik 
7039c6fd2807SJeff Garzik 	switch (bits->width) {
7040c6fd2807SJeff Garzik 	case 1: {
7041c6fd2807SJeff Garzik 		u8 tmp8 = 0;
7042c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
7043c6fd2807SJeff Garzik 		tmp = tmp8;
7044c6fd2807SJeff Garzik 		break;
7045c6fd2807SJeff Garzik 	}
7046c6fd2807SJeff Garzik 	case 2: {
7047c6fd2807SJeff Garzik 		u16 tmp16 = 0;
7048c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
7049c6fd2807SJeff Garzik 		tmp = tmp16;
7050c6fd2807SJeff Garzik 		break;
7051c6fd2807SJeff Garzik 	}
7052c6fd2807SJeff Garzik 	case 4: {
7053c6fd2807SJeff Garzik 		u32 tmp32 = 0;
7054c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
7055c6fd2807SJeff Garzik 		tmp = tmp32;
7056c6fd2807SJeff Garzik 		break;
7057c6fd2807SJeff Garzik 	}
7058c6fd2807SJeff Garzik 
7059c6fd2807SJeff Garzik 	default:
7060c6fd2807SJeff Garzik 		return -EINVAL;
7061c6fd2807SJeff Garzik 	}
7062c6fd2807SJeff Garzik 
7063c6fd2807SJeff Garzik 	tmp &= bits->mask;
7064c6fd2807SJeff Garzik 
7065c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
7066c6fd2807SJeff Garzik }
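/*
 * Example (illustrative sketch only): checking whether a channel is
 * enabled in PCI config space.  The register offset and bit values below
 * are made up; a real driver would use the values from its controller's
 * datasheet.  pci_test_config_bits() returns 1 if (reg & mask) == val,
 * 0 otherwise.
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// reg, width, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->port_no]))
 *		return -ENOENT;			// channel disabled
 */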
7067c6fd2807SJeff Garzik 
70686ffa01d8STejun Heo #ifdef CONFIG_PM
7069c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7070c6fd2807SJeff Garzik {
7071c6fd2807SJeff Garzik 	pci_save_state(pdev);
7072c6fd2807SJeff Garzik 	pci_disable_device(pdev);
70734c90d971STejun Heo 
70744c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
7075c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
7076c6fd2807SJeff Garzik }
7077c6fd2807SJeff Garzik 
7078553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
7079c6fd2807SJeff Garzik {
7080553c4aa6STejun Heo 	int rc;
7081553c4aa6STejun Heo 
7082c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
7083c6fd2807SJeff Garzik 	pci_restore_state(pdev);
7084553c4aa6STejun Heo 
7085f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
7086553c4aa6STejun Heo 	if (rc) {
7087553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
7088553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
7089553c4aa6STejun Heo 		return rc;
7090553c4aa6STejun Heo 	}
7091553c4aa6STejun Heo 
7092c6fd2807SJeff Garzik 	pci_set_master(pdev);
7093553c4aa6STejun Heo 	return 0;
7094c6fd2807SJeff Garzik }
7095c6fd2807SJeff Garzik 
7096c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7097c6fd2807SJeff Garzik {
7098cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7099c6fd2807SJeff Garzik 	int rc = 0;
7100c6fd2807SJeff Garzik 
7101cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
7102c6fd2807SJeff Garzik 	if (rc)
7103c6fd2807SJeff Garzik 		return rc;
7104c6fd2807SJeff Garzik 
7105c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
7106c6fd2807SJeff Garzik 
7107c6fd2807SJeff Garzik 	return 0;
7108c6fd2807SJeff Garzik }
7109c6fd2807SJeff Garzik 
7110c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
7111c6fd2807SJeff Garzik {
7112cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7113553c4aa6STejun Heo 	int rc;
7114c6fd2807SJeff Garzik 
7115553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
7116553c4aa6STejun Heo 	if (rc == 0)
7117cca3974eSJeff Garzik 		ata_host_resume(host);
7118553c4aa6STejun Heo 	return rc;
7119c6fd2807SJeff Garzik }
71206ffa01d8STejun Heo #endif /* CONFIG_PM */
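/*
 * Example (illustrative sketch only): drivers that need no extra work on
 * suspend/resume usually just point their pci_driver hooks at the helpers
 * above.  my_pci_driver, my_pci_ids and my_probe are hypothetical.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= my_pci_ids,
 *		.probe		= my_probe,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */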
71216ffa01d8STejun Heo 
7122c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7123c6fd2807SJeff Garzik 
7124c6fd2807SJeff Garzik 
7125c6fd2807SJeff Garzik static int __init ata_init(void)
7126c6fd2807SJeff Garzik {
7127c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
7128c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7129c6fd2807SJeff Garzik 	if (!ata_wq)
7130c6fd2807SJeff Garzik 		return -ENOMEM;
7131c6fd2807SJeff Garzik 
7132c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7133c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7134c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7135c6fd2807SJeff Garzik 		return -ENOMEM;
7136c6fd2807SJeff Garzik 	}
7137c6fd2807SJeff Garzik 
7138c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7139c6fd2807SJeff Garzik 	return 0;
7140c6fd2807SJeff Garzik }
7141c6fd2807SJeff Garzik 
7142c6fd2807SJeff Garzik static void __exit ata_exit(void)
7143c6fd2807SJeff Garzik {
7144c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7145c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7146c6fd2807SJeff Garzik }
7147c6fd2807SJeff Garzik 
7148a4625085SBrian King subsys_initcall(ata_init);
7149c6fd2807SJeff Garzik module_exit(ata_exit);
7150c6fd2807SJeff Garzik 
7151c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7152c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7153c6fd2807SJeff Garzik 
7154c6fd2807SJeff Garzik int ata_ratelimit(void)
7155c6fd2807SJeff Garzik {
7156c6fd2807SJeff Garzik 	int rc;
7157c6fd2807SJeff Garzik 	unsigned long flags;
7158c6fd2807SJeff Garzik 
7159c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7160c6fd2807SJeff Garzik 
7161c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7162c6fd2807SJeff Garzik 		rc = 1;
7163c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7164c6fd2807SJeff Garzik 	} else
7165c6fd2807SJeff Garzik 		rc = 0;
7166c6fd2807SJeff Garzik 
7167c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7168c6fd2807SJeff Garzik 
7169c6fd2807SJeff Garzik 	return rc;
7170c6fd2807SJeff Garzik }
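/*
 * Example (illustrative sketch only): ata_ratelimit() is meant to keep
 * repetitive warnings, e.g. from interrupt context, down to roughly five
 * per second.  The condition and message below are made up.
 *
 *	if (unlikely(status & ATA_ERR) && ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious error, status 0x%x\n", status);
 */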
7171c6fd2807SJeff Garzik 
7172c6fd2807SJeff Garzik /**
7173c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7174c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7175c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7176c6fd2807SJeff Garzik  *	@val: Wait condition
7177c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7178c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7179c6fd2807SJeff Garzik  *
7180c6fd2807SJeff Garzik  *	Waiting for some bits of a register to change is a common
7181c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
7182c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
7183c6fd2807SJeff Garzik  *
7184c6fd2807SJeff Garzik  *	(*@reg & mask) != val
7185c6fd2807SJeff Garzik  *
7186c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7187c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7188c6fd2807SJeff Garzik  *
7189c6fd2807SJeff Garzik  *	LOCKING:
7190c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7191c6fd2807SJeff Garzik  *
7192c6fd2807SJeff Garzik  *	RETURNS:
7193c6fd2807SJeff Garzik  *	The final register value.
7194c6fd2807SJeff Garzik  */
7195c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7196c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7197c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7198c6fd2807SJeff Garzik {
7199c6fd2807SJeff Garzik 	unsigned long timeout;
7200c6fd2807SJeff Garzik 	u32 tmp;
7201c6fd2807SJeff Garzik 
7202c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7203c6fd2807SJeff Garzik 
7204c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7205c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7206c6fd2807SJeff Garzik 	 * eat away the timeout.
7207c6fd2807SJeff Garzik 	 */
7208c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7209c6fd2807SJeff Garzik 
7210c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7211c6fd2807SJeff Garzik 		msleep(interval_msec);
7212c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7213c6fd2807SJeff Garzik 	}
7214c6fd2807SJeff Garzik 
7215c6fd2807SJeff Garzik 	return tmp;
7216c6fd2807SJeff Garzik }
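/*
 * Example (illustrative sketch only): polling an MMIO register until a
 * busy bit clears.  MY_CTL_REG and MY_CTL_BUSY are hypothetical; the call
 * loops while (reg & MY_CTL_BUSY) == MY_CTL_BUSY, sampling every 10ms and
 * giving up after 500ms.
 *
 *	u32 tmp;
 *
 *	tmp = ata_wait_register(mmio + MY_CTL_REG, MY_CTL_BUSY, MY_CTL_BUSY,
 *				10, 500);
 *	if (tmp & MY_CTL_BUSY)
 *		return -EBUSY;		// still busy after timeout
 */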
7217c6fd2807SJeff Garzik 
7218c6fd2807SJeff Garzik /*
7219c6fd2807SJeff Garzik  * Dummy port_ops
7220c6fd2807SJeff Garzik  */
7221c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7222c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7223c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7224c6fd2807SJeff Garzik 
7225c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7226c6fd2807SJeff Garzik {
7227c6fd2807SJeff Garzik 	return ATA_DRDY;
7228c6fd2807SJeff Garzik }
7229c6fd2807SJeff Garzik 
7230c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7231c6fd2807SJeff Garzik {
7232c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7233c6fd2807SJeff Garzik }
7234c6fd2807SJeff Garzik 
7235c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7236c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7237c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7238c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7239c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7240c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7241c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7242c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7243c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7244c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7245c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7246c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7247c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7248c6fd2807SJeff Garzik };
7249c6fd2807SJeff Garzik 
725021b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
725121b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
725221b0ad4fSTejun Heo };
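/*
 * Example (illustrative sketch only): ata_dummy_port_info is useful when
 * only one channel of a dual-channel controller should be driven; the
 * unused slot gets the dummy info so the port is allocated but never
 * touches hardware.  my_port_info is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */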
725321b0ad4fSTejun Heo 
7254c6fd2807SJeff Garzik /*
7255c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7256c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7257c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7258c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7259c6fd2807SJeff Garzik  */
7260c6fd2807SJeff Garzik 
7261c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7262c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7263c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7264c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
726521b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7266c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7267c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7268cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7269f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7270f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7271ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7272f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7273f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
72740529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7275c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7276c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
7277c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7278c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7279c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7280c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7281c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7282c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7283c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7284c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
728543727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7286c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7287c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7288c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7289c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7290c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7291c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7292d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7293c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
729404351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
72950d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
72960d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
729731cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7298c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7299d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7300c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7301c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7302c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7303c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7304c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7305c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7306c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7307c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7308c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7309c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7310c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7311c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
731210305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7313c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7314936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7315936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7316c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
7317c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
7318c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7319c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7320c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7321cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7322c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7323c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7324c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7325c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7326c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7327c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7328c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7329c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
7330d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7331c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
7332c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7333c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7334c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7335c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7336c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7337c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7338c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7339c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7340c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7341c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7342936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7343936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
73446ffa01d8STejun Heo #ifdef CONFIG_PM
7345cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7346cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
73476ffa01d8STejun Heo #endif /* CONFIG_PM */
7348c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7349c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
735010305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7351c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7352c6fd2807SJeff Garzik 
7353c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7354c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7355c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7356c6fd2807SJeff Garzik 
7357c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7358c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7359d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
73601626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7361d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7362c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7363c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
73646ffa01d8STejun Heo #ifdef CONFIG_PM
7365c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7366c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7367c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7368c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
73696ffa01d8STejun Heo #endif /* CONFIG_PM */
7370c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7371c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7372c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7373c6fd2807SJeff Garzik 
737431f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
73753af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
73763af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
73773af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
73783af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
73793af9a77aSTejun Heo 
7380b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7381b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7382b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7383cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7384cbcdd875STejun Heo #ifdef CONFIG_PCI
7385cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7386cbcdd875STejun Heo #endif /* CONFIG_PCI */
7387c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
7388c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7389dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7390c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7391c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
73927d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7393c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7394c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7395c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7396c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7397c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
739883625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7399a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7400be0d18dfSAlan Cox 
7401be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7402be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7403be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7404be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7405