1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
33c6fd2807SJeff Garzik  */
34c6fd2807SJeff Garzik 
35c6fd2807SJeff Garzik #include <linux/kernel.h>
36c6fd2807SJeff Garzik #include <linux/module.h>
37c6fd2807SJeff Garzik #include <linux/pci.h>
38c6fd2807SJeff Garzik #include <linux/init.h>
39c6fd2807SJeff Garzik #include <linux/list.h>
40c6fd2807SJeff Garzik #include <linux/mm.h>
41c6fd2807SJeff Garzik #include <linux/highmem.h>
42c6fd2807SJeff Garzik #include <linux/spinlock.h>
43c6fd2807SJeff Garzik #include <linux/blkdev.h>
44c6fd2807SJeff Garzik #include <linux/delay.h>
45c6fd2807SJeff Garzik #include <linux/timer.h>
46c6fd2807SJeff Garzik #include <linux/interrupt.h>
47c6fd2807SJeff Garzik #include <linux/completion.h>
48c6fd2807SJeff Garzik #include <linux/suspend.h>
49c6fd2807SJeff Garzik #include <linux/workqueue.h>
50c6fd2807SJeff Garzik #include <linux/jiffies.h>
51c6fd2807SJeff Garzik #include <linux/scatterlist.h>
522dcb407eSJeff Garzik #include <linux/io.h>
53c6fd2807SJeff Garzik #include <scsi/scsi.h>
54c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
55c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
56c6fd2807SJeff Garzik #include <linux/libata.h>
57c6fd2807SJeff Garzik #include <asm/semaphore.h>
58c6fd2807SJeff Garzik #include <asm/byteorder.h>
59c6fd2807SJeff Garzik 
60c6fd2807SJeff Garzik #include "libata.h"
61c6fd2807SJeff Garzik 
62fda0efc5SJeff Garzik 
63c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
64c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
65c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
66c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
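
/*
 * A usage sketch, assuming the sata_link_debounce() helper with the
 * signature (link, params, deadline): the tables above are passed as
 * @params, e.g. sata_link_debounce(link, sata_deb_timing_hotplug,
 * deadline).  interval is the SStatus polling period, duration is how
 * long the reading must stay stable, and timeout bounds the whole wait.
 */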
67c6fd2807SJeff Garzik 
68c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
69c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
70c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev,
72218f3d30SJeff Garzik 					u8 enable, u8 feature);
73c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
7475683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75c6fd2807SJeff Garzik 
76f3187195STejun Heo unsigned int ata_print_id = 1;
77c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
78c6fd2807SJeff Garzik 
79c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
80c6fd2807SJeff Garzik 
81c6fd2807SJeff Garzik int atapi_enabled = 1;
82c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
83c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84c6fd2807SJeff Garzik 
85c6fd2807SJeff Garzik int atapi_dmadir = 0;
86c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
87c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88c6fd2807SJeff Garzik 
89baf4fdfaSMark Lord int atapi_passthru16 = 1;
90baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444);
91baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92baf4fdfaSMark Lord 
93c6fd2807SJeff Garzik int libata_fua = 0;
94c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
95c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96c6fd2807SJeff Garzik 
972dcb407eSJeff Garzik static int ata_ignore_hpa;
981e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
991e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
1001e999736SAlan Cox 
101b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
102b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444);
103b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
104b3a70601SAlan Cox 
105c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
106c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
107c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
108c6fd2807SJeff Garzik 
1096ebe9d86SJeff Garzik int libata_noacpi = 0;
110d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
1116ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11211ef697bSKristen Carlson Accardi 
113c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
114c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
115c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
116c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
117c6fd2807SJeff Garzik 
118c6fd2807SJeff Garzik 
119c6fd2807SJeff Garzik /**
120c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
121c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
122c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
1239977126cSTejun Heo  *	@is_cmd: This FIS is for a command
1249977126cSTejun Heo  *	@fis: Buffer into which data will be output
125c6fd2807SJeff Garzik  *
126c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
127c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
128c6fd2807SJeff Garzik  *
129c6fd2807SJeff Garzik  *	LOCKING:
130c6fd2807SJeff Garzik  *	Inherited from caller.
131c6fd2807SJeff Garzik  */
1329977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
133c6fd2807SJeff Garzik {
134c6fd2807SJeff Garzik 	fis[0] = 0x27;			/* Register - Host to Device FIS */
1359977126cSTejun Heo 	fis[1] = pmp & 0xf;		/* Port multiplier number */
1369977126cSTejun Heo 	if (is_cmd)
1379977126cSTejun Heo 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
1389977126cSTejun Heo 
139c6fd2807SJeff Garzik 	fis[2] = tf->command;
140c6fd2807SJeff Garzik 	fis[3] = tf->feature;
141c6fd2807SJeff Garzik 
142c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
143c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
144c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
145c6fd2807SJeff Garzik 	fis[7] = tf->device;
146c6fd2807SJeff Garzik 
147c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
148c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
149c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
150c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
151c6fd2807SJeff Garzik 
152c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
153c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
154c6fd2807SJeff Garzik 	fis[14] = 0;
155c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
156c6fd2807SJeff Garzik 
157c6fd2807SJeff Garzik 	fis[16] = 0;
158c6fd2807SJeff Garzik 	fis[17] = 0;
159c6fd2807SJeff Garzik 	fis[18] = 0;
160c6fd2807SJeff Garzik 	fis[19] = 0;
161c6fd2807SJeff Garzik }
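
/*
 * Note on the layout produced above: the 20-byte buffer follows the
 * Register - Host to Device FIS format -- byte 0 is the FIS type
 * (0x27), byte 1 carries the C bit and PM port, the taskfile register
 * values follow, and the trailing reserved bytes are zeroed.
 */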
162c6fd2807SJeff Garzik 
163c6fd2807SJeff Garzik /**
164c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
165c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
166c6fd2807SJeff Garzik  *	@tf: Taskfile to output
167c6fd2807SJeff Garzik  *
168c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
169c6fd2807SJeff Garzik  *
170c6fd2807SJeff Garzik  *	LOCKING:
171c6fd2807SJeff Garzik  *	Inherited from caller.
172c6fd2807SJeff Garzik  */
173c6fd2807SJeff Garzik 
174c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
175c6fd2807SJeff Garzik {
176c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
177c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
178c6fd2807SJeff Garzik 
179c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
180c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
181c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
182c6fd2807SJeff Garzik 	tf->device	= fis[7];
183c6fd2807SJeff Garzik 
184c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
185c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
186c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
187c6fd2807SJeff Garzik 
188c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
189c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
190c6fd2807SJeff Garzik }
191c6fd2807SJeff Garzik 
192c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
193c6fd2807SJeff Garzik 	/* pio multi */
194c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
195c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
196c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
197c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
198c6fd2807SJeff Garzik 	0,
199c6fd2807SJeff Garzik 	0,
200c6fd2807SJeff Garzik 	0,
201c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
202c6fd2807SJeff Garzik 	/* pio */
203c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
204c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
205c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
206c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	0,
209c6fd2807SJeff Garzik 	0,
210c6fd2807SJeff Garzik 	0,
211c6fd2807SJeff Garzik 	/* dma */
212c6fd2807SJeff Garzik 	ATA_CMD_READ,
213c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
214c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
215c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
216c6fd2807SJeff Garzik 	0,
217c6fd2807SJeff Garzik 	0,
218c6fd2807SJeff Garzik 	0,
219c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
220c6fd2807SJeff Garzik };
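
/*
 * ata_rw_cmds[] is indexed as base + fua + lba48 + write, where base is
 * 0 for PIO multi, 8 for PIO and 16 for DMA, FUA adds 4, LBA48 adds 2
 * and a write adds 1 -- see ata_rwcmd_protocol() below.  Zero entries
 * mark invalid combinations.
 */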
221c6fd2807SJeff Garzik 
222c6fd2807SJeff Garzik /**
223c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
224bd056d7eSTejun Heo  *	@tf: command to examine and configure
225bd056d7eSTejun Heo  *	@dev: device tf belongs to
226c6fd2807SJeff Garzik  *
227c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
228c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
229c6fd2807SJeff Garzik  *
230c6fd2807SJeff Garzik  *	LOCKING:
231c6fd2807SJeff Garzik  *	caller.
232c6fd2807SJeff Garzik  */
233bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
234c6fd2807SJeff Garzik {
235c6fd2807SJeff Garzik 	u8 cmd;
236c6fd2807SJeff Garzik 
237c6fd2807SJeff Garzik 	int index, fua, lba48, write;
238c6fd2807SJeff Garzik 
239c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
240c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
241c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
242c6fd2807SJeff Garzik 
243c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
244c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
245c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
2469af5c9c9STejun Heo 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
247c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
248c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
249c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
250c6fd2807SJeff Garzik 	} else {
251c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
252c6fd2807SJeff Garzik 		index = 16;
253c6fd2807SJeff Garzik 	}
254c6fd2807SJeff Garzik 
255c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
256c6fd2807SJeff Garzik 	if (cmd) {
257c6fd2807SJeff Garzik 		tf->command = cmd;
258c6fd2807SJeff Garzik 		return 0;
259c6fd2807SJeff Garzik 	}
260c6fd2807SJeff Garzik 	return -1;
261c6fd2807SJeff Garzik }
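
/*
 * Worked example for the lookup above: a DMA write with LBA48 and FUA
 * set selects index 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT.
 */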
262c6fd2807SJeff Garzik 
263c6fd2807SJeff Garzik /**
26435b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
26535b649feSTejun Heo  *	@tf: ATA taskfile of interest
26635b649feSTejun Heo  *	@dev: ATA device @tf belongs to
26735b649feSTejun Heo  *
26835b649feSTejun Heo  *	LOCKING:
26935b649feSTejun Heo  *	None.
27035b649feSTejun Heo  *
27135b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
27235b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
27335b649feSTejun Heo  *	flags select the address format to use.
27435b649feSTejun Heo  *
27535b649feSTejun Heo  *	RETURNS:
27635b649feSTejun Heo  *	Block address read from @tf.
27735b649feSTejun Heo  */
27835b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
27935b649feSTejun Heo {
28035b649feSTejun Heo 	u64 block = 0;
28135b649feSTejun Heo 
28235b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
28335b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
28435b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
28535b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
28635b649feSTejun Heo 			block |= tf->hob_lbal << 24;
28735b649feSTejun Heo 		} else
28835b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
28935b649feSTejun Heo 
29035b649feSTejun Heo 		block |= tf->lbah << 16;
29135b649feSTejun Heo 		block |= tf->lbam << 8;
29235b649feSTejun Heo 		block |= tf->lbal;
29335b649feSTejun Heo 	} else {
29435b649feSTejun Heo 		u32 cyl, head, sect;
29535b649feSTejun Heo 
29635b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
29735b649feSTejun Heo 		head = tf->device & 0xf;
29835b649feSTejun Heo 		sect = tf->lbal;
29935b649feSTejun Heo 
30035b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
30135b649feSTejun Heo 	}
30235b649feSTejun Heo 
30335b649feSTejun Heo 	return block;
30435b649feSTejun Heo }
30535b649feSTejun Heo 
30635b649feSTejun Heo /**
307bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
308bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
309bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
310bd056d7eSTejun Heo  *	@block: Block address
311bd056d7eSTejun Heo  *	@n_block: Number of blocks
312bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
313bd056d7eSTejun Heo  *	@tag: NCQ tag number (if applicable)
314bd056d7eSTejun Heo  *
315bd056d7eSTejun Heo  *	LOCKING:
316bd056d7eSTejun Heo  *	None.
317bd056d7eSTejun Heo  *
318bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
319bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
320bd056d7eSTejun Heo  *
321bd056d7eSTejun Heo  *	RETURNS:
322bd056d7eSTejun Heo  *
323bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
324bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
325bd056d7eSTejun Heo  */
326bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
327bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
328bd056d7eSTejun Heo 		    unsigned int tag)
329bd056d7eSTejun Heo {
330bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
331bd056d7eSTejun Heo 	tf->flags |= tf_flags;
332bd056d7eSTejun Heo 
3336d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
334bd056d7eSTejun Heo 		/* yay, NCQ */
335bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
336bd056d7eSTejun Heo 			return -ERANGE;
337bd056d7eSTejun Heo 
338bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
339bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
340bd056d7eSTejun Heo 
341bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
342bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
343bd056d7eSTejun Heo 		else
344bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
345bd056d7eSTejun Heo 
346bd056d7eSTejun Heo 		tf->nsect = tag << 3;
347bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
348bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
349bd056d7eSTejun Heo 
350bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
351bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
352bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
353bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
354bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
355bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
356bd056d7eSTejun Heo 
357bd056d7eSTejun Heo 		tf->device = 1 << 6;
358bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
359bd056d7eSTejun Heo 			tf->device |= 1 << 7;
360bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
361bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
362bd056d7eSTejun Heo 
363bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
364bd056d7eSTejun Heo 			/* use LBA28 */
365bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
366bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
367bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
368bd056d7eSTejun Heo 				return -ERANGE;
369bd056d7eSTejun Heo 
370bd056d7eSTejun Heo 			/* use LBA48 */
371bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
372bd056d7eSTejun Heo 
373bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
374bd056d7eSTejun Heo 
375bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
376bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
377bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
378bd056d7eSTejun Heo 		} else
379bd056d7eSTejun Heo 			/* request too large even for LBA48 */
380bd056d7eSTejun Heo 			return -ERANGE;
381bd056d7eSTejun Heo 
382bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
383bd056d7eSTejun Heo 			return -EINVAL;
384bd056d7eSTejun Heo 
385bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
386bd056d7eSTejun Heo 
387bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
388bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
389bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
390bd056d7eSTejun Heo 
391bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
392bd056d7eSTejun Heo 	} else {
393bd056d7eSTejun Heo 		/* CHS */
394bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
395bd056d7eSTejun Heo 
396bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
397bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
398bd056d7eSTejun Heo 			return -ERANGE;
399bd056d7eSTejun Heo 
400bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
401bd056d7eSTejun Heo 			return -EINVAL;
402bd056d7eSTejun Heo 
403bd056d7eSTejun Heo 		/* Convert LBA to CHS */
404bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
405bd056d7eSTejun Heo 		cyl   = track / dev->heads;
406bd056d7eSTejun Heo 		head  = track % dev->heads;
407bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
408bd056d7eSTejun Heo 
409bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
410bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
411bd056d7eSTejun Heo 
412bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
413bd056d7eSTejun Heo 		   Cylinder: 0-65535
414bd056d7eSTejun Heo 		   Head: 0-15
415bd056d7eSTejun Heo 		   Sector: 1-255 */
416bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
417bd056d7eSTejun Heo 			return -ERANGE;
418bd056d7eSTejun Heo 
419bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
420bd056d7eSTejun Heo 		tf->lbal = sect;
421bd056d7eSTejun Heo 		tf->lbam = cyl;
422bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
423bd056d7eSTejun Heo 		tf->device |= head;
424bd056d7eSTejun Heo 	}
425bd056d7eSTejun Heo 
426bd056d7eSTejun Heo 	return 0;
427bd056d7eSTejun Heo }
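
/*
 * Worked example (follows directly from the code above): an NCQ read of
 * n_block = 8 at block = 0x12345678 with tag 5 yields
 * tf->command = ATA_CMD_FPDMA_READ, tf->nsect = 0x28 (tag << 3),
 * tf->feature = 0x08, tf->lbal/lbam/lbah = 0x78/0x56/0x34,
 * tf->hob_lbal = 0x12 and tf->device = 0x40.
 */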
428bd056d7eSTejun Heo 
429bd056d7eSTejun Heo /**
430c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
431c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
432c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
433c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
434c6fd2807SJeff Garzik  *
435c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
436c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
437c6fd2807SJeff Garzik  *
438c6fd2807SJeff Garzik  *	LOCKING:
439c6fd2807SJeff Garzik  *	None.
440c6fd2807SJeff Garzik  *
441c6fd2807SJeff Garzik  *	RETURNS:
442c6fd2807SJeff Garzik  *	Packed xfer_mask.
443c6fd2807SJeff Garzik  */
444c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
445c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
446c6fd2807SJeff Garzik 				      unsigned int udma_mask)
447c6fd2807SJeff Garzik {
448c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
449c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
450c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
451c6fd2807SJeff Garzik }
452c6fd2807SJeff Garzik 
453c6fd2807SJeff Garzik /**
454c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
455c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
456c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
457c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
458c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
459c6fd2807SJeff Garzik  *
460c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
461c6fd2807SJeff Garzik  *	Any NULL destination masks will be ignored.
462c6fd2807SJeff Garzik  */
463c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
464c6fd2807SJeff Garzik 				unsigned int *pio_mask,
465c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
466c6fd2807SJeff Garzik 				unsigned int *udma_mask)
467c6fd2807SJeff Garzik {
468c6fd2807SJeff Garzik 	if (pio_mask)
469c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
470c6fd2807SJeff Garzik 	if (mwdma_mask)
471c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
472c6fd2807SJeff Garzik 	if (udma_mask)
473c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
474c6fd2807SJeff Garzik }
475c6fd2807SJeff Garzik 
476c6fd2807SJeff Garzik static const struct ata_xfer_ent {
477c6fd2807SJeff Garzik 	int shift, bits;
478c6fd2807SJeff Garzik 	u8 base;
479c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
480c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
481c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
482c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
483c6fd2807SJeff Garzik 	{ -1, },
484c6fd2807SJeff Garzik };
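
/*
 * Each ata_xfer_tbl entry maps a contiguous run of xfer_mask bits
 * (shift .. shift + bits - 1) onto XFER_* mode numbers starting at
 * base; the helpers below convert between the two representations.
 */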
485c6fd2807SJeff Garzik 
486c6fd2807SJeff Garzik /**
487c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
488c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
489c6fd2807SJeff Garzik  *
490c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
491c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
492c6fd2807SJeff Garzik  *
493c6fd2807SJeff Garzik  *	LOCKING:
494c6fd2807SJeff Garzik  *	None.
495c6fd2807SJeff Garzik  *
496c6fd2807SJeff Garzik  *	RETURNS:
497c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
498c6fd2807SJeff Garzik  */
499c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
500c6fd2807SJeff Garzik {
501c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
502c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
503c6fd2807SJeff Garzik 
504c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
505c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
506c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
507c6fd2807SJeff Garzik 	return 0;
508c6fd2807SJeff Garzik }
509c6fd2807SJeff Garzik 
510c6fd2807SJeff Garzik /**
511c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
512c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
513c6fd2807SJeff Garzik  *
514c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
515c6fd2807SJeff Garzik  *
516c6fd2807SJeff Garzik  *	LOCKING:
517c6fd2807SJeff Garzik  *	None.
518c6fd2807SJeff Garzik  *
519c6fd2807SJeff Garzik  *	RETURNS:
520c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
521c6fd2807SJeff Garzik  */
522c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
523c6fd2807SJeff Garzik {
524c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
525c6fd2807SJeff Garzik 
526c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
527c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
528c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
529c6fd2807SJeff Garzik 	return 0;
530c6fd2807SJeff Garzik }
531c6fd2807SJeff Garzik 
532c6fd2807SJeff Garzik /**
533c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
534c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
535c6fd2807SJeff Garzik  *
536c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
537c6fd2807SJeff Garzik  *
538c6fd2807SJeff Garzik  *	LOCKING:
539c6fd2807SJeff Garzik  *	None.
540c6fd2807SJeff Garzik  *
541c6fd2807SJeff Garzik  *	RETURNS:
542c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
543c6fd2807SJeff Garzik  */
544c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
545c6fd2807SJeff Garzik {
546c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
547c6fd2807SJeff Garzik 
548c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
549c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550c6fd2807SJeff Garzik 			return ent->shift;
551c6fd2807SJeff Garzik 	return -1;
552c6fd2807SJeff Garzik }
553c6fd2807SJeff Garzik 
554c6fd2807SJeff Garzik /**
555c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
556c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
557c6fd2807SJeff Garzik  *
558c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
559c6fd2807SJeff Garzik  *	(highest bit in @xfer_mask).
560c6fd2807SJeff Garzik  *
561c6fd2807SJeff Garzik  *	LOCKING:
562c6fd2807SJeff Garzik  *	None.
563c6fd2807SJeff Garzik  *
564c6fd2807SJeff Garzik  *	RETURNS:
565c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
566c6fd2807SJeff Garzik  *	@xfer_mask, or the constant C string "<n/a>".
567c6fd2807SJeff Garzik  */
568c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
569c6fd2807SJeff Garzik {
570c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
571c6fd2807SJeff Garzik 		"PIO0",
572c6fd2807SJeff Garzik 		"PIO1",
573c6fd2807SJeff Garzik 		"PIO2",
574c6fd2807SJeff Garzik 		"PIO3",
575c6fd2807SJeff Garzik 		"PIO4",
576b352e57dSAlan Cox 		"PIO5",
577b352e57dSAlan Cox 		"PIO6",
578c6fd2807SJeff Garzik 		"MWDMA0",
579c6fd2807SJeff Garzik 		"MWDMA1",
580c6fd2807SJeff Garzik 		"MWDMA2",
581b352e57dSAlan Cox 		"MWDMA3",
582b352e57dSAlan Cox 		"MWDMA4",
583c6fd2807SJeff Garzik 		"UDMA/16",
584c6fd2807SJeff Garzik 		"UDMA/25",
585c6fd2807SJeff Garzik 		"UDMA/33",
586c6fd2807SJeff Garzik 		"UDMA/44",
587c6fd2807SJeff Garzik 		"UDMA/66",
588c6fd2807SJeff Garzik 		"UDMA/100",
589c6fd2807SJeff Garzik 		"UDMA/133",
590c6fd2807SJeff Garzik 		"UDMA7",
591c6fd2807SJeff Garzik 	};
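	/* Note: the string order above must match the bit layout of the
	 * packed xfer_mask (all PIO modes, then MWDMA, then UDMA), since
	 * the highest set bit indexes this table directly. */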
592c6fd2807SJeff Garzik 	int highbit;
593c6fd2807SJeff Garzik 
594c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
595c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
596c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
597c6fd2807SJeff Garzik 	return "<n/a>";
598c6fd2807SJeff Garzik }
599c6fd2807SJeff Garzik 
600c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
601c6fd2807SJeff Garzik {
602c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
603c6fd2807SJeff Garzik 		"1.5 Gbps",
604c6fd2807SJeff Garzik 		"3.0 Gbps",
605c6fd2807SJeff Garzik 	};
606c6fd2807SJeff Garzik 
607c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
608c6fd2807SJeff Garzik 		return "<unknown>";
609c6fd2807SJeff Garzik 	return spd_str[spd - 1];
610c6fd2807SJeff Garzik }
611c6fd2807SJeff Garzik 
612c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
613c6fd2807SJeff Garzik {
61409d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
6159af5c9c9STejun Heo 		if (ata_msg_drv(dev->link->ap))
616c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6174ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6184ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
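		/* enabled ATA_DEV_* values are immediately followed by their
		 * *_UNSUP counterparts, so bumping ->class below marks the
		 * device as disabled */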
619c6fd2807SJeff Garzik 		dev->class++;
620c6fd2807SJeff Garzik 	}
621c6fd2807SJeff Garzik }
622c6fd2807SJeff Garzik 
623ca77329fSKristen Carlson Accardi static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
624ca77329fSKristen Carlson Accardi {
625ca77329fSKristen Carlson Accardi 	struct ata_link *link = dev->link;
626ca77329fSKristen Carlson Accardi 	struct ata_port *ap = link->ap;
627ca77329fSKristen Carlson Accardi 	u32 scontrol;
628ca77329fSKristen Carlson Accardi 	unsigned int err_mask;
629ca77329fSKristen Carlson Accardi 	int rc;
630ca77329fSKristen Carlson Accardi 
631ca77329fSKristen Carlson Accardi 	/*
632ca77329fSKristen Carlson Accardi 	 * disallow DIPM for drivers which haven't set
633ca77329fSKristen Carlson Accardi 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
634ca77329fSKristen Carlson Accardi 	 * phy ready will be set in the interrupt status on
635ca77329fSKristen Carlson Accardi 	 * state changes, which will cause some drivers to
636ca77329fSKristen Carlson Accardi 	 * think there are errors - additionally drivers will
637ca77329fSKristen Carlson Accardi 	 * need to disable hot plug.
638ca77329fSKristen Carlson Accardi 	 */
639ca77329fSKristen Carlson Accardi 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
640ca77329fSKristen Carlson Accardi 		ap->pm_policy = NOT_AVAILABLE;
641ca77329fSKristen Carlson Accardi 		return -EINVAL;
642ca77329fSKristen Carlson Accardi 	}
643ca77329fSKristen Carlson Accardi 
644ca77329fSKristen Carlson Accardi 	/*
645ca77329fSKristen Carlson Accardi 	 * For DIPM, we will only enable it for the
646ca77329fSKristen Carlson Accardi 	 * min_power setting.
647ca77329fSKristen Carlson Accardi 	 *
648ca77329fSKristen Carlson Accardi 	 * Why?  Because disks are too stupid to know that
649ca77329fSKristen Carlson Accardi 	 * if the host rejects a request to go to SLUMBER
650ca77329fSKristen Carlson Accardi 	 * they should retry at PARTIAL; instead they
651ca77329fSKristen Carlson Accardi 	 * just give up.  So, for medium_power to
652ca77329fSKristen Carlson Accardi 	 * work at all, we need to only allow HIPM.
653ca77329fSKristen Carlson Accardi 	 */
654ca77329fSKristen Carlson Accardi 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
655ca77329fSKristen Carlson Accardi 	if (rc)
656ca77329fSKristen Carlson Accardi 		return rc;
657ca77329fSKristen Carlson Accardi 
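	/* Bits 9:8 of SControl form the IPM field: bit 8 disallows
	 * transitions to PARTIAL and bit 9 disallows transitions to
	 * SLUMBER (per the SATA SControl register definition). */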
658ca77329fSKristen Carlson Accardi 	switch (policy) {
659ca77329fSKristen Carlson Accardi 	case MIN_POWER:
660ca77329fSKristen Carlson Accardi 		/* no restrictions on IPM transitions */
661ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x3 << 8);
662ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
663ca77329fSKristen Carlson Accardi 		if (rc)
664ca77329fSKristen Carlson Accardi 			return rc;
665ca77329fSKristen Carlson Accardi 
666ca77329fSKristen Carlson Accardi 		/* enable DIPM */
667ca77329fSKristen Carlson Accardi 		if (dev->flags & ATA_DFLAG_DIPM)
668ca77329fSKristen Carlson Accardi 			err_mask = ata_dev_set_feature(dev,
669ca77329fSKristen Carlson Accardi 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
670ca77329fSKristen Carlson Accardi 		break;
671ca77329fSKristen Carlson Accardi 	case MEDIUM_POWER:
672ca77329fSKristen Carlson Accardi 		/* allow IPM to PARTIAL */
673ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x1 << 8);
674ca77329fSKristen Carlson Accardi 		scontrol |= (0x2 << 8);
675ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
676ca77329fSKristen Carlson Accardi 		if (rc)
677ca77329fSKristen Carlson Accardi 			return rc;
678ca77329fSKristen Carlson Accardi 
679f5456b63SKristen Carlson Accardi 		/*
680f5456b63SKristen Carlson Accardi 		 * We don't have to disable DIPM since the IPM flags
681f5456b63SKristen Carlson Accardi 		 * disallow transitions to SLUMBER, which effectively
682f5456b63SKristen Carlson Accardi 		 * disables DIPM if the device does not support PARTIAL.
683f5456b63SKristen Carlson Accardi 		 */
684ca77329fSKristen Carlson Accardi 		break;
685ca77329fSKristen Carlson Accardi 	case NOT_AVAILABLE:
686ca77329fSKristen Carlson Accardi 	case MAX_PERFORMANCE:
687ca77329fSKristen Carlson Accardi 		/* disable all IPM transitions */
688ca77329fSKristen Carlson Accardi 		scontrol |= (0x3 << 8);
689ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
690ca77329fSKristen Carlson Accardi 		if (rc)
691ca77329fSKristen Carlson Accardi 			return rc;
692ca77329fSKristen Carlson Accardi 
693f5456b63SKristen Carlson Accardi 		/*
694f5456b63SKristen Carlson Accardi 		 * We don't have to disable DIPM since the IPM flags
695f5456b63SKristen Carlson Accardi 		 * disallow all transitions, which effectively
696f5456b63SKristen Carlson Accardi 		 * disables DIPM anyway.
697f5456b63SKristen Carlson Accardi 		 */
698ca77329fSKristen Carlson Accardi 		break;
699ca77329fSKristen Carlson Accardi 	}
700ca77329fSKristen Carlson Accardi 
701ca77329fSKristen Carlson Accardi 	/* FIXME: handle SET FEATURES failure */
702ca77329fSKristen Carlson Accardi 	(void) err_mask;
703ca77329fSKristen Carlson Accardi 
704ca77329fSKristen Carlson Accardi 	return 0;
705ca77329fSKristen Carlson Accardi }
706ca77329fSKristen Carlson Accardi 
707ca77329fSKristen Carlson Accardi /**
708ca77329fSKristen Carlson Accardi  *	ata_dev_enable_pm - enable SATA interface power management
70948166fd9SStephen Hemminger  *	@dev:  device to enable power management
71048166fd9SStephen Hemminger  *	@policy: the link power management policy
711ca77329fSKristen Carlson Accardi  *
712ca77329fSKristen Carlson Accardi  *	Enable SATA Interface power management.  This will enable
713ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) for the min_power
714ca77329fSKristen Carlson Accardi  * 	policy, and then call driver-specific callbacks for
715ca77329fSKristen Carlson Accardi  *	enabling Host Initiated Power Management.
716ca77329fSKristen Carlson Accardi  *
717ca77329fSKristen Carlson Accardi  *	Locking: Caller.
718ca77329fSKristen Carlson Accardi  *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
719ca77329fSKristen Carlson Accardi  */
720ca77329fSKristen Carlson Accardi void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
721ca77329fSKristen Carlson Accardi {
722ca77329fSKristen Carlson Accardi 	int rc = 0;
723ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
724ca77329fSKristen Carlson Accardi 
725ca77329fSKristen Carlson Accardi 	/* set HIPM first, then DIPM */
726ca77329fSKristen Carlson Accardi 	if (ap->ops->enable_pm)
727ca77329fSKristen Carlson Accardi 		rc = ap->ops->enable_pm(ap, policy);
728ca77329fSKristen Carlson Accardi 	if (rc)
729ca77329fSKristen Carlson Accardi 		goto enable_pm_out;
730ca77329fSKristen Carlson Accardi 	rc = ata_dev_set_dipm(dev, policy);
731ca77329fSKristen Carlson Accardi 
732ca77329fSKristen Carlson Accardi enable_pm_out:
733ca77329fSKristen Carlson Accardi 	if (rc)
734ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
735ca77329fSKristen Carlson Accardi 	else
736ca77329fSKristen Carlson Accardi 		ap->pm_policy = policy;
737ca77329fSKristen Carlson Accardi 	return /* rc */;	/* hopefully we can use 'rc' eventually */
738ca77329fSKristen Carlson Accardi }
739ca77329fSKristen Carlson Accardi 
7401992a5edSStephen Rothwell #ifdef CONFIG_PM
741ca77329fSKristen Carlson Accardi /**
742ca77329fSKristen Carlson Accardi  *	ata_dev_disable_pm - disable SATA interface power management
74348166fd9SStephen Hemminger  *	@dev: device to disable power management
744ca77329fSKristen Carlson Accardi  *
745ca77329fSKristen Carlson Accardi  *	Disable SATA Interface power management.  This will disable
746ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) without changing
747ca77329fSKristen Carlson Accardi  * 	the policy, then call driver-specific callbacks for disabling
748ca77329fSKristen Carlson Accardi  * 	Host Initiated Power Management.
749ca77329fSKristen Carlson Accardi  *
750ca77329fSKristen Carlson Accardi  *	Locking: Caller.
751ca77329fSKristen Carlson Accardi  *	Returns: void
752ca77329fSKristen Carlson Accardi  */
753ca77329fSKristen Carlson Accardi static void ata_dev_disable_pm(struct ata_device *dev)
754ca77329fSKristen Carlson Accardi {
755ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
756ca77329fSKristen Carlson Accardi 
757ca77329fSKristen Carlson Accardi 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
758ca77329fSKristen Carlson Accardi 	if (ap->ops->disable_pm)
759ca77329fSKristen Carlson Accardi 		ap->ops->disable_pm(ap);
760ca77329fSKristen Carlson Accardi }
7611992a5edSStephen Rothwell #endif	/* CONFIG_PM */
762ca77329fSKristen Carlson Accardi 
763ca77329fSKristen Carlson Accardi void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
764ca77329fSKristen Carlson Accardi {
765ca77329fSKristen Carlson Accardi 	ap->pm_policy = policy;
766ca77329fSKristen Carlson Accardi 	ap->link.eh_info.action |= ATA_EHI_LPM;
767ca77329fSKristen Carlson Accardi 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
768ca77329fSKristen Carlson Accardi 	ata_port_schedule_eh(ap);
769ca77329fSKristen Carlson Accardi }
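
/*
 * Note: ata_lpm_schedule() only records the requested policy and flags
 * the link for EH attention; the actual link PM reconfiguration is
 * applied later by the error-handling thread when it sees ATA_EHI_LPM.
 */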
770ca77329fSKristen Carlson Accardi 
7711992a5edSStephen Rothwell #ifdef CONFIG_PM
772ca77329fSKristen Carlson Accardi static void ata_lpm_enable(struct ata_host *host)
773ca77329fSKristen Carlson Accardi {
774ca77329fSKristen Carlson Accardi 	struct ata_link *link;
775ca77329fSKristen Carlson Accardi 	struct ata_port *ap;
776ca77329fSKristen Carlson Accardi 	struct ata_device *dev;
777ca77329fSKristen Carlson Accardi 	int i;
778ca77329fSKristen Carlson Accardi 
779ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
780ca77329fSKristen Carlson Accardi 		ap = host->ports[i];
781ca77329fSKristen Carlson Accardi 		ata_port_for_each_link(link, ap) {
782ca77329fSKristen Carlson Accardi 			ata_link_for_each_dev(dev, link)
783ca77329fSKristen Carlson Accardi 				ata_dev_disable_pm(dev);
784ca77329fSKristen Carlson Accardi 		}
785ca77329fSKristen Carlson Accardi 	}
786ca77329fSKristen Carlson Accardi }
787ca77329fSKristen Carlson Accardi 
788ca77329fSKristen Carlson Accardi static void ata_lpm_disable(struct ata_host *host)
789ca77329fSKristen Carlson Accardi {
790ca77329fSKristen Carlson Accardi 	int i;
791ca77329fSKristen Carlson Accardi 
792ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
793ca77329fSKristen Carlson Accardi 		struct ata_port *ap = host->ports[i];
794ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
795ca77329fSKristen Carlson Accardi 	}
796ca77329fSKristen Carlson Accardi }
7971992a5edSStephen Rothwell #endif	/* CONFIG_PM */
798ca77329fSKristen Carlson Accardi 
799ca77329fSKristen Carlson Accardi 
800c6fd2807SJeff Garzik /**
801c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
802c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
803c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
804c6fd2807SJeff Garzik  *
8050d5ff566STejun Heo  *	This technique was originally described in
8060d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
8070d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
8080d5ff566STejun Heo  *
8090d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
8100d5ff566STejun Heo  *	and if a device is present, it will respond by
8110d5ff566STejun Heo  *	correctly storing and echoing back the
8120d5ff566STejun Heo  *	ATA shadow register contents.
813c6fd2807SJeff Garzik  *
814c6fd2807SJeff Garzik  *	LOCKING:
815c6fd2807SJeff Garzik  *	caller.
816c6fd2807SJeff Garzik  */
817c6fd2807SJeff Garzik 
8180d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
819c6fd2807SJeff Garzik {
8200d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
8210d5ff566STejun Heo 	u8 nsect, lbal;
8220d5ff566STejun Heo 
8230d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
8240d5ff566STejun Heo 
8250d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8260d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8270d5ff566STejun Heo 
8280d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
8290d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
8300d5ff566STejun Heo 
8310d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8320d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8330d5ff566STejun Heo 
8340d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
8350d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
8360d5ff566STejun Heo 
8370d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
8380d5ff566STejun Heo 		return 1;	/* we found a device */
8390d5ff566STejun Heo 
8400d5ff566STejun Heo 	return 0;		/* nothing found */
841c6fd2807SJeff Garzik }
842c6fd2807SJeff Garzik 
843c6fd2807SJeff Garzik /**
844c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
845c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
846c6fd2807SJeff Garzik  *
847c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
848c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
849c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
850c6fd2807SJeff Garzik  *
851c6fd2807SJeff Garzik  *	LOCKING:
852c6fd2807SJeff Garzik  *	None.
853c6fd2807SJeff Garzik  *
854c6fd2807SJeff Garzik  *	RETURNS:
855633273a3STejun Heo  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
856633273a3STejun Heo  *	%ATA_DEV_UNKNOWN in the event of failure.
857c6fd2807SJeff Garzik  */
858c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
859c6fd2807SJeff Garzik {
860c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
861c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
862c6fd2807SJeff Garzik 	 * so we only check those.  It's sufficient for uniqueness.
863633273a3STejun Heo 	 *
864633273a3STejun Heo 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
865633273a3STejun Heo 	 * signatures for ATA and ATAPI devices attached on SerialATA,
866633273a3STejun Heo 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
867633273a3STejun Heo 	 * spec has never mentioned using different signatures
868633273a3STejun Heo 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
869633273a3STejun Heo 	 * Multiplier specification began to use 0x69/0x96 to identify
870633273a3STejun Heo 	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
871633273a3STejun Heo 	 * ATA/ATAPI-7 shortly dropped the descriptions of 0x3c/0xc3
872633273a3STejun Heo 	 * and 0x69/0x96 and described them as reserved for
873633273a3STejun Heo 	 * SerialATA.
874633273a3STejun Heo 	 *
875633273a3STejun Heo 	 * We follow the current spec and consider that 0x69/0x96
876633273a3STejun Heo 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
877c6fd2807SJeff Garzik 	 */
878633273a3STejun Heo 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
879c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
880c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
881c6fd2807SJeff Garzik 	}
882c6fd2807SJeff Garzik 
883633273a3STejun Heo 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
884c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
885c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
886c6fd2807SJeff Garzik 	}
887c6fd2807SJeff Garzik 
888633273a3STejun Heo 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
889633273a3STejun Heo 		DPRINTK("found PMP device by sig\n");
890633273a3STejun Heo 		return ATA_DEV_PMP;
891633273a3STejun Heo 	}
892633273a3STejun Heo 
893633273a3STejun Heo 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
8942dcb407eSJeff Garzik 		printk(KERN_INFO "ata: SEMB device ignored\n");
895633273a3STejun Heo 		return ATA_DEV_SEMB_UNSUP; /* not yet */
896633273a3STejun Heo 	}
897633273a3STejun Heo 
898c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
899c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
900c6fd2807SJeff Garzik }
901c6fd2807SJeff Garzik 
902c6fd2807SJeff Garzik /**
903c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
9043f19859eSTejun Heo  *	@dev: ATA device to classify
9053f19859eSTejun Heo  *	@present: device seems present
906c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
907c6fd2807SJeff Garzik  *
908c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
909c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
910c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
911c6fd2807SJeff Garzik  *	and diagnostics.
912c6fd2807SJeff Garzik  *
913c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
914c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
915c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
916c6fd2807SJeff Garzik  *
917c6fd2807SJeff Garzik  *	LOCKING:
918c6fd2807SJeff Garzik  *	caller.
919c6fd2807SJeff Garzik  *
920c6fd2807SJeff Garzik  *	RETURNS:
921c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
922c6fd2807SJeff Garzik  */
9233f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
9243f19859eSTejun Heo 				  u8 *r_err)
925c6fd2807SJeff Garzik {
9263f19859eSTejun Heo 	struct ata_port *ap = dev->link->ap;
927c6fd2807SJeff Garzik 	struct ata_taskfile tf;
928c6fd2807SJeff Garzik 	unsigned int class;
929c6fd2807SJeff Garzik 	u8 err;
930c6fd2807SJeff Garzik 
9313f19859eSTejun Heo 	ap->ops->dev_select(ap, dev->devno);
932c6fd2807SJeff Garzik 
933c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
934c6fd2807SJeff Garzik 
935c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
936c6fd2807SJeff Garzik 	err = tf.feature;
937c6fd2807SJeff Garzik 	if (r_err)
938c6fd2807SJeff Garzik 		*r_err = err;
939c6fd2807SJeff Garzik 
94093590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
9413f19859eSTejun Heo 	if (err == 0 && dev->devno == 0)
94293590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
9433f19859eSTejun Heo 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
94493590859SAlan Cox 	else if (err == 1)
945c6fd2807SJeff Garzik 		/* do nothing */ ;
9463f19859eSTejun Heo 	else if ((dev->devno == 0) && (err == 0x81))
947c6fd2807SJeff Garzik 		/* do nothing */ ;
948c6fd2807SJeff Garzik 	else
949c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
950c6fd2807SJeff Garzik 
951c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
952c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
953c6fd2807SJeff Garzik 
954d7fbee05STejun Heo 	if (class == ATA_DEV_UNKNOWN) {
955d7fbee05STejun Heo 		/* If the device failed diagnostic, it's likely to
956d7fbee05STejun Heo 		 * have reported incorrect device signature too.
957d7fbee05STejun Heo 		 * Assume ATA device if the device seems present but
958d7fbee05STejun Heo 		 * device signature is invalid with diagnostic
959d7fbee05STejun Heo 		 * failure.
960d7fbee05STejun Heo 		 */
961d7fbee05STejun Heo 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
962d7fbee05STejun Heo 			class = ATA_DEV_ATA;
963d7fbee05STejun Heo 		else
964d7fbee05STejun Heo 			class = ATA_DEV_NONE;
965d7fbee05STejun Heo 	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
966d7fbee05STejun Heo 		class = ATA_DEV_NONE;
967d7fbee05STejun Heo 
968c6fd2807SJeff Garzik 	return class;
969c6fd2807SJeff Garzik }
970c6fd2807SJeff Garzik 
971c6fd2807SJeff Garzik /**
972c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
973c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
974c6fd2807SJeff Garzik  *	@s: string into which data is output
975c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
976c6fd2807SJeff Garzik  *	@len: length of string to return. must be an even number.
977c6fd2807SJeff Garzik  *
978c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
979c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
980c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
981c6fd2807SJeff Garzik  *
982c6fd2807SJeff Garzik  *	LOCKING:
983c6fd2807SJeff Garzik  *	caller.
984c6fd2807SJeff Garzik  */
985c6fd2807SJeff Garzik 
986c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
987c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
988c6fd2807SJeff Garzik {
989c6fd2807SJeff Garzik 	unsigned int c;
990c6fd2807SJeff Garzik 
991c6fd2807SJeff Garzik 	while (len > 0) {
992c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
993c6fd2807SJeff Garzik 		*s = c;
994c6fd2807SJeff Garzik 		s++;
995c6fd2807SJeff Garzik 
996c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
997c6fd2807SJeff Garzik 		*s = c;
998c6fd2807SJeff Garzik 		s++;
999c6fd2807SJeff Garzik 
1000c6fd2807SJeff Garzik 		ofs++;
1001c6fd2807SJeff Garzik 		len -= 2;
1002c6fd2807SJeff Garzik 	}
1003c6fd2807SJeff Garzik }
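
/*
 * Example: the model string occupies IDENTIFY words 27-46, so
 * ata_id_string(id, buf, 27, 40) copies the 40-byte model field into a
 * caller-supplied buffer, emitting the high byte of each word first.
 */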
1004c6fd2807SJeff Garzik 
1005c6fd2807SJeff Garzik /**
1006c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1007c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
1008c6fd2807SJeff Garzik  *	@s: string into which data is output
1009c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
1010c6fd2807SJeff Garzik  *	@len: length of string to return. must be an odd number.
1011c6fd2807SJeff Garzik  *
1012c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
1013c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
1014c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
1015c6fd2807SJeff Garzik  *
1016c6fd2807SJeff Garzik  *	LOCKING:
1017c6fd2807SJeff Garzik  *	caller.
1018c6fd2807SJeff Garzik  */
1019c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
1020c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
1021c6fd2807SJeff Garzik {
1022c6fd2807SJeff Garzik 	unsigned char *p;
1023c6fd2807SJeff Garzik 
1024c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
1025c6fd2807SJeff Garzik 
1026c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
1027c6fd2807SJeff Garzik 
1028c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
1029c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
1030c6fd2807SJeff Garzik 		p--;
1031c6fd2807SJeff Garzik 	*p = '\0';
1032c6fd2807SJeff Garzik }
1033c6fd2807SJeff Garzik 
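/*
 * Capacity fields of the IDENTIFY data used below: words 100-103 hold
 * the 48-bit LBA48 capacity, words 60-61 the 28-bit LBA capacity,
 * words 57-58 the current CHS capacity, and words 1/3/6 the default
 * cylinder/head/sector geometry.
 */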
1034db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id)
1035db6f8759STejun Heo {
1036db6f8759STejun Heo 	if (ata_id_has_lba(id)) {
1037db6f8759STejun Heo 		if (ata_id_has_lba48(id))
1038db6f8759STejun Heo 			return ata_id_u64(id, 100);
1039db6f8759STejun Heo 		else
1040db6f8759STejun Heo 			return ata_id_u32(id, 60);
1041db6f8759STejun Heo 	} else {
1042db6f8759STejun Heo 		if (ata_id_current_chs_valid(id))
1043db6f8759STejun Heo 			return ata_id_u32(id, 57);
1044db6f8759STejun Heo 		else
1045db6f8759STejun Heo 			return id[1] * id[3] * id[6];
1046db6f8759STejun Heo 	}
1047db6f8759STejun Heo }
1048db6f8759STejun Heo 
10491e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
10501e999736SAlan Cox {
10511e999736SAlan Cox 	u64 sectors = 0;
10521e999736SAlan Cox 
10531e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
10541e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
10551e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
10561e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10571e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10581e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10591e999736SAlan Cox 
10601e999736SAlan Cox 	return ++sectors;
10611e999736SAlan Cox }
10621e999736SAlan Cox 
10631e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
10641e999736SAlan Cox {
10651e999736SAlan Cox 	u64 sectors = 0;
10661e999736SAlan Cox 
10671e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
10681e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10691e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10701e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10711e999736SAlan Cox 
10721e999736SAlan Cox 	return ++sectors;
10731e999736SAlan Cox }
10741e999736SAlan Cox 
10751e999736SAlan Cox /**
1076c728a914STejun Heo  *	ata_read_native_max_address - Read native max address
1077c728a914STejun Heo  *	@dev: target device
1078c728a914STejun Heo  *	@max_sectors: out parameter for the result native max address
10791e999736SAlan Cox  *
1080c728a914STejun Heo  *	Perform an LBA48 or LBA28 native size query upon the device in
1081c728a914STejun Heo  *	question.
1082c728a914STejun Heo  *
1083c728a914STejun Heo  *	RETURNS:
1084c728a914STejun Heo  *	0 on success, -EACCES if command is aborted by the drive.
1085c728a914STejun Heo  *	-EIO on other errors.
10861e999736SAlan Cox  */
1087c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
10881e999736SAlan Cox {
1089c728a914STejun Heo 	unsigned int err_mask;
10901e999736SAlan Cox 	struct ata_taskfile tf;
1091c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
10921e999736SAlan Cox 
10931e999736SAlan Cox 	ata_tf_init(dev, &tf);
10941e999736SAlan Cox 
1095c728a914STejun Heo 	/* always clear all address registers */
10961e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1097c728a914STejun Heo 
1098c728a914STejun Heo 	if (lba48) {
1099c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1100c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
1101c728a914STejun Heo 	} else
1102c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1103c728a914STejun Heo 
11041e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1105c728a914STejun Heo 	tf.device |= ATA_LBA;
11061e999736SAlan Cox 
11072b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1108c728a914STejun Heo 	if (err_mask) {
1109c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1110c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1111c728a914STejun Heo 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1112c728a914STejun Heo 			return -EACCES;
1113c728a914STejun Heo 		return -EIO;
1114c728a914STejun Heo 	}
1115c728a914STejun Heo 
1116c728a914STejun Heo 	if (lba48)
1117c728a914STejun Heo 		*max_sectors = ata_tf_to_lba48(&tf);
1118c728a914STejun Heo 	else
1119c728a914STejun Heo 		*max_sectors = ata_tf_to_lba(&tf);
112093328e11SAlan Cox 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
112193328e11SAlan Cox 		(*max_sectors)--;
11221e999736SAlan Cox 	return 0;
11231e999736SAlan Cox }
11241e999736SAlan Cox 
11251e999736SAlan Cox /**
1126c728a914STejun Heo  *	ata_set_max_sectors - Set max sectors
1127c728a914STejun Heo  *	@dev: target device
11286b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
11291e999736SAlan Cox  *
1130c728a914STejun Heo  *	Set max sectors of @dev to @new_sectors.
1131c728a914STejun Heo  *
1132c728a914STejun Heo  *	RETURNS:
1133c728a914STejun Heo  *	0 on success, -EACCES if command is aborted or denied (due to
1134c728a914STejun Heo  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1135c728a914STejun Heo  *	errors.
11361e999736SAlan Cox  */
113705027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
11381e999736SAlan Cox {
1139c728a914STejun Heo 	unsigned int err_mask;
11401e999736SAlan Cox 	struct ata_taskfile tf;
1141c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
11421e999736SAlan Cox 
11431e999736SAlan Cox 	new_sectors--;
11441e999736SAlan Cox 
11451e999736SAlan Cox 	ata_tf_init(dev, &tf);
11461e999736SAlan Cox 
1147c728a914STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
11481e999736SAlan Cox 
1149c728a914STejun Heo 	if (lba48) {
1150c728a914STejun Heo 		tf.command = ATA_CMD_SET_MAX_EXT;
1151c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
11521e999736SAlan Cox 
11531e999736SAlan Cox 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
11541e999736SAlan Cox 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
11551e999736SAlan Cox 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
11561e582ba4STejun Heo 	} else {
11571e999736SAlan Cox 		tf.command = ATA_CMD_SET_MAX;
1158c728a914STejun Heo 
11591e582ba4STejun Heo 		tf.device |= (new_sectors >> 24) & 0xf;
11601e582ba4STejun Heo 	}
11611e582ba4STejun Heo 
11621e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1163c728a914STejun Heo 	tf.device |= ATA_LBA;
11641e999736SAlan Cox 
11651e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
11661e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
11671e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
11681e999736SAlan Cox 
11692b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1170c728a914STejun Heo 	if (err_mask) {
1171c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1172c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1173c728a914STejun Heo 		if (err_mask == AC_ERR_DEV &&
1174c728a914STejun Heo 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1175c728a914STejun Heo 			return -EACCES;
1176c728a914STejun Heo 		return -EIO;
1177c728a914STejun Heo 	}
1178c728a914STejun Heo 
11791e999736SAlan Cox 	return 0;
11801e999736SAlan Cox }
11811e999736SAlan Cox 
11821e999736SAlan Cox /**
11831e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
11841e999736SAlan Cox  *	@dev: Device to resize
11851e999736SAlan Cox  *
11861e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
11871e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
11881e999736SAlan Cox  *	the drive has the HPA feature set enabled.
118905027adcSTejun Heo  *
119005027adcSTejun Heo  *	RETURNS:
119105027adcSTejun Heo  *	0 on success, -errno on failure.
11921e999736SAlan Cox  */
119305027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev)
11941e999736SAlan Cox {
119505027adcSTejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
119605027adcSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
119705027adcSTejun Heo 	u64 sectors = ata_id_n_sectors(dev->id);
119805027adcSTejun Heo 	u64 native_sectors;
1199c728a914STejun Heo 	int rc;
12001e999736SAlan Cox 
120105027adcSTejun Heo 	/* do we need to do it? */
120205027adcSTejun Heo 	if (dev->class != ATA_DEV_ATA ||
120305027adcSTejun Heo 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
120405027adcSTejun Heo 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1205c728a914STejun Heo 		return 0;
12061e999736SAlan Cox 
120705027adcSTejun Heo 	/* read native max address */
120805027adcSTejun Heo 	rc = ata_read_native_max_address(dev, &native_sectors);
120905027adcSTejun Heo 	if (rc) {
121005027adcSTejun Heo 		/* If HPA isn't going to be unlocked, skip HPA
121105027adcSTejun Heo 		 * resizing on subsequent tries.
121205027adcSTejun Heo 		 */
121305027adcSTejun Heo 		if (!ata_ignore_hpa) {
121405027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
121505027adcSTejun Heo 				       "broken, will skip HPA handling\n");
121605027adcSTejun Heo 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
121705027adcSTejun Heo 
121805027adcSTejun Heo 			/* we can continue if device aborted the command */
121905027adcSTejun Heo 			if (rc == -EACCES)
122005027adcSTejun Heo 				rc = 0;
122105027adcSTejun Heo 		}
122205027adcSTejun Heo 
122305027adcSTejun Heo 		return rc;
122405027adcSTejun Heo 	}
122505027adcSTejun Heo 
122605027adcSTejun Heo 	/* nothing to do? */
122705027adcSTejun Heo 	if (native_sectors <= sectors || !ata_ignore_hpa) {
122805027adcSTejun Heo 		if (!print_info || native_sectors == sectors)
122905027adcSTejun Heo 			return 0;
123005027adcSTejun Heo 
123105027adcSTejun Heo 		if (native_sectors > sectors)
12321e999736SAlan Cox 			ata_dev_printk(dev, KERN_INFO,
123305027adcSTejun Heo 				"HPA detected: current %llu, native %llu\n",
123405027adcSTejun Heo 				(unsigned long long)sectors,
123505027adcSTejun Heo 				(unsigned long long)native_sectors);
123605027adcSTejun Heo 		else if (native_sectors < sectors)
123705027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING,
123805027adcSTejun Heo 				"native sectors (%llu) is smaller than "
123905027adcSTejun Heo 				"sectors (%llu)\n",
124005027adcSTejun Heo 				(unsigned long long)native_sectors,
124105027adcSTejun Heo 				(unsigned long long)sectors);
124205027adcSTejun Heo 		return 0;
12431e999736SAlan Cox 	}
124437301a55STejun Heo 
124505027adcSTejun Heo 	/* let's unlock HPA */
124605027adcSTejun Heo 	rc = ata_set_max_sectors(dev, native_sectors);
124705027adcSTejun Heo 	if (rc == -EACCES) {
124805027adcSTejun Heo 		/* if device aborted the command, skip HPA resizing */
124905027adcSTejun Heo 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
125005027adcSTejun Heo 			       "(%llu -> %llu), skipping HPA handling\n",
125105027adcSTejun Heo 			       (unsigned long long)sectors,
125205027adcSTejun Heo 			       (unsigned long long)native_sectors);
125305027adcSTejun Heo 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
125405027adcSTejun Heo 		return 0;
125505027adcSTejun Heo 	} else if (rc)
125605027adcSTejun Heo 		return rc;
125705027adcSTejun Heo 
125805027adcSTejun Heo 	/* re-read IDENTIFY data */
125905027adcSTejun Heo 	rc = ata_dev_reread_id(dev, 0);
126005027adcSTejun Heo 	if (rc) {
126105027adcSTejun Heo 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
126205027adcSTejun Heo 			       "data after HPA resizing\n");
126305027adcSTejun Heo 		return rc;
126405027adcSTejun Heo 	}
126505027adcSTejun Heo 
126605027adcSTejun Heo 	if (print_info) {
126705027adcSTejun Heo 		u64 new_sectors = ata_id_n_sectors(dev->id);
126805027adcSTejun Heo 		ata_dev_printk(dev, KERN_INFO,
126905027adcSTejun Heo 			"HPA unlocked: %llu -> %llu, native %llu\n",
127005027adcSTejun Heo 			(unsigned long long)sectors,
127105027adcSTejun Heo 			(unsigned long long)new_sectors,
127205027adcSTejun Heo 			(unsigned long long)native_sectors);
127305027adcSTejun Heo 	}
127405027adcSTejun Heo 
127505027adcSTejun Heo 	return 0;
12761e999736SAlan Cox }
12771e999736SAlan Cox 
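/*
 * Example of the policy above (hypothetical sizes): a drive whose HPA
 * clips 976773168 native sectors down to 488397168 reported sectors
 * only logs "HPA detected: current 488397168, native 976773168" while
 * ata_ignore_hpa is zero.  With ata_ignore_hpa set (exposed as the
 * libata.ignore_hpa module parameter elsewhere in this file), the SET
 * MAX sequence above unlocks the drive and the IDENTIFY data is
 * re-read so that dev->n_sectors reflects the full native capacity.
 */
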
1278c6fd2807SJeff Garzik /**
127910305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
128010305f0fSAlan  *	@dev: device to identify
1281cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
128210305f0fSAlan  *
128310305f0fSAlan  *	Set up the timing values for the device based upon the identify
128410305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
128510305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
128610305f0fSAlan  *	mode correctly when possible.
128710305f0fSAlan  *
128810305f0fSAlan  *	In addition we emit similarly formatted messages to the default
128910305f0fSAlan  *	ata_dev_set_mode handler, in order to provide consistency of
129010305f0fSAlan  *	presentation.
129110305f0fSAlan  */
129210305f0fSAlan 
129310305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
129410305f0fSAlan {
129510305f0fSAlan 	unsigned int mask;
129610305f0fSAlan 	u8 mode;
129710305f0fSAlan 
129810305f0fSAlan 	/* Pack the DMA modes */
129910305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
130010305f0fSAlan 	if (dev->id[53] & 0x04)
130110305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
130210305f0fSAlan 
130310305f0fSAlan 	/* Select the mode in use */
130410305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
130510305f0fSAlan 
130610305f0fSAlan 	if (mode != 0) {
130710305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
130810305f0fSAlan 		       ata_mode_string(mask));
130910305f0fSAlan 	} else {
131010305f0fSAlan 		/* SWDMA perhaps ? */
131110305f0fSAlan 		mode = unknown;
131210305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
131310305f0fSAlan 	}
131410305f0fSAlan 
131510305f0fSAlan 	/* Configure the device reporting */
131610305f0fSAlan 	dev->xfer_mode = mode;
131710305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
131810305f0fSAlan }
131910305f0fSAlan 
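/*
 * Usage sketch (hypothetical driver hook, not part of libata): a
 * driver that leaves controller timings exactly as programmed by the
 * firmware could report the active mode from its ->dev_config() hook,
 * using single-word DMA as the assumption when the IDENTIFY data does
 * not identify the mode.
 */
static void example_firmware_dev_config(struct ata_device *adev)
{
	/* trust the firmware-programmed timings, only report them */
	ata_id_to_dma_mode(adev, XFER_SW_DMA_0);
}
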
132010305f0fSAlan /**
1321c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1322c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1323c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1324c6fd2807SJeff Garzik  *
1325c6fd2807SJeff Garzik  *	This function intentionally performs no action.
1326c6fd2807SJeff Garzik  *
1327c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1328c6fd2807SJeff Garzik  *
1329c6fd2807SJeff Garzik  *	LOCKING:
1330c6fd2807SJeff Garzik  *	caller.
1331c6fd2807SJeff Garzik  */
1332c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1333c6fd2807SJeff Garzik {
1334c6fd2807SJeff Garzik }
1335c6fd2807SJeff Garzik 
1336c6fd2807SJeff Garzik 
1337c6fd2807SJeff Garzik /**
1338c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1339c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1340c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1341c6fd2807SJeff Garzik  *
1342c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1343c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1344c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1345c6fd2807SJeff Garzik  *
1346c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1347c6fd2807SJeff Garzik  *
1348c6fd2807SJeff Garzik  *	LOCKING:
1349c6fd2807SJeff Garzik  *	caller.
1350c6fd2807SJeff Garzik  */
1351c6fd2807SJeff Garzik 
1352c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1353c6fd2807SJeff Garzik {
1354c6fd2807SJeff Garzik 	u8 tmp;
1355c6fd2807SJeff Garzik 
1356c6fd2807SJeff Garzik 	if (device == 0)
1357c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1358c6fd2807SJeff Garzik 	else
1359c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1360c6fd2807SJeff Garzik 
13610d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1362c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1363c6fd2807SJeff Garzik }
1364c6fd2807SJeff Garzik 
1365c6fd2807SJeff Garzik /**
1366c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1367c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1368c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1369c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1370c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1371c6fd2807SJeff Garzik  *
1372c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1373c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1374c6fd2807SJeff Garzik  *	ATA channel.
1375c6fd2807SJeff Garzik  *
1376c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1377c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1378c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1379c6fd2807SJeff Garzik  *
1380c6fd2807SJeff Garzik  *	LOCKING:
1381c6fd2807SJeff Garzik  *	caller.
1382c6fd2807SJeff Garzik  */
1383c6fd2807SJeff Garzik 
1384c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1385c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1386c6fd2807SJeff Garzik {
1387c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
138844877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
138944877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1390c6fd2807SJeff Garzik 
1391c6fd2807SJeff Garzik 	if (wait)
1392c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1393c6fd2807SJeff Garzik 
1394c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1395c6fd2807SJeff Garzik 
1396c6fd2807SJeff Garzik 	if (wait) {
13979af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1398c6fd2807SJeff Garzik 			msleep(150);
1399c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1400c6fd2807SJeff Garzik 	}
1401c6fd2807SJeff Garzik }
1402c6fd2807SJeff Garzik 
1403c6fd2807SJeff Garzik /**
1404c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1405c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1406c6fd2807SJeff Garzik  *
1407c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1408c6fd2807SJeff Garzik  *	page.
1409c6fd2807SJeff Garzik  *
1410c6fd2807SJeff Garzik  *	LOCKING:
1411c6fd2807SJeff Garzik  *	caller.
1412c6fd2807SJeff Garzik  */
1413c6fd2807SJeff Garzik 
1414c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1415c6fd2807SJeff Garzik {
1416c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1417c6fd2807SJeff Garzik 		"53==0x%04x  "
1418c6fd2807SJeff Garzik 		"63==0x%04x  "
1419c6fd2807SJeff Garzik 		"64==0x%04x  "
1420c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1421c6fd2807SJeff Garzik 		id[49],
1422c6fd2807SJeff Garzik 		id[53],
1423c6fd2807SJeff Garzik 		id[63],
1424c6fd2807SJeff Garzik 		id[64],
1425c6fd2807SJeff Garzik 		id[75]);
1426c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1427c6fd2807SJeff Garzik 		"81==0x%04x  "
1428c6fd2807SJeff Garzik 		"82==0x%04x  "
1429c6fd2807SJeff Garzik 		"83==0x%04x  "
1430c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1431c6fd2807SJeff Garzik 		id[80],
1432c6fd2807SJeff Garzik 		id[81],
1433c6fd2807SJeff Garzik 		id[82],
1434c6fd2807SJeff Garzik 		id[83],
1435c6fd2807SJeff Garzik 		id[84]);
1436c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1437c6fd2807SJeff Garzik 		"93==0x%04x\n",
1438c6fd2807SJeff Garzik 		id[88],
1439c6fd2807SJeff Garzik 		id[93]);
1440c6fd2807SJeff Garzik }
1441c6fd2807SJeff Garzik 
1442c6fd2807SJeff Garzik /**
1443c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1444c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1445c6fd2807SJeff Garzik  *
1446c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1447c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1448c6fd2807SJeff Garzik  *
1449c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1450c6fd2807SJeff Garzik  *
1451c6fd2807SJeff Garzik  *	LOCKING:
1452c6fd2807SJeff Garzik  *	None.
1453c6fd2807SJeff Garzik  *
1454c6fd2807SJeff Garzik  *	RETURNS:
1455c6fd2807SJeff Garzik  *	Computed xfermask
1456c6fd2807SJeff Garzik  */
1457c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1458c6fd2807SJeff Garzik {
1459c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1460c6fd2807SJeff Garzik 
1461c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1462c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1463c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1464c6fd2807SJeff Garzik 		pio_mask <<= 3;
1465c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1466c6fd2807SJeff Garzik 	} else {
1467c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1468c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1469c6fd2807SJeff Garzik 		 * a mask.
1470c6fd2807SJeff Garzik 		 */
14717a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
147246767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
147346767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
147446767aebSAlan Cox 		else
147546767aebSAlan Cox 			pio_mask = 1;
1476c6fd2807SJeff Garzik 
1477c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1478c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1479c6fd2807SJeff Garzik 		 * process. However it's the speeds, not the modes, that
1480c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1481c6fd2807SJeff Garzik 		 * will get this right anyway
1482c6fd2807SJeff Garzik 		 */
1483c6fd2807SJeff Garzik 	}
1484c6fd2807SJeff Garzik 
1485c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1486c6fd2807SJeff Garzik 
1487b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1488b352e57dSAlan Cox 		/*
1489b352e57dSAlan Cox 		 *	Process compact flash extended modes
1490b352e57dSAlan Cox 		 */
1491b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1492b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1493b352e57dSAlan Cox 
1494b352e57dSAlan Cox 		if (pio)
1495b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1496b352e57dSAlan Cox 		if (pio > 1)
1497b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1498b352e57dSAlan Cox 		if (dma)
1499b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1500b352e57dSAlan Cox 		if (dma > 1)
1501b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1502b352e57dSAlan Cox 	}
1503b352e57dSAlan Cox 
1504c6fd2807SJeff Garzik 	udma_mask = 0;
1505c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1506c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1507c6fd2807SJeff Garzik 
1508c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1509c6fd2807SJeff Garzik }
1510c6fd2807SJeff Garzik 
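/*
 * Usage sketch (illustrative only, not part of libata): how the packed
 * mask produced above is typically consumed, here reduced to the
 * highest supported mode for logging.  ata_xfer_mask2mode() and
 * ata_mode_string() are the existing helpers used elsewhere in this
 * file; the debug message itself is hypothetical.
 */
static void example_report_xfermask(struct ata_device *dev)
{
	unsigned int xfer_mask = ata_id_xfermask(dev->id);

	ata_dev_printk(dev, KERN_DEBUG, "max mode %s (0x%x)\n",
		       ata_mode_string(xfer_mask),
		       ata_xfer_mask2mode(xfer_mask));
}
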
1511c6fd2807SJeff Garzik /**
1512c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1513c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1514c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
151565f27f38SDavid Howells  *	@data: data for @fn to use
1516c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1517c6fd2807SJeff Garzik  *
1518c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1519c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1520c6fd2807SJeff Garzik  *	user(low level driver)'s responsibility to make sure that only
1521c6fd2807SJeff Garzik  *	one task is active at any given time.
1522c6fd2807SJeff Garzik  *
1523c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1524c6fd2807SJeff Garzik  *	port_task and EH, so callers of ata_port_queue_task() need not
1525c6fd2807SJeff Garzik  *	handle EH synchronization themselves.
1526c6fd2807SJeff Garzik  *
1527c6fd2807SJeff Garzik  *	LOCKING:
1528c6fd2807SJeff Garzik  *	Inherited from caller.
1529c6fd2807SJeff Garzik  */
153065f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1531c6fd2807SJeff Garzik 			 unsigned long delay)
1532c6fd2807SJeff Garzik {
153365f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
153465f27f38SDavid Howells 	ap->port_task_data = data;
1535c6fd2807SJeff Garzik 
153645a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
153745a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1538c6fd2807SJeff Garzik }
1539c6fd2807SJeff Garzik 
1540c6fd2807SJeff Garzik /**
1541c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1542c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1543c6fd2807SJeff Garzik  *
1544c6fd2807SJeff Garzik  *	After this function completes, port_task is guaranteed not to
1545c6fd2807SJeff Garzik  *	be running or scheduled.
1546c6fd2807SJeff Garzik  *
1547c6fd2807SJeff Garzik  *	LOCKING:
1548c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1549c6fd2807SJeff Garzik  */
1550c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1551c6fd2807SJeff Garzik {
1552c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1553c6fd2807SJeff Garzik 
155445a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1555c6fd2807SJeff Garzik 
1556c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1557c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1558c6fd2807SJeff Garzik }
1559c6fd2807SJeff Garzik 
15607102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1561c6fd2807SJeff Garzik {
1562c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1563c6fd2807SJeff Garzik 
1564c6fd2807SJeff Garzik 	complete(waiting);
1565c6fd2807SJeff Garzik }
1566c6fd2807SJeff Garzik 
1567c6fd2807SJeff Garzik /**
15682432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1569c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1570c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1571c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1572c6fd2807SJeff Garzik  *	@dma_dir: Data transfer direction of the command
15735c1ad8b3SRandy Dunlap  *	@sgl: sg list for the data buffer of the command
15742432697bSTejun Heo  *	@n_elem: Number of sg entries
15752b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
1576c6fd2807SJeff Garzik  *
1577c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1578c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1579c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1580c6fd2807SJeff Garzik  *	is taken after a command times out.  It's the caller's duty to
1581c6fd2807SJeff Garzik  *	clean up after timeout.
1582c6fd2807SJeff Garzik  *
1583c6fd2807SJeff Garzik  *	LOCKING:
1584c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1585c6fd2807SJeff Garzik  *
1586c6fd2807SJeff Garzik  *	RETURNS:
1587c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1588c6fd2807SJeff Garzik  */
15892432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1590c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
159187260216SJens Axboe 			      int dma_dir, struct scatterlist *sgl,
15922b789108STejun Heo 			      unsigned int n_elem, unsigned long timeout)
1593c6fd2807SJeff Garzik {
15949af5c9c9STejun Heo 	struct ata_link *link = dev->link;
15959af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1596c6fd2807SJeff Garzik 	u8 command = tf->command;
1597c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1598c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1599c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1600da917d69STejun Heo 	int preempted_nr_active_links;
1601c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1602c6fd2807SJeff Garzik 	unsigned long flags;
1603c6fd2807SJeff Garzik 	unsigned int err_mask;
1604c6fd2807SJeff Garzik 	int rc;
1605c6fd2807SJeff Garzik 
1606c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1607c6fd2807SJeff Garzik 
1608c6fd2807SJeff Garzik 	/* no internal command while frozen */
1609c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1610c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1611c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1612c6fd2807SJeff Garzik 	}
1613c6fd2807SJeff Garzik 
1614c6fd2807SJeff Garzik 	/* initialize internal qc */
1615c6fd2807SJeff Garzik 
1616c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1617c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1618c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1619c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1620c6fd2807SJeff Garzik 	 */
1621c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1622c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1623c6fd2807SJeff Garzik 	else
1624c6fd2807SJeff Garzik 		tag = 0;
1625c6fd2807SJeff Garzik 
1626c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1627c6fd2807SJeff Garzik 		BUG();
1628c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1629c6fd2807SJeff Garzik 
1630c6fd2807SJeff Garzik 	qc->tag = tag;
1631c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1632c6fd2807SJeff Garzik 	qc->ap = ap;
1633c6fd2807SJeff Garzik 	qc->dev = dev;
1634c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1635c6fd2807SJeff Garzik 
16369af5c9c9STejun Heo 	preempted_tag = link->active_tag;
16379af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1638c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1639da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
16409af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
16419af5c9c9STejun Heo 	link->sactive = 0;
1642c6fd2807SJeff Garzik 	ap->qc_active = 0;
1643da917d69STejun Heo 	ap->nr_active_links = 0;
1644c6fd2807SJeff Garzik 
1645c6fd2807SJeff Garzik 	/* prepare & issue qc */
1646c6fd2807SJeff Garzik 	qc->tf = *tf;
1647c6fd2807SJeff Garzik 	if (cdb)
1648c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1649c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1650c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1651c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
16522432697bSTejun Heo 		unsigned int i, buflen = 0;
165387260216SJens Axboe 		struct scatterlist *sg;
16542432697bSTejun Heo 
165587260216SJens Axboe 		for_each_sg(sgl, sg, n_elem, i)
165687260216SJens Axboe 			buflen += sg->length;
16572432697bSTejun Heo 
165887260216SJens Axboe 		ata_sg_init(qc, sgl, n_elem);
165949c80429SBrian King 		qc->nbytes = buflen;
1660c6fd2807SJeff Garzik 	}
1661c6fd2807SJeff Garzik 
1662c6fd2807SJeff Garzik 	qc->private_data = &wait;
1663c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1664c6fd2807SJeff Garzik 
1665c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1666c6fd2807SJeff Garzik 
1667c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1668c6fd2807SJeff Garzik 
16692b789108STejun Heo 	if (!timeout)
16702b789108STejun Heo 		timeout = ata_probe_timeout * 1000 / HZ;
16712b789108STejun Heo 
16722b789108STejun Heo 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1673c6fd2807SJeff Garzik 
1674c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1675c6fd2807SJeff Garzik 
1676c6fd2807SJeff Garzik 	if (!rc) {
1677c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1678c6fd2807SJeff Garzik 
1679c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1680c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1681c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1682c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1683c6fd2807SJeff Garzik 		 */
1684c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1685c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1686c6fd2807SJeff Garzik 
1687c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1688c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1689c6fd2807SJeff Garzik 			else
1690c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1691c6fd2807SJeff Garzik 
1692c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1693c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1694c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1695c6fd2807SJeff Garzik 		}
1696c6fd2807SJeff Garzik 
1697c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1698c6fd2807SJeff Garzik 	}
1699c6fd2807SJeff Garzik 
1700c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1701c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1702c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1703c6fd2807SJeff Garzik 
1704a51d644aSTejun Heo 	/* perform minimal error analysis */
1705a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1706a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1707a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1708a51d644aSTejun Heo 
1709a51d644aSTejun Heo 		if (!qc->err_mask)
1710c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1711a51d644aSTejun Heo 
1712a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1713a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1714c6fd2807SJeff Garzik 	}
1715c6fd2807SJeff Garzik 
1716c6fd2807SJeff Garzik 	/* finish up */
1717c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1718c6fd2807SJeff Garzik 
1719c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1720c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1721c6fd2807SJeff Garzik 
1722c6fd2807SJeff Garzik 	ata_qc_free(qc);
17239af5c9c9STejun Heo 	link->active_tag = preempted_tag;
17249af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1725c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1726da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1727c6fd2807SJeff Garzik 
1728c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1729c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1730c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1731c6fd2807SJeff Garzik 	 * port.
1732c6fd2807SJeff Garzik 	 *
1733c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1734c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1735c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1736c6fd2807SJeff Garzik 	 *
1737c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1738c6fd2807SJeff Garzik 	 */
1739c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1740c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1741c6fd2807SJeff Garzik 		ata_port_probe(ap);
1742c6fd2807SJeff Garzik 	}
1743c6fd2807SJeff Garzik 
1744c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1745c6fd2807SJeff Garzik 
1746c6fd2807SJeff Garzik 	return err_mask;
1747c6fd2807SJeff Garzik }
1748c6fd2807SJeff Garzik 
1749c6fd2807SJeff Garzik /**
175033480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
17512432697bSTejun Heo  *	@dev: Device to which the command is sent
17522432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
17532432697bSTejun Heo  *	@cdb: CDB for packet command
17542432697bSTejun Heo  *	@dma_dir: Data transfer direction of the command
17552432697bSTejun Heo  *	@buf: Data buffer of the command
17562432697bSTejun Heo  *	@buflen: Length of data buffer
17572b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
17582432697bSTejun Heo  *
17592432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
17602432697bSTejun Heo  *	buffer instead of sg list.
17612432697bSTejun Heo  *
17622432697bSTejun Heo  *	LOCKING:
17632432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
17642432697bSTejun Heo  *
17652432697bSTejun Heo  *	RETURNS:
17662432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
17672432697bSTejun Heo  */
17682432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
17692432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
17702b789108STejun Heo 			   int dma_dir, void *buf, unsigned int buflen,
17712b789108STejun Heo 			   unsigned long timeout)
17722432697bSTejun Heo {
177333480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
177433480a0eSTejun Heo 	unsigned int n_elem = 0;
17752432697bSTejun Heo 
177633480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
177733480a0eSTejun Heo 		WARN_ON(!buf);
17782432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
177933480a0eSTejun Heo 		psg = &sg;
178033480a0eSTejun Heo 		n_elem++;
178133480a0eSTejun Heo 	}
17822432697bSTejun Heo 
17832b789108STejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
17842b789108STejun Heo 				    timeout);
17852432697bSTejun Heo }
17862432697bSTejun Heo 
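/*
 * Usage sketch (hypothetical helper, not part of libata): a register-only
 * internal command issued through the wrapper above.  CHECK POWER MODE
 * returns the power state in the sector count register, so the
 * interesting part of the result taskfile is tf.nsect.
 */
static unsigned int example_check_power_mode(struct ata_device *dev, u8 *state)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_CHK_POWER;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask)
		*state = tf.nsect;	/* 0x00 standby, 0x80 idle, 0xff active/idle */

	return err_mask;
}
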
17872432697bSTejun Heo /**
1788c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1789c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1790c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1791c6fd2807SJeff Garzik  *
1792c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1793c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1794c6fd2807SJeff Garzik  *
1795c6fd2807SJeff Garzik  *	LOCKING:
1796c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1797c6fd2807SJeff Garzik  *
1798c6fd2807SJeff Garzik  *	RETURNS:
1799c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1800c6fd2807SJeff Garzik  */
1801c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1802c6fd2807SJeff Garzik {
1803c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1804c6fd2807SJeff Garzik 
1805c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1806c6fd2807SJeff Garzik 
1807c6fd2807SJeff Garzik 	tf.command = cmd;
1808c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1809c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1810c6fd2807SJeff Garzik 
18112b789108STejun Heo 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1812c6fd2807SJeff Garzik }
1813c6fd2807SJeff Garzik 
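/*
 * Usage sketch (illustrative only): issuing FLUSH CACHE through the
 * wrapper above.  Whether and when to flush is the caller's business;
 * this only demonstrates the calling convention and the EXT selection.
 */
static unsigned int example_flush_cache(struct ata_device *dev)
{
	if (ata_id_has_flush_ext(dev->id))
		return ata_do_simple_cmd(dev, ATA_CMD_FLUSH_EXT);

	return ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
}
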
1814c6fd2807SJeff Garzik /**
1815c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1816c6fd2807SJeff Garzik  *	@adev: ATA device
1817c6fd2807SJeff Garzik  *
1818c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1819c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1820c6fd2807SJeff Garzik  */
1821c6fd2807SJeff Garzik 
1822c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1823c6fd2807SJeff Garzik {
1824432729f0SAlan Cox 	/* Controller doesn't support IORDY. Probably a pointless check
1825432729f0SAlan Cox 	   as the caller should know this */
18269af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1827c6fd2807SJeff Garzik 		return 0;
1828432729f0SAlan Cox 	/* For PIO3 and higher IORDY is mandatory */
1829432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1830c6fd2807SJeff Garzik 		return 1;
1831432729f0SAlan Cox 	/* We turn it on when possible */
1832432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1833432729f0SAlan Cox 		return 1;
1834432729f0SAlan Cox 	return 0;
1835432729f0SAlan Cox }
1836c6fd2807SJeff Garzik 
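/*
 * Usage sketch (illustrative only): PATA controller drivers typically
 * call ata_pio_need_iordy() from their ->set_piomode() hook and fold
 * the answer into a chip-specific timing register.  The helper below
 * is hypothetical and only shows the decision, not any real register
 * layout.
 */
static u32 example_pio_timing_flags(const struct ata_device *adev)
{
	u32 flags = 0;

	if (ata_pio_need_iordy(adev))
		flags |= 1;	/* would map to the chip's IORDY enable bit */

	return flags;
}
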
1837432729f0SAlan Cox /**
1838432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1839432729f0SAlan Cox  *	@adev: ATA device
1840432729f0SAlan Cox  *
1841432729f0SAlan Cox  *	Compute the mask of PIO modes that can be used without IORDY.  The
1842432729f0SAlan Cox  *	fallback, when nothing better can be determined, is PIO0-1.
1843432729f0SAlan Cox  */
1844432729f0SAlan Cox 
1845432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1846432729f0SAlan Cox {
1847c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1848c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1849432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1850c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1851c6fd2807SJeff Garzik 		if (pio) {
1852c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1853c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1854432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1855432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1856c6fd2807SJeff Garzik 		}
1857c6fd2807SJeff Garzik 	}
1858432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1859c6fd2807SJeff Garzik }
1860c6fd2807SJeff Garzik 
1861c6fd2807SJeff Garzik /**
1862c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1863c6fd2807SJeff Garzik  *	@dev: target device
1864c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1865bff04647STejun Heo  *	@flags: ATA_READID_* flags
1866c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1867c6fd2807SJeff Garzik  *
1868c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1869c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1870c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1871c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1872c6fd2807SJeff Garzik  *
187350a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
187450a99018SAlan Cox  *	now we abort if we hit that case.
187550a99018SAlan Cox  *
1876c6fd2807SJeff Garzik  *	LOCKING:
1877c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1878c6fd2807SJeff Garzik  *
1879c6fd2807SJeff Garzik  *	RETURNS:
1880c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1881c6fd2807SJeff Garzik  */
1882c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1883bff04647STejun Heo 		    unsigned int flags, u16 *id)
1884c6fd2807SJeff Garzik {
18859af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1886c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1887c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1888c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1889c6fd2807SJeff Garzik 	const char *reason;
189054936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1891c6fd2807SJeff Garzik 	int rc;
1892c6fd2807SJeff Garzik 
1893c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
189444877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1895c6fd2807SJeff Garzik 
1896c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1897c6fd2807SJeff Garzik  retry:
1898c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1899c6fd2807SJeff Garzik 
1900c6fd2807SJeff Garzik 	switch (class) {
1901c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1902c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1903c6fd2807SJeff Garzik 		break;
1904c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1905c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1906c6fd2807SJeff Garzik 		break;
1907c6fd2807SJeff Garzik 	default:
1908c6fd2807SJeff Garzik 		rc = -ENODEV;
1909c6fd2807SJeff Garzik 		reason = "unsupported class";
1910c6fd2807SJeff Garzik 		goto err_out;
1911c6fd2807SJeff Garzik 	}
1912c6fd2807SJeff Garzik 
1913c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
191481afe893STejun Heo 
191581afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
191681afe893STejun Heo 	 * sure those are properly initialized.
191781afe893STejun Heo 	 */
191881afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
191981afe893STejun Heo 
192081afe893STejun Heo 	/* Device presence detection is unreliable on some
192181afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
192281afe893STejun Heo 	 */
192381afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1924c6fd2807SJeff Garzik 
1925c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
19262b789108STejun Heo 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1927c6fd2807SJeff Garzik 	if (err_mask) {
1928800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
192955a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
193044877b4eSTejun Heo 				ap->print_id, dev->devno);
193155a8e2c8STejun Heo 			return -ENOENT;
193255a8e2c8STejun Heo 		}
193355a8e2c8STejun Heo 
193454936f8bSTejun Heo 		/* Device or controller might have reported the wrong
193554936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
193654936f8bSTejun Heo 		 * the current one is aborted by the device.
193754936f8bSTejun Heo 		 */
193854936f8bSTejun Heo 		if (may_fallback &&
193954936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
194054936f8bSTejun Heo 			may_fallback = 0;
194154936f8bSTejun Heo 
194254936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
194354936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
194454936f8bSTejun Heo 			else
194554936f8bSTejun Heo 				class = ATA_DEV_ATA;
194654936f8bSTejun Heo 			goto retry;
194754936f8bSTejun Heo 		}
194854936f8bSTejun Heo 
1949c6fd2807SJeff Garzik 		rc = -EIO;
1950c6fd2807SJeff Garzik 		reason = "I/O error";
1951c6fd2807SJeff Garzik 		goto err_out;
1952c6fd2807SJeff Garzik 	}
1953c6fd2807SJeff Garzik 
195454936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
195554936f8bSTejun Heo 	 * successfully at least once.
195654936f8bSTejun Heo 	 */
195754936f8bSTejun Heo 	may_fallback = 0;
195854936f8bSTejun Heo 
1959c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1960c6fd2807SJeff Garzik 
1961c6fd2807SJeff Garzik 	/* sanity check */
1962c6fd2807SJeff Garzik 	rc = -EINVAL;
19636070068bSAlan Cox 	reason = "device reports invalid type";
19644a3381feSJeff Garzik 
19654a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
19664a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
19674a3381feSJeff Garzik 			goto err_out;
19684a3381feSJeff Garzik 	} else {
19694a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1970c6fd2807SJeff Garzik 			goto err_out;
1971c6fd2807SJeff Garzik 	}
1972c6fd2807SJeff Garzik 
1973169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1974169439c2SMark Lord 		tried_spinup = 1;
1975169439c2SMark Lord 		/*
1976169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1977169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1978169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1979169439c2SMark Lord 		 */
1980218f3d30SJeff Garzik 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1981fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
1982169439c2SMark Lord 			rc = -EIO;
1983169439c2SMark Lord 			reason = "SPINUP failed";
1984169439c2SMark Lord 			goto err_out;
1985169439c2SMark Lord 		}
1986169439c2SMark Lord 		/*
1987169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1988169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1989169439c2SMark Lord 		 */
1990169439c2SMark Lord 		if (id[2] == 0x37c8)
1991169439c2SMark Lord 			goto retry;
1992169439c2SMark Lord 	}
1993169439c2SMark Lord 
1994bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1995c6fd2807SJeff Garzik 		/*
1996c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
1997c6fd2807SJeff Garzik 		 * SRST RESET
199850a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
199950a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2000c6fd2807SJeff Garzik 		 * anything else..
2001c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
200250a99018SAlan Cox 		 *
200350a99018SAlan Cox 		 * Note that ATA4 says lba is mandatory so the second check
200450a99018SAlan Cox 		 * should never trigger.
2005c6fd2807SJeff Garzik 		 */
2006c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2007c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2008c6fd2807SJeff Garzik 			if (err_mask) {
2009c6fd2807SJeff Garzik 				rc = -EIO;
2010c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
2011c6fd2807SJeff Garzik 				goto err_out;
2012c6fd2807SJeff Garzik 			}
2013c6fd2807SJeff Garzik 
2014c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
2015c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
2016c6fd2807SJeff Garzik 			 */
2017bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
2018c6fd2807SJeff Garzik 			goto retry;
2019c6fd2807SJeff Garzik 		}
2020c6fd2807SJeff Garzik 	}
2021c6fd2807SJeff Garzik 
2022c6fd2807SJeff Garzik 	*p_class = class;
2023c6fd2807SJeff Garzik 
2024c6fd2807SJeff Garzik 	return 0;
2025c6fd2807SJeff Garzik 
2026c6fd2807SJeff Garzik  err_out:
2027c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
2028c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2029c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2030c6fd2807SJeff Garzik 	return rc;
2031c6fd2807SJeff Garzik }
2032c6fd2807SJeff Garzik 
2033c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
2034c6fd2807SJeff Garzik {
20359af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20369af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2037c6fd2807SJeff Garzik }
2038c6fd2807SJeff Garzik 
2039c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
2040c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
2041c6fd2807SJeff Garzik {
20429af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
2043c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2044c6fd2807SJeff Garzik 
2045c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
2046c6fd2807SJeff Garzik 		desc[0] = '\0';
2047c6fd2807SJeff Garzik 		return;
2048c6fd2807SJeff Garzik 	}
204975683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
20506919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
20516919a0a6SAlan Cox 		return;
20526919a0a6SAlan Cox 	}
2053c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
2054cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2055c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
2056c6fd2807SJeff Garzik 	}
2057c6fd2807SJeff Garzik 
2058c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
2059c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2060c6fd2807SJeff Garzik 	else
2061c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2062c6fd2807SJeff Garzik }
2063c6fd2807SJeff Garzik 
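/*
 * Worked example (hypothetical numbers): on a host with ATA_FLAG_NCQ and
 * scsi_host->can_queue of 31, a drive whose IDENTIFY word 75 reports a
 * queue depth of 32 gets ATA_DFLAG_NCQ set and the description reads
 * "NCQ (depth 31/32)"; with ATA_HORKAGE_NONCQ set it reads
 * "NCQ (not used)" instead.  The debug print below only illustrates
 * the call.
 */
static void example_show_ncq(struct ata_device *dev)
{
	char ncq_desc[20];

	ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
	ata_dev_printk(dev, KERN_DEBUG, "%s\n", ncq_desc);
}
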
2064c6fd2807SJeff Garzik /**
2065c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2066c6fd2807SJeff Garzik  *	@dev: Target device to configure
2067c6fd2807SJeff Garzik  *
2068c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
2069c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
2070c6fd2807SJeff Garzik  *
2071c6fd2807SJeff Garzik  *	LOCKING:
2072c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
2073c6fd2807SJeff Garzik  *
2074c6fd2807SJeff Garzik  *	RETURNS:
2075c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
2076c6fd2807SJeff Garzik  */
2077efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
2078c6fd2807SJeff Garzik {
20799af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20809af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
20816746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2082c6fd2807SJeff Garzik 	const u16 *id = dev->id;
2083c6fd2807SJeff Garzik 	unsigned int xfer_mask;
2084b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
20853f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
20863f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
2087c6fd2807SJeff Garzik 	int rc;
2088c6fd2807SJeff Garzik 
2089c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
209044877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
209144877b4eSTejun Heo 			       __FUNCTION__);
2092c6fd2807SJeff Garzik 		return 0;
2093c6fd2807SJeff Garzik 	}
2094c6fd2807SJeff Garzik 
2095c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
209644877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2097c6fd2807SJeff Garzik 
209875683fe7STejun Heo 	/* set horkage */
209975683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
210075683fe7STejun Heo 
21016746544cSTejun Heo 	/* let ACPI work its magic */
21026746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
21036746544cSTejun Heo 	if (rc)
21046746544cSTejun Heo 		return rc;
210508573a86SKristen Carlson Accardi 
210605027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
210705027adcSTejun Heo 	rc = ata_hpa_resize(dev);
210805027adcSTejun Heo 	if (rc)
210905027adcSTejun Heo 		return rc;
211005027adcSTejun Heo 
2111c6fd2807SJeff Garzik 	/* print device capabilities */
2112c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2113c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2114c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2115c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2116c6fd2807SJeff Garzik 			       __FUNCTION__,
2117c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
2118c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
2119c6fd2807SJeff Garzik 
2120c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
2121c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2122c6fd2807SJeff Garzik 	dev->max_sectors = 0;
2123c6fd2807SJeff Garzik 	dev->cdb_len = 0;
2124c6fd2807SJeff Garzik 	dev->n_sectors = 0;
2125c6fd2807SJeff Garzik 	dev->cylinders = 0;
2126c6fd2807SJeff Garzik 	dev->heads = 0;
2127c6fd2807SJeff Garzik 	dev->sectors = 0;
2128c6fd2807SJeff Garzik 
2129c6fd2807SJeff Garzik 	/*
2130c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
2131c6fd2807SJeff Garzik 	 */
2132c6fd2807SJeff Garzik 
2133c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
2134c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
2135c6fd2807SJeff Garzik 
2136c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2137c6fd2807SJeff Garzik 		ata_dump_id(id);
2138c6fd2807SJeff Garzik 
2139ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2140ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2141ef143d57SAlbert Lee 			sizeof(fwrevbuf));
2142ef143d57SAlbert Lee 
2143ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2144ef143d57SAlbert Lee 			sizeof(modelbuf));
2145ef143d57SAlbert Lee 
2146c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
2147c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
2148b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
2149b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
215044877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
215144877b4eSTejun Heo 					       "supports DRM functions and may "
215244877b4eSTejun Heo 					       "not be fully accessable.\n");
2153b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
21542dcb407eSJeff Garzik 		} else
2155b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2156b352e57dSAlan Cox 
2157c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
2158c6fd2807SJeff Garzik 
21593f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
21603f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
21613f64f565SEric D. Mudama 
2162c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
2163c6fd2807SJeff Garzik 			const char *lba_desc;
2164c6fd2807SJeff Garzik 			char ncq_desc[20];
2165c6fd2807SJeff Garzik 
2166c6fd2807SJeff Garzik 			lba_desc = "LBA";
2167c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
2168c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
2169c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
2170c6fd2807SJeff Garzik 				lba_desc = "LBA48";
21716fc49adbSTejun Heo 
21726fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
21736fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
21746fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2175c6fd2807SJeff Garzik 			}
2176c6fd2807SJeff Garzik 
2177c6fd2807SJeff Garzik 			/* config NCQ */
2178c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2179c6fd2807SJeff Garzik 
2180c6fd2807SJeff Garzik 			/* print device info to dmesg */
21813f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
21823f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21833f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
21843f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
21853f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
21863f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21873f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
2188c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
21893f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
21903f64f565SEric D. Mudama 			}
2191c6fd2807SJeff Garzik 		} else {
2192c6fd2807SJeff Garzik 			/* CHS */
2193c6fd2807SJeff Garzik 
2194c6fd2807SJeff Garzik 			/* Default translation */
2195c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
2196c6fd2807SJeff Garzik 			dev->heads	= id[3];
2197c6fd2807SJeff Garzik 			dev->sectors	= id[6];
2198c6fd2807SJeff Garzik 
2199c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
2200c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
2201c6fd2807SJeff Garzik 				dev->cylinders = id[54];
2202c6fd2807SJeff Garzik 				dev->heads     = id[55];
2203c6fd2807SJeff Garzik 				dev->sectors   = id[56];
2204c6fd2807SJeff Garzik 			}
2205c6fd2807SJeff Garzik 
2206c6fd2807SJeff Garzik 			/* print device info to dmesg */
22073f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2208c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
22093f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
22103f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
22113f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
22123f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
22133f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
22143f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
22153f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
22163f64f565SEric D. Mudama 					dev->heads, dev->sectors);
22173f64f565SEric D. Mudama 			}
2218c6fd2807SJeff Garzik 		}
2219c6fd2807SJeff Garzik 
2220c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2221c6fd2807SJeff Garzik 	}
2222c6fd2807SJeff Garzik 
2223c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2224c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2225854c73a2STejun Heo 		const char *cdb_intr_string = "";
2226854c73a2STejun Heo 		const char *atapi_an_string = "";
22277d77b247STejun Heo 		u32 sntf;
2228c6fd2807SJeff Garzik 
2229c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2230c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2231c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2232c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2233c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2234c6fd2807SJeff Garzik 			rc = -EINVAL;
2235c6fd2807SJeff Garzik 			goto err_out_nosup;
2236c6fd2807SJeff Garzik 		}
2237c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2238c6fd2807SJeff Garzik 
22397d77b247STejun Heo 		/* Enable ATAPI AN if both the host and device have
22407d77b247STejun Heo 		 * the support.  If PMP is attached, SNTF is required
22417d77b247STejun Heo 		 * to enable ATAPI AN to discern between PHY status
22427d77b247STejun Heo 		 * changed notifications and ATAPI ANs.
22439f45cbd3SKristen Carlson Accardi 		 */
22447d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
22457d77b247STejun Heo 		    (!ap->nr_pmp_links ||
22467d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2247854c73a2STejun Heo 			unsigned int err_mask;
2248854c73a2STejun Heo 
22499f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2250218f3d30SJeff Garzik 			err_mask = ata_dev_set_feature(dev,
2251218f3d30SJeff Garzik 					SETFEATURES_SATA_ENABLE, SATA_AN);
2252854c73a2STejun Heo 			if (err_mask)
22539f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2254854c73a2STejun Heo 					"failed to enable ATAPI AN "
2255854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2256854c73a2STejun Heo 			else {
22579f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2258854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2259854c73a2STejun Heo 			}
22609f45cbd3SKristen Carlson Accardi 		}
22619f45cbd3SKristen Carlson Accardi 
2262c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2263c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2264c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2265c6fd2807SJeff Garzik 		}
2266c6fd2807SJeff Garzik 
2267c6fd2807SJeff Garzik 		/* print device info to dmesg */
2268c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2269ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2270854c73a2STejun Heo 				       "ATAPI: %s, %s, max %s%s%s\n",
2271ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2272c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2273854c73a2STejun Heo 				       cdb_intr_string, atapi_an_string);
2274c6fd2807SJeff Garzik 	}
2275c6fd2807SJeff Garzik 
2276914ed354STejun Heo 	/* determine max_sectors */
2277914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2278914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2279914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2280914ed354STejun Heo 
2281ca77329fSKristen Carlson Accardi 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2282ca77329fSKristen Carlson Accardi 		if (ata_id_has_hipm(dev->id))
2283ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_HIPM;
2284ca77329fSKristen Carlson Accardi 		if (ata_id_has_dipm(dev->id))
2285ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_DIPM;
2286ca77329fSKristen Carlson Accardi 	}
2287ca77329fSKristen Carlson Accardi 
228893590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
228993590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
229093590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
229193590859SAlan Cox 		   idiot */
229293590859SAlan Cox 		if (print_info) {
229393590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
229493590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
229593590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
229693590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
229793590859SAlan Cox 		}
229893590859SAlan Cox 	}
229993590859SAlan Cox 
2300c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2301c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2302c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2303c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2304c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2305c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2306c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2307c6fd2807SJeff Garzik 	}
2308c6fd2807SJeff Garzik 
2309f8d8e579STony Battersby 	if ((dev->class == ATA_DEV_ATAPI) &&
2310f8d8e579STony Battersby 	    (atapi_command_packet_set(id) == TYPE_TAPE))
2311f8d8e579STony Battersby 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2312f8d8e579STony Battersby 
231375683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
231403ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
231503ec52deSTejun Heo 					 dev->max_sectors);
231618d6e9d5SAlbert Lee 
2317ca77329fSKristen Carlson Accardi 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2318ca77329fSKristen Carlson Accardi 		dev->horkage |= ATA_HORKAGE_IPM;
2319ca77329fSKristen Carlson Accardi 
2320ca77329fSKristen Carlson Accardi 		/* reset link pm_policy for this port to no pm */
2321ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
2322ca77329fSKristen Carlson Accardi 	}
2323ca77329fSKristen Carlson Accardi 
2324c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2325cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2326c6fd2807SJeff Garzik 
2327c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2328c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2329c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2330c6fd2807SJeff Garzik 	return 0;
2331c6fd2807SJeff Garzik 
2332c6fd2807SJeff Garzik err_out_nosup:
2333c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2334c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2335c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2336c6fd2807SJeff Garzik 	return rc;
2337c6fd2807SJeff Garzik }
2338c6fd2807SJeff Garzik 
2339c6fd2807SJeff Garzik /**
23402e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2341be0d18dfSAlan Cox  *	@ap: port
2342be0d18dfSAlan Cox  *
23432e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2344be0d18dfSAlan Cox  *	detection.
2345be0d18dfSAlan Cox  */
2346be0d18dfSAlan Cox 
2347be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2348be0d18dfSAlan Cox {
2349be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2350be0d18dfSAlan Cox }
2351be0d18dfSAlan Cox 
2352be0d18dfSAlan Cox /**
23532e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2354be0d18dfSAlan Cox  *	@ap: port
2355be0d18dfSAlan Cox  *
23562e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2357be0d18dfSAlan Cox  *	detection.
2358be0d18dfSAlan Cox  */
2359be0d18dfSAlan Cox 
2360be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2361be0d18dfSAlan Cox {
2362be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2363be0d18dfSAlan Cox }
2364be0d18dfSAlan Cox 
2365be0d18dfSAlan Cox /**
2366be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2367be0d18dfSAlan Cox  *	@ap: port
2368be0d18dfSAlan Cox  *
2369be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2370be0d18dfSAlan Cox  */
2371be0d18dfSAlan Cox 
2372be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2373be0d18dfSAlan Cox {
2374be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2375be0d18dfSAlan Cox }
2376be0d18dfSAlan Cox 
2377be0d18dfSAlan Cox /**
2378be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2379be0d18dfSAlan Cox  *	@ap: port
2380be0d18dfSAlan Cox  *
2381be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2382be0d18dfSAlan Cox  */
2383be0d18dfSAlan Cox 
2384be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2385be0d18dfSAlan Cox {
2386be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2387be0d18dfSAlan Cox }
2388be0d18dfSAlan Cox 
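/*
 * Usage sketch (not part of libata; the names below are made up for
 * illustration): a driver whose board is known to be wired with a
 * 40-wire cable can plug one of the helpers above straight into its
 * port operations instead of implementing cable detection.
 */
static struct ata_port_operations example_pata_40wire_ops = {
	/* ... the driver's other hooks ... */
	.cable_detect	= ata_cable_40wire,
};
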
2389be0d18dfSAlan Cox /**
2390c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2391c6fd2807SJeff Garzik  *	@ap: Bus to probe
2392c6fd2807SJeff Garzik  *
2393c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2394c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2395c6fd2807SJeff Garzik  *	the bus.
2396c6fd2807SJeff Garzik  *
2397c6fd2807SJeff Garzik  *	LOCKING:
2398c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2399c6fd2807SJeff Garzik  *
2400c6fd2807SJeff Garzik  *	RETURNS:
2401c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2402c6fd2807SJeff Garzik  */
2403c6fd2807SJeff Garzik 
2404c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2405c6fd2807SJeff Garzik {
2406c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2407c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2408f58229f8STejun Heo 	int rc;
2409c6fd2807SJeff Garzik 	struct ata_device *dev;
2410c6fd2807SJeff Garzik 
2411c6fd2807SJeff Garzik 	ata_port_probe(ap);
2412c6fd2807SJeff Garzik 
2413f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2414f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2415c6fd2807SJeff Garzik 
2416c6fd2807SJeff Garzik  retry:
2417cdeab114STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2418cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2419cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2420cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2421cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2422cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2423cdeab114STejun Heo 		 * bus as we may be talking too fast.
2424cdeab114STejun Heo 		 */
2425cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2426cdeab114STejun Heo 
2427cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2428cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2429cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2430cdeab114STejun Heo 		 * configuring devices.
2431cdeab114STejun Heo 		 */
2432cdeab114STejun Heo 		if (ap->ops->set_piomode)
2433cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2434cdeab114STejun Heo 	}
2435cdeab114STejun Heo 
2436c6fd2807SJeff Garzik 	/* reset and determine device classes */
2437c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2438c6fd2807SJeff Garzik 
2439f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2440c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2441c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2442c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2443c6fd2807SJeff Garzik 		else
2444c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2445c6fd2807SJeff Garzik 
2446c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2447c6fd2807SJeff Garzik 	}
2448c6fd2807SJeff Garzik 
2449c6fd2807SJeff Garzik 	ata_port_probe(ap);
2450c6fd2807SJeff Garzik 
2451f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2452f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2453f31f0cc2SJeff Garzik 	   the slave device */
2454f31f0cc2SJeff Garzik 
2455f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2456f58229f8STejun Heo 		if (tries[dev->devno])
2457f58229f8STejun Heo 			dev->class = classes[dev->devno];
2458c6fd2807SJeff Garzik 
2459c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2460c6fd2807SJeff Garzik 			continue;
2461c6fd2807SJeff Garzik 
2462bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2463bff04647STejun Heo 				     dev->id);
2464c6fd2807SJeff Garzik 		if (rc)
2465c6fd2807SJeff Garzik 			goto fail;
2466f31f0cc2SJeff Garzik 	}
2467f31f0cc2SJeff Garzik 
2468be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2469be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2470be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2471be0d18dfSAlan Cox 
2472614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2473614fe29bSAlan Cox 	   reported cable types and sensed types */
2474614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2475614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2476614fe29bSAlan Cox 			continue;
2477614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2478614fe29bSAlan Cox 		   end of the link the bridge is on, which is a problem */
2479614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2480614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2481614fe29bSAlan Cox 	}
2482614fe29bSAlan Cox 
2483f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2484f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2485f31f0cc2SJeff Garzik 
2486f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2487f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2488f31f0cc2SJeff Garzik 			continue;
2489c6fd2807SJeff Garzik 
24909af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2491efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
24929af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2493c6fd2807SJeff Garzik 		if (rc)
2494c6fd2807SJeff Garzik 			goto fail;
2495c6fd2807SJeff Garzik 	}
2496c6fd2807SJeff Garzik 
2497c6fd2807SJeff Garzik 	/* configure transfer mode */
24980260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
24994ae72a1eSTejun Heo 	if (rc)
2500c6fd2807SJeff Garzik 		goto fail;
2501c6fd2807SJeff Garzik 
2502f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2503f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2504c6fd2807SJeff Garzik 			return 0;
2505c6fd2807SJeff Garzik 
2506c6fd2807SJeff Garzik 	/* no device present, disable port */
2507c6fd2807SJeff Garzik 	ata_port_disable(ap);
2508c6fd2807SJeff Garzik 	return -ENODEV;
2509c6fd2807SJeff Garzik 
2510c6fd2807SJeff Garzik  fail:
25114ae72a1eSTejun Heo 	tries[dev->devno]--;
25124ae72a1eSTejun Heo 
2513c6fd2807SJeff Garzik 	switch (rc) {
2514c6fd2807SJeff Garzik 	case -EINVAL:
25154ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2516c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2517c6fd2807SJeff Garzik 		break;
25184ae72a1eSTejun Heo 
25194ae72a1eSTejun Heo 	case -ENODEV:
25204ae72a1eSTejun Heo 		/* give it just one more chance */
25214ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2522c6fd2807SJeff Garzik 	case -EIO:
25234ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
25244ae72a1eSTejun Heo 			/* This is the last chance, better to slow
25254ae72a1eSTejun Heo 			 * down than lose it.
25264ae72a1eSTejun Heo 			 */
2527936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
25284ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
25294ae72a1eSTejun Heo 		}
2530c6fd2807SJeff Garzik 	}
2531c6fd2807SJeff Garzik 
25324ae72a1eSTejun Heo 	if (!tries[dev->devno])
2533c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2534c6fd2807SJeff Garzik 
2535c6fd2807SJeff Garzik 	goto retry;
2536c6fd2807SJeff Garzik }
2537c6fd2807SJeff Garzik 
2538c6fd2807SJeff Garzik /**
2539c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2540c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2541c6fd2807SJeff Garzik  *
2542c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2543c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2544c6fd2807SJeff Garzik  *
2545cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2546c6fd2807SJeff Garzik  *	serialization.
2547c6fd2807SJeff Garzik  */
2548c6fd2807SJeff Garzik 
2549c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2550c6fd2807SJeff Garzik {
2551c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2552c6fd2807SJeff Garzik }
2553c6fd2807SJeff Garzik 
2554c6fd2807SJeff Garzik /**
2555c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2556936fd732STejun Heo  *	@link: SATA link to printk link status about
2557c6fd2807SJeff Garzik  *
2558c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2559c6fd2807SJeff Garzik  *
2560c6fd2807SJeff Garzik  *	LOCKING:
2561c6fd2807SJeff Garzik  *	None.
2562c6fd2807SJeff Garzik  */
2563936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2564c6fd2807SJeff Garzik {
2565c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2566c6fd2807SJeff Garzik 
2567936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2568c6fd2807SJeff Garzik 		return;
2569936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2570c6fd2807SJeff Garzik 
2571936fd732STejun Heo 	if (ata_link_online(link)) {
2572c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2573936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2574c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2575c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2576c6fd2807SJeff Garzik 	} else {
2577936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2578c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2579c6fd2807SJeff Garzik 				sstatus, scontrol);
2580c6fd2807SJeff Garzik 	}
2581c6fd2807SJeff Garzik }
2582c6fd2807SJeff Garzik 
2583c6fd2807SJeff Garzik /**
2584c6fd2807SJeff Garzik  *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2585c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2586c6fd2807SJeff Garzik  *
2587c6fd2807SJeff Garzik  *	This function issues commands to standard SATA Sxxx
2588c6fd2807SJeff Garzik  *	PHY registers, to wake up the phy (and device), and
2589c6fd2807SJeff Garzik  *	clear any reset condition.
2590c6fd2807SJeff Garzik  *
2591c6fd2807SJeff Garzik  *	LOCKING:
2592c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2593c6fd2807SJeff Garzik  *
2594c6fd2807SJeff Garzik  */
2595c6fd2807SJeff Garzik void __sata_phy_reset(struct ata_port *ap)
2596c6fd2807SJeff Garzik {
2597936fd732STejun Heo 	struct ata_link *link = &ap->link;
2598c6fd2807SJeff Garzik 	unsigned long timeout = jiffies + (HZ * 5);
2599936fd732STejun Heo 	u32 sstatus;
2600c6fd2807SJeff Garzik 
2601c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET) {
2602c6fd2807SJeff Garzik 		/* issue phy wake/reset */
2603936fd732STejun Heo 		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2604c6fd2807SJeff Garzik 		/* Couldn't find anything in SATA I/II specs, but
2605c6fd2807SJeff Garzik 		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2606c6fd2807SJeff Garzik 		mdelay(1);
2607c6fd2807SJeff Garzik 	}
2608c6fd2807SJeff Garzik 	/* phy wake/clear reset */
2609936fd732STejun Heo 	sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2610c6fd2807SJeff Garzik 
2611c6fd2807SJeff Garzik 	/* wait for phy to become ready, if necessary */
2612c6fd2807SJeff Garzik 	do {
2613c6fd2807SJeff Garzik 		msleep(200);
2614936fd732STejun Heo 		sata_scr_read(link, SCR_STATUS, &sstatus);
2615c6fd2807SJeff Garzik 		if ((sstatus & 0xf) != 1)
2616c6fd2807SJeff Garzik 			break;
2617c6fd2807SJeff Garzik 	} while (time_before(jiffies, timeout));
2618c6fd2807SJeff Garzik 
2619c6fd2807SJeff Garzik 	/* print link status */
2620936fd732STejun Heo 	sata_print_link_status(link);
2621c6fd2807SJeff Garzik 
2622c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
2623936fd732STejun Heo 	if (!ata_link_offline(link))
2624c6fd2807SJeff Garzik 		ata_port_probe(ap);
2625c6fd2807SJeff Garzik 	else
2626c6fd2807SJeff Garzik 		ata_port_disable(ap);
2627c6fd2807SJeff Garzik 
2628c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2629c6fd2807SJeff Garzik 		return;
2630c6fd2807SJeff Garzik 
2631c6fd2807SJeff Garzik 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2632c6fd2807SJeff Garzik 		ata_port_disable(ap);
2633c6fd2807SJeff Garzik 		return;
2634c6fd2807SJeff Garzik 	}
2635c6fd2807SJeff Garzik 
2636c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_SATA;
2637c6fd2807SJeff Garzik }
2638c6fd2807SJeff Garzik 
2639c6fd2807SJeff Garzik /**
2640c6fd2807SJeff Garzik  *	sata_phy_reset - Reset SATA bus.
2641c6fd2807SJeff Garzik  *	@ap: SATA port associated with target SATA PHY.
2642c6fd2807SJeff Garzik  *
2643c6fd2807SJeff Garzik  *	This function resets the SATA bus, and then probes
2644c6fd2807SJeff Garzik  *	the bus for devices.
2645c6fd2807SJeff Garzik  *
2646c6fd2807SJeff Garzik  *	LOCKING:
2647c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2648c6fd2807SJeff Garzik  *
2649c6fd2807SJeff Garzik  */
2650c6fd2807SJeff Garzik void sata_phy_reset(struct ata_port *ap)
2651c6fd2807SJeff Garzik {
2652c6fd2807SJeff Garzik 	__sata_phy_reset(ap);
2653c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED)
2654c6fd2807SJeff Garzik 		return;
2655c6fd2807SJeff Garzik 	ata_bus_reset(ap);
2656c6fd2807SJeff Garzik }
2657c6fd2807SJeff Garzik 
2658c6fd2807SJeff Garzik /**
2659c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2660c6fd2807SJeff Garzik  *	@adev: device
2661c6fd2807SJeff Garzik  *
2662c6fd2807SJeff Garzik  *	Obtain the other device on the same cable; if none is
2663c6fd2807SJeff Garzik  *	present, NULL is returned.
2664c6fd2807SJeff Garzik  */
2665c6fd2807SJeff Garzik 
2666c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2667c6fd2807SJeff Garzik {
26689af5c9c9STejun Heo 	struct ata_link *link = adev->link;
26699af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2670c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2671c6fd2807SJeff Garzik 		return NULL;
2672c6fd2807SJeff Garzik 	return pair;
2673c6fd2807SJeff Garzik }
2674c6fd2807SJeff Garzik 
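/*
 * Usage sketch (hypothetical helper, not from the kernel tree): a PATA
 * controller with one shared timing register per cable may want the
 * slower PIO mode of the two devices sharing that cable.
 */
static u8 example_shared_pio_mode(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	if (pair && pair->pio_mode < adev->pio_mode)
		return pair->pio_mode;
	return adev->pio_mode;
}
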
2675c6fd2807SJeff Garzik /**
2676c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2677c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2678c6fd2807SJeff Garzik  *
2679c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2680c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2681c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2682c6fd2807SJeff Garzik  *	on this port.
2683c6fd2807SJeff Garzik  *
2684cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2685c6fd2807SJeff Garzik  *	serialization.
2686c6fd2807SJeff Garzik  */
2687c6fd2807SJeff Garzik 
2688c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2689c6fd2807SJeff Garzik {
26909af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
26919af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2692c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2693c6fd2807SJeff Garzik }
2694c6fd2807SJeff Garzik 
2695c6fd2807SJeff Garzik /**
2696c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2697936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2698c6fd2807SJeff Garzik  *
2699936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2700c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2701c6fd2807SJeff Garzik  *	using sata_set_spd().
2702c6fd2807SJeff Garzik  *
2703c6fd2807SJeff Garzik  *	LOCKING:
2704c6fd2807SJeff Garzik  *	Inherited from caller.
2705c6fd2807SJeff Garzik  *
2706c6fd2807SJeff Garzik  *	RETURNS:
2707c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2708c6fd2807SJeff Garzik  */
2709936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2710c6fd2807SJeff Garzik {
2711c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2712c6fd2807SJeff Garzik 	int rc, highbit;
2713c6fd2807SJeff Garzik 
2714936fd732STejun Heo 	if (!sata_scr_valid(link))
2715008a7896STejun Heo 		return -EOPNOTSUPP;
2716008a7896STejun Heo 
2717008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2718936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2719008a7896STejun Heo 	 */
2720936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2721008a7896STejun Heo 	if (rc == 0)
2722008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2723008a7896STejun Heo 	else
2724936fd732STejun Heo 		spd = link->sata_spd;
2725c6fd2807SJeff Garzik 
2726936fd732STejun Heo 	mask = link->sata_spd_limit;
2727c6fd2807SJeff Garzik 	if (mask <= 1)
2728c6fd2807SJeff Garzik 		return -EINVAL;
2729008a7896STejun Heo 
2730008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2731c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2732c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2733c6fd2807SJeff Garzik 
2734008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2735008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2736008a7896STejun Heo 	 */
2737008a7896STejun Heo 	if (spd > 1)
2738008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2739008a7896STejun Heo 	else
2740008a7896STejun Heo 		mask &= 1;
2741008a7896STejun Heo 
2742008a7896STejun Heo 	/* were we already at the bottom? */
2743c6fd2807SJeff Garzik 	if (!mask)
2744c6fd2807SJeff Garzik 		return -EINVAL;
2745c6fd2807SJeff Garzik 
2746936fd732STejun Heo 	link->sata_spd_limit = mask;
2747c6fd2807SJeff Garzik 
2748936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2749c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2750c6fd2807SJeff Garzik 
2751c6fd2807SJeff Garzik 	return 0;
2752c6fd2807SJeff Garzik }
2753c6fd2807SJeff Garzik 
2754936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2755c6fd2807SJeff Garzik {
27565270222fSTejun Heo 	struct ata_link *host_link = &link->ap->link;
27575270222fSTejun Heo 	u32 limit, target, spd;
2758c6fd2807SJeff Garzik 
27595270222fSTejun Heo 	limit = link->sata_spd_limit;
27605270222fSTejun Heo 
27615270222fSTejun Heo 	/* Don't configure downstream link faster than upstream link.
27625270222fSTejun Heo 	 * It doesn't speed up anything and some PMPs choke on such
27635270222fSTejun Heo 	 * configuration.
27645270222fSTejun Heo 	 */
27655270222fSTejun Heo 	if (!ata_is_host_link(link) && host_link->sata_spd)
27665270222fSTejun Heo 		limit &= (1 << host_link->sata_spd) - 1;
27675270222fSTejun Heo 
27685270222fSTejun Heo 	if (limit == UINT_MAX)
27695270222fSTejun Heo 		target = 0;
2770c6fd2807SJeff Garzik 	else
27715270222fSTejun Heo 		target = fls(limit);
2772c6fd2807SJeff Garzik 
2773c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
27745270222fSTejun Heo 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2775c6fd2807SJeff Garzik 
27765270222fSTejun Heo 	return spd != target;
2777c6fd2807SJeff Garzik }
2778c6fd2807SJeff Garzik 
2779c6fd2807SJeff Garzik /**
2780c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2781936fd732STejun Heo  *	@link: Link in question
2782c6fd2807SJeff Garzik  *
2783c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2784936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2785c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2786c6fd2807SJeff Garzik  *	configuration.
2787c6fd2807SJeff Garzik  *
2788c6fd2807SJeff Garzik  *	LOCKING:
2789c6fd2807SJeff Garzik  *	Inherited from caller.
2790c6fd2807SJeff Garzik  *
2791c6fd2807SJeff Garzik  *	RETURNS:
2792c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2793c6fd2807SJeff Garzik  */
2794936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2795c6fd2807SJeff Garzik {
2796c6fd2807SJeff Garzik 	u32 scontrol;
2797c6fd2807SJeff Garzik 
2798936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2799db64bcf3STejun Heo 		return 1;
2800c6fd2807SJeff Garzik 
2801936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2802c6fd2807SJeff Garzik }
2803c6fd2807SJeff Garzik 
2804c6fd2807SJeff Garzik /**
2805c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2806936fd732STejun Heo  *	@link: Link to set SATA spd for
2807c6fd2807SJeff Garzik  *
2808936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2809c6fd2807SJeff Garzik  *
2810c6fd2807SJeff Garzik  *	LOCKING:
2811c6fd2807SJeff Garzik  *	Inherited from caller.
2812c6fd2807SJeff Garzik  *
2813c6fd2807SJeff Garzik  *	RETURNS:
2814c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2815c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2816c6fd2807SJeff Garzik  */
2817936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2818c6fd2807SJeff Garzik {
2819c6fd2807SJeff Garzik 	u32 scontrol;
2820c6fd2807SJeff Garzik 	int rc;
2821c6fd2807SJeff Garzik 
2822936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2823c6fd2807SJeff Garzik 		return rc;
2824c6fd2807SJeff Garzik 
2825936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2826c6fd2807SJeff Garzik 		return 0;
2827c6fd2807SJeff Garzik 
2828936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2829c6fd2807SJeff Garzik 		return rc;
2830c6fd2807SJeff Garzik 
2831c6fd2807SJeff Garzik 	return 1;
2832c6fd2807SJeff Garzik }
2833c6fd2807SJeff Garzik 
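/*
 * Usage sketch (hypothetical error-handling helper): when a link keeps
 * misbehaving, step its speed limit down and report whether a hard reset
 * is now needed to renegotiate at the new speed.
 */
static int example_slow_down_link(struct ata_link *link)
{
	if (sata_down_spd_limit(link))
		return 0;	/* already at the bottom or no SCR access */

	/* write the new limit to SControl; 1 means it actually changed */
	return sata_set_spd(link) == 1;
}
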
2834c6fd2807SJeff Garzik /*
2835c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2836c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2837c6fd2807SJeff Garzik  */
2838c6fd2807SJeff Garzik /*
2839b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2840c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2841b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2842b352e57dSAlan Cox  *
2843b352e57dSAlan Cox  * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
2844c6fd2807SJeff Garzik  */
2845c6fd2807SJeff Garzik 
2846c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2847c6fd2807SJeff Garzik 
2848c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2849c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2850c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2851c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2852c6fd2807SJeff Garzik 
2853b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2854b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2855c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2856c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2857c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2858c6fd2807SJeff Garzik 
2859c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2860c6fd2807SJeff Garzik 
2861c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2862c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2863c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2864c6fd2807SJeff Garzik 
2865c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2866c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2867c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2868c6fd2807SJeff Garzik 
2869b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2870b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2871c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2872c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2873c6fd2807SJeff Garzik 
2874c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2875c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2876c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2877c6fd2807SJeff Garzik 
2878c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2879c6fd2807SJeff Garzik 
2880c6fd2807SJeff Garzik 	{ 0xFF }
2881c6fd2807SJeff Garzik };
2882c6fd2807SJeff Garzik 
2883c6fd2807SJeff Garzik #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2884c6fd2807SJeff Garzik #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2885c6fd2807SJeff Garzik 
2886c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2887c6fd2807SJeff Garzik {
2888c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2889c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2890c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2891c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2892c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2893c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2894c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2895c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2896c6fd2807SJeff Garzik }
2897c6fd2807SJeff Garzik 
2898c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2899c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2900c6fd2807SJeff Garzik {
2901c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2902c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2903c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2904c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2905c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2906c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2907c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2908c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2909c6fd2807SJeff Garzik }
2910c6fd2807SJeff Garzik 
2911c6fd2807SJeff Garzik static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
2912c6fd2807SJeff Garzik {
2913c6fd2807SJeff Garzik 	const struct ata_timing *t;
2914c6fd2807SJeff Garzik 
2915c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2916c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2917c6fd2807SJeff Garzik 			return NULL;
2918c6fd2807SJeff Garzik 	return t;
2919c6fd2807SJeff Garzik }
2920c6fd2807SJeff Garzik 
2921c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2922c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2923c6fd2807SJeff Garzik {
2924c6fd2807SJeff Garzik 	const struct ata_timing *s;
2925c6fd2807SJeff Garzik 	struct ata_timing p;
2926c6fd2807SJeff Garzik 
2927c6fd2807SJeff Garzik 	/*
2928c6fd2807SJeff Garzik 	 * Find the mode.
2929c6fd2807SJeff Garzik 	 */
2930c6fd2807SJeff Garzik 
2931c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2932c6fd2807SJeff Garzik 		return -EINVAL;
2933c6fd2807SJeff Garzik 
2934c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2935c6fd2807SJeff Garzik 
2936c6fd2807SJeff Garzik 	/*
2937c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2938c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2939c6fd2807SJeff Garzik 	 */
2940c6fd2807SJeff Garzik 
2941c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2942c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2943c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2944c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2945c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2946c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2947c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2948c6fd2807SJeff Garzik 		}
2949c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2950c6fd2807SJeff Garzik 	}
2951c6fd2807SJeff Garzik 
2952c6fd2807SJeff Garzik 	/*
2953c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2954c6fd2807SJeff Garzik 	 */
2955c6fd2807SJeff Garzik 
2956c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2957c6fd2807SJeff Garzik 
2958c6fd2807SJeff Garzik 	/*
2959c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2960c6fd2807SJeff Garzik 	 * S.M.A.R.T. and some other commands.  We have to ensure that the
2961c6fd2807SJeff Garzik 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2962c6fd2807SJeff Garzik 	 */
2963c6fd2807SJeff Garzik 
2964fd3367afSAlan 	if (speed > XFER_PIO_6) {
2965c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2966c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2967c6fd2807SJeff Garzik 	}
2968c6fd2807SJeff Garzik 
2969c6fd2807SJeff Garzik 	/*
2970c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2971c6fd2807SJeff Garzik 	 */
2972c6fd2807SJeff Garzik 
2973c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2974c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2975c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2976c6fd2807SJeff Garzik 	}
2977c6fd2807SJeff Garzik 
2978c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2979c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2980c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2981c6fd2807SJeff Garzik 	}
29824f701d1eSAlan Cox 
29834f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
29844f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery;
29854f701d1eSAlan Cox 	   if so, we must correct this */
29864f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
29874f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2988c6fd2807SJeff Garzik 
2989c6fd2807SJeff Garzik 	return 0;
2990c6fd2807SJeff Garzik }
2991c6fd2807SJeff Garzik 
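/*
 * Usage sketch (hypothetical ->set_piomode hook): controller drivers
 * usually feed the chosen mode through ata_timing_compute() to turn the
 * nanosecond tables above into bus-clock counts.  The 33.3 MHz clock and
 * the "program registers" step are assumptions for illustration only.
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	const int T = 1000000000 / 33333;	/* clock period in ps (33.3 MHz) */

	if (ata_timing_compute(adev, adev->pio_mode, &t, T, T))
		return;

	/* t.setup, t.active and t.recover would now be written to the
	 * chipset's timing registers for this device.
	 */
}
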
2992c6fd2807SJeff Garzik /**
2993c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2994c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2995458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2996c6fd2807SJeff Garzik  *
2997c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2998c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2999c6fd2807SJeff Garzik  *	will apply the limit.
3000c6fd2807SJeff Garzik  *
3001c6fd2807SJeff Garzik  *	LOCKING:
3002c6fd2807SJeff Garzik  *	Inherited from caller.
3003c6fd2807SJeff Garzik  *
3004c6fd2807SJeff Garzik  *	RETURNS:
3005c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
3006c6fd2807SJeff Garzik  */
3007458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3008c6fd2807SJeff Garzik {
3009458337dbSTejun Heo 	char buf[32];
3010458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
3011458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
3012458337dbSTejun Heo 	int quiet, highbit;
3013c6fd2807SJeff Garzik 
3014458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
3015458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
3016458337dbSTejun Heo 
3017458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3018458337dbSTejun Heo 						  dev->mwdma_mask,
3019c6fd2807SJeff Garzik 						  dev->udma_mask);
3020458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3021c6fd2807SJeff Garzik 
3022458337dbSTejun Heo 	switch (sel) {
3023458337dbSTejun Heo 	case ATA_DNXFER_PIO:
3024458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
3025458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
3026458337dbSTejun Heo 		break;
3027458337dbSTejun Heo 
3028458337dbSTejun Heo 	case ATA_DNXFER_DMA:
3029458337dbSTejun Heo 		if (udma_mask) {
3030458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
3031458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
3032458337dbSTejun Heo 			if (!udma_mask)
3033458337dbSTejun Heo 				return -ENOENT;
3034458337dbSTejun Heo 		} else if (mwdma_mask) {
3035458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
3036458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
3037458337dbSTejun Heo 			if (!mwdma_mask)
3038458337dbSTejun Heo 				return -ENOENT;
3039458337dbSTejun Heo 		}
3040458337dbSTejun Heo 		break;
3041458337dbSTejun Heo 
3042458337dbSTejun Heo 	case ATA_DNXFER_40C:
3043458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
3044458337dbSTejun Heo 		break;
3045458337dbSTejun Heo 
3046458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
3047458337dbSTejun Heo 		pio_mask &= 1;
3048458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
3049458337dbSTejun Heo 		mwdma_mask = 0;
3050458337dbSTejun Heo 		udma_mask = 0;
3051458337dbSTejun Heo 		break;
3052458337dbSTejun Heo 
3053458337dbSTejun Heo 	default:
3054458337dbSTejun Heo 		BUG();
3055458337dbSTejun Heo 	}
3056458337dbSTejun Heo 
3057458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3058458337dbSTejun Heo 
3059458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3060458337dbSTejun Heo 		return -ENOENT;
3061458337dbSTejun Heo 
3062458337dbSTejun Heo 	if (!quiet) {
3063458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3064458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
3065458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
3066458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3067458337dbSTejun Heo 		else
3068458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
3069458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
3070458337dbSTejun Heo 
3071458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
3072458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
3073458337dbSTejun Heo 	}
3074c6fd2807SJeff Garzik 
3075c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3076c6fd2807SJeff Garzik 			    &dev->udma_mask);
3077c6fd2807SJeff Garzik 
3078c6fd2807SJeff Garzik 	return 0;
3079c6fd2807SJeff Garzik }
3080c6fd2807SJeff Garzik 
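/*
 * Usage sketch (hypothetical wrapper): error-handling code drops one
 * speed grade at a time; ATA_DNXFER_QUIET can be ORed in when the caller
 * prints its own message instead of the default warning.
 */
static void example_step_down_dma(struct ata_device *dev)
{
	if (ata_down_xfermask_limit(dev, ATA_DNXFER_DMA | ATA_DNXFER_QUIET) == 0)
		ata_dev_printk(dev, KERN_WARNING,
			       "stepping down DMA speed after repeated errors\n");
}
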
3081c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
3082c6fd2807SJeff Garzik {
30839af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3084c6fd2807SJeff Garzik 	unsigned int err_mask;
3085c6fd2807SJeff Garzik 	int rc;
3086c6fd2807SJeff Garzik 
3087c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
3088c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3089c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
3090c6fd2807SJeff Garzik 
3091c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
30922dcb407eSJeff Garzik 
309311750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
309411750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
309511750a40SAlan 		err_mask &= ~AC_ERR_DEV;
30962dcb407eSJeff Garzik 
30970bc2a79aSAlan Cox 	/* Some very old devices and some bad newer ones fail any kind of
30980bc2a79aSAlan Cox 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
30990bc2a79aSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
31000bc2a79aSAlan Cox 			dev->pio_mode <= XFER_PIO_2)
31010bc2a79aSAlan Cox 		err_mask &= ~AC_ERR_DEV;
31022dcb407eSJeff Garzik 
31033acaf94bSAlan Cox 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
31043acaf94bSAlan Cox 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
31053acaf94bSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
31063acaf94bSAlan Cox 	    dev->dma_mode == XFER_MW_DMA_0 &&
31073acaf94bSAlan Cox 	    (dev->id[63] >> 8) & 1)
31083acaf94bSAlan Cox 		err_mask &= ~AC_ERR_DEV;
31093acaf94bSAlan Cox 
3110c6fd2807SJeff Garzik 	if (err_mask) {
3111c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3112c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
3113c6fd2807SJeff Garzik 		return -EIO;
3114c6fd2807SJeff Garzik 	}
3115c6fd2807SJeff Garzik 
3116baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3117422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3118baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3119c6fd2807SJeff Garzik 	if (rc)
3120c6fd2807SJeff Garzik 		return rc;
3121c6fd2807SJeff Garzik 
3122c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3123c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
3124c6fd2807SJeff Garzik 
3125c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3126c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
3127c6fd2807SJeff Garzik 	return 0;
3128c6fd2807SJeff Garzik }
3129c6fd2807SJeff Garzik 
3130c6fd2807SJeff Garzik /**
313104351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
31320260731fSTejun Heo  *	@link: link on which timings will be programmed
3133c6fd2807SJeff Garzik  *	@r_failed_dev: out parameter for failed device
3134c6fd2807SJeff Garzik  *
313504351821SAlan  *	Standard implementation of the function used to tune and set
313604351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
313704351821SAlan  *	ata_dev_set_mode() fails, a pointer to the failing device is
3138c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
3139c6fd2807SJeff Garzik  *
3140c6fd2807SJeff Garzik  *	LOCKING:
3141c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3142c6fd2807SJeff Garzik  *
3143c6fd2807SJeff Garzik  *	RETURNS:
3144c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3145c6fd2807SJeff Garzik  */
314604351821SAlan 
31470260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3148c6fd2807SJeff Garzik {
31490260731fSTejun Heo 	struct ata_port *ap = link->ap;
3150c6fd2807SJeff Garzik 	struct ata_device *dev;
3151f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
3152c6fd2807SJeff Garzik 
3153c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
3154f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3155c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
3156b3a70601SAlan Cox 		unsigned int mode_mask;
3157c6fd2807SJeff Garzik 
3158c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3159c6fd2807SJeff Garzik 			continue;
3160c6fd2807SJeff Garzik 
3161b3a70601SAlan Cox 		mode_mask = ATA_DMA_MASK_ATA;
3162b3a70601SAlan Cox 		if (dev->class == ATA_DEV_ATAPI)
3163b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_ATAPI;
3164b3a70601SAlan Cox 		else if (ata_id_is_cfa(dev->id))
3165b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_CFA;
3166b3a70601SAlan Cox 
3167c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
3168c6fd2807SJeff Garzik 
3169c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3170c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3171b3a70601SAlan Cox 
3172b3a70601SAlan Cox 		if (libata_dma_mask & mode_mask)
3173b3a70601SAlan Cox 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3174b3a70601SAlan Cox 		else
3175b3a70601SAlan Cox 			dma_mask = 0;
3176b3a70601SAlan Cox 
3177c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3178c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3179c6fd2807SJeff Garzik 
3180c6fd2807SJeff Garzik 		found = 1;
3181c6fd2807SJeff Garzik 		if (dev->dma_mode)
3182c6fd2807SJeff Garzik 			used_dma = 1;
3183c6fd2807SJeff Garzik 	}
3184c6fd2807SJeff Garzik 	if (!found)
3185c6fd2807SJeff Garzik 		goto out;
3186c6fd2807SJeff Garzik 
3187c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
3188f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3189c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3190c6fd2807SJeff Garzik 			continue;
3191c6fd2807SJeff Garzik 
3192c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
3193c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3194c6fd2807SJeff Garzik 			rc = -EINVAL;
3195c6fd2807SJeff Garzik 			goto out;
3196c6fd2807SJeff Garzik 		}
3197c6fd2807SJeff Garzik 
3198c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
3199c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
3200c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
3201c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
3202c6fd2807SJeff Garzik 	}
3203c6fd2807SJeff Garzik 
3204c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
3205f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3206c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
3207c6fd2807SJeff Garzik 			continue;
3208c6fd2807SJeff Garzik 
3209c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
3210c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3211c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
3212c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
3213c6fd2807SJeff Garzik 	}
3214c6fd2807SJeff Garzik 
3215c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
3216f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
321718d90debSAlan 		/* don't update suspended devices' xfer mode */
32189666f400STejun Heo 		if (!ata_dev_enabled(dev))
3219c6fd2807SJeff Garzik 			continue;
3220c6fd2807SJeff Garzik 
3221c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
3222c6fd2807SJeff Garzik 		if (rc)
3223c6fd2807SJeff Garzik 			goto out;
3224c6fd2807SJeff Garzik 	}
3225c6fd2807SJeff Garzik 
3226c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
3227c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
3228c6fd2807SJeff Garzik 	 */
3229cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3230032af1ceSAlan 		ap->host->simplex_claimed = ap;
3231c6fd2807SJeff Garzik 
3232c6fd2807SJeff Garzik  out:
3233c6fd2807SJeff Garzik 	if (rc)
3234c6fd2807SJeff Garzik 		*r_failed_dev = dev;
3235c6fd2807SJeff Garzik 	return rc;
3236c6fd2807SJeff Garzik }
3237c6fd2807SJeff Garzik 
3238c6fd2807SJeff Garzik /**
323904351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
32400260731fSTejun Heo  *	@link: link on which timings will be programmed
324104351821SAlan  *	@r_failed_dev: out parameter for failed device
324204351821SAlan  *
324304351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
324404351821SAlan  *	ata_set_mode() fails, a pointer to the failing device is
324504351821SAlan  *	returned in @r_failed_dev.
324604351821SAlan  *
324704351821SAlan  *	LOCKING:
324804351821SAlan  *	PCI/etc. bus probe sem.
324904351821SAlan  *
325004351821SAlan  *	RETURNS:
325104351821SAlan  *	0 on success, negative errno otherwise
325204351821SAlan  */
32530260731fSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
325404351821SAlan {
32550260731fSTejun Heo 	struct ata_port *ap = link->ap;
32560260731fSTejun Heo 
325704351821SAlan 	/* has private set_mode? */
325804351821SAlan 	if (ap->ops->set_mode)
32590260731fSTejun Heo 		return ap->ops->set_mode(link, r_failed_dev);
32600260731fSTejun Heo 	return ata_do_set_mode(link, r_failed_dev);
326104351821SAlan }
326204351821SAlan 
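/*
 * Usage sketch (hypothetical ->set_mode hook): hardware whose timings are
 * fixed by firmware can bypass ata_do_set_mode() entirely and just record
 * PIO 0 for every enabled device, the way some legacy PATA drivers do.
 */
static int example_fixed_set_mode(struct ata_link *link,
				  struct ata_device **r_failed_dev)
{
	struct ata_device *dev;

	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;
		dev->pio_mode   = XFER_PIO_0;
		dev->xfer_mode  = XFER_PIO_0;
		dev->xfer_shift = ATA_SHIFT_PIO;
		dev->flags     |= ATA_DFLAG_PIO;
	}
	return 0;
}
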
326304351821SAlan /**
3264c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
3265c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
3266c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
3267c6fd2807SJeff Garzik  *
3268c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
3269c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3270c6fd2807SJeff Garzik  *	other threads.
3271c6fd2807SJeff Garzik  *
3272c6fd2807SJeff Garzik  *	LOCKING:
3273cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3274c6fd2807SJeff Garzik  */
3275c6fd2807SJeff Garzik 
3276c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3277c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3278c6fd2807SJeff Garzik {
3279c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3280c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3281c6fd2807SJeff Garzik }
3282c6fd2807SJeff Garzik 
3283c6fd2807SJeff Garzik /**
3284c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3285c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3286c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3287c6fd2807SJeff Garzik  *	@tmout: overall timeout
3288c6fd2807SJeff Garzik  *
3289c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3290c6fd2807SJeff Garzik  *	or a timeout occurs.
3291c6fd2807SJeff Garzik  *
3292d1adc1bbSTejun Heo  *	LOCKING:
3293d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3294d1adc1bbSTejun Heo  *
3295d1adc1bbSTejun Heo  *	RETURNS:
3296d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3297c6fd2807SJeff Garzik  */
3298d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3299c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3300c6fd2807SJeff Garzik {
3301c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3302c6fd2807SJeff Garzik 	u8 status;
3303c6fd2807SJeff Garzik 
3304c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3305c6fd2807SJeff Garzik 	timer_start = jiffies;
3306c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3307d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3308d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3309c6fd2807SJeff Garzik 		msleep(50);
3310c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3311c6fd2807SJeff Garzik 	}
3312c6fd2807SJeff Garzik 
3313d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3314c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
331535aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
331635aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3317c6fd2807SJeff Garzik 
3318c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3319d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3320d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3321c6fd2807SJeff Garzik 		msleep(50);
3322c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3323c6fd2807SJeff Garzik 	}
3324c6fd2807SJeff Garzik 
3325d1adc1bbSTejun Heo 	if (status == 0xff)
3326d1adc1bbSTejun Heo 		return -ENODEV;
3327d1adc1bbSTejun Heo 
3328c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3329c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
333035aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
333135aa7a43SJeff Garzik 				tmout / HZ, status);
3332d1adc1bbSTejun Heo 		return -EBUSY;
3333c6fd2807SJeff Garzik 	}
3334c6fd2807SJeff Garzik 
3335c6fd2807SJeff Garzik 	return 0;
3336c6fd2807SJeff Garzik }
3337c6fd2807SJeff Garzik 
3338d4b2bab4STejun Heo /**
333988ff6eafSTejun Heo  *	ata_wait_after_reset - wait before checking status after reset
334088ff6eafSTejun Heo  *	@ap: port containing status register to be polled
334188ff6eafSTejun Heo  *	@deadline: deadline jiffies for the operation
334288ff6eafSTejun Heo  *
334388ff6eafSTejun Heo  *	After reset, we need to pause a while before reading status.
334488ff6eafSTejun Heo  *	Also, certain combinations of controller and device report 0xff
334588ff6eafSTejun Heo  *	for some duration (e.g. until SATA PHY is up and running)
334688ff6eafSTejun Heo  *	which is interpreted as an empty port in the ATA world.  This
334788ff6eafSTejun Heo  *	function also waits for such devices to get out of 0xff
334888ff6eafSTejun Heo  *	status.
334988ff6eafSTejun Heo  *
335088ff6eafSTejun Heo  *	LOCKING:
335188ff6eafSTejun Heo  *	Kernel thread context (may sleep).
335288ff6eafSTejun Heo  */
335388ff6eafSTejun Heo void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
335488ff6eafSTejun Heo {
335588ff6eafSTejun Heo 	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
335688ff6eafSTejun Heo 
335788ff6eafSTejun Heo 	if (time_before(until, deadline))
335888ff6eafSTejun Heo 		deadline = until;
335988ff6eafSTejun Heo 
336088ff6eafSTejun Heo 	/* Spec mandates ">= 2ms" before checking status.  We wait
336188ff6eafSTejun Heo 	 * 150ms, because that was the magic delay used for ATAPI
336288ff6eafSTejun Heo 	 * devices in Hale Landis's ATADRVR, for the period of time
336388ff6eafSTejun Heo 	 * between when the ATA command register is written, and then
336488ff6eafSTejun Heo 	 * status is checked.  Because waiting for "a while" before
336588ff6eafSTejun Heo 	 * checking status is fine, post SRST, we perform this magic
336688ff6eafSTejun Heo 	 * delay here as well.
336788ff6eafSTejun Heo 	 *
336888ff6eafSTejun Heo 	 * Old drivers/ide uses the 2mS rule and then waits for ready.
336988ff6eafSTejun Heo 	 */
337088ff6eafSTejun Heo 	msleep(150);
337188ff6eafSTejun Heo 
337288ff6eafSTejun Heo 	/* Wait for 0xff to clear.  Some SATA devices take a long time
337388ff6eafSTejun Heo 	 * to clear 0xff after reset.  For example, HHD424020F7SV00
337488ff6eafSTejun Heo 	 * iVDR needs >= 800ms, while Quantum GoVault needs even more
337588ff6eafSTejun Heo 	 * than that.
33761974e201STejun Heo 	 *
33771974e201STejun Heo 	 * Note that some PATA controllers (pata_ali) explode if
33781974e201STejun Heo 	 * status register is read more than once when there's no
33791974e201STejun Heo 	 * device attached.
338088ff6eafSTejun Heo 	 */
33811974e201STejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
338288ff6eafSTejun Heo 		while (1) {
338388ff6eafSTejun Heo 			u8 status = ata_chk_status(ap);
338488ff6eafSTejun Heo 
338588ff6eafSTejun Heo 			if (status != 0xff || time_after(jiffies, deadline))
338688ff6eafSTejun Heo 				return;
338788ff6eafSTejun Heo 
338888ff6eafSTejun Heo 			msleep(50);
338988ff6eafSTejun Heo 		}
339088ff6eafSTejun Heo 	}
33911974e201STejun Heo }
339288ff6eafSTejun Heo 
339388ff6eafSTejun Heo /**
3394d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3395d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3396d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3397d4b2bab4STejun Heo  *
3398d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3399d4b2bab4STejun Heo  *	occurs.
3400d4b2bab4STejun Heo  *
3401d4b2bab4STejun Heo  *	LOCKING:
3402d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3403d4b2bab4STejun Heo  *
3404d4b2bab4STejun Heo  *	RETURNS:
3405d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3406d4b2bab4STejun Heo  */
3407d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3408d4b2bab4STejun Heo {
3409d4b2bab4STejun Heo 	unsigned long start = jiffies;
3410d4b2bab4STejun Heo 	int warned = 0;
3411d4b2bab4STejun Heo 
3412d4b2bab4STejun Heo 	while (1) {
3413d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3414d4b2bab4STejun Heo 		unsigned long now = jiffies;
3415d4b2bab4STejun Heo 
3416d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3417d4b2bab4STejun Heo 			return 0;
3418936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3419d4b2bab4STejun Heo 			return -ENODEV;
3420d4b2bab4STejun Heo 		if (time_after(now, deadline))
3421d4b2bab4STejun Heo 			return -EBUSY;
3422d4b2bab4STejun Heo 
3423d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3424d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3425d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3426d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3427d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3428d4b2bab4STejun Heo 			warned = 1;
3429d4b2bab4STejun Heo 		}
3430d4b2bab4STejun Heo 
3431d4b2bab4STejun Heo 		msleep(50);
3432d4b2bab4STejun Heo 	}
3433d4b2bab4STejun Heo }
3434d4b2bab4STejun Heo 
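/*
 * Illustrative only: callers bound the poll with an absolute jiffies
 * deadline, e.g.
 *
 *	rc = ata_wait_ready(ap, jiffies + 30 * HZ);
 *
 * where -ENODEV means the port looks empty and -EBUSY means the device
 * stayed busy past the deadline.
 */
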
3435d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3436d4b2bab4STejun Heo 			      unsigned long deadline)
3437c6fd2807SJeff Garzik {
3438c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3439c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3440c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
34419b89391cSTejun Heo 	int rc, ret = 0;
3442c6fd2807SJeff Garzik 
3443c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3444c6fd2807SJeff Garzik 	 * BSY bit to clear
3445c6fd2807SJeff Garzik 	 */
3446d4b2bab4STejun Heo 	if (dev0) {
3447d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
34489b89391cSTejun Heo 		if (rc) {
34499b89391cSTejun Heo 			if (rc != -ENODEV)
3450d4b2bab4STejun Heo 				return rc;
34519b89391cSTejun Heo 			ret = rc;
34529b89391cSTejun Heo 		}
3453d4b2bab4STejun Heo 	}
3454c6fd2807SJeff Garzik 
3455e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3456e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3457c6fd2807SJeff Garzik 	 */
3458e141d999STejun Heo 	if (dev1) {
3459e141d999STejun Heo 		int i;
3460c6fd2807SJeff Garzik 
3461c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3462e141d999STejun Heo 
3463e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3464e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3465e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3466e141d999STejun Heo 		 */
3467e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3468e141d999STejun Heo 			u8 nsect, lbal;
3469e141d999STejun Heo 
34700d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
34710d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3472c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3473c6fd2807SJeff Garzik 				break;
3474c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3475c6fd2807SJeff Garzik 		}
3476e141d999STejun Heo 
3477d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
34789b89391cSTejun Heo 		if (rc) {
34799b89391cSTejun Heo 			if (rc != -ENODEV)
3480d4b2bab4STejun Heo 				return rc;
34819b89391cSTejun Heo 			ret = rc;
34829b89391cSTejun Heo 		}
3483d4b2bab4STejun Heo 	}
3484c6fd2807SJeff Garzik 
3485c6fd2807SJeff Garzik 	/* is all this really necessary? */
3486c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3487c6fd2807SJeff Garzik 	if (dev1)
3488c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3489c6fd2807SJeff Garzik 	if (dev0)
3490c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3491d4b2bab4STejun Heo 
34929b89391cSTejun Heo 	return ret;
3493c6fd2807SJeff Garzik }
3494c6fd2807SJeff Garzik 
3495d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3496d4b2bab4STejun Heo 			     unsigned long deadline)
3497c6fd2807SJeff Garzik {
3498c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3499c6fd2807SJeff Garzik 
350044877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3501c6fd2807SJeff Garzik 
3502c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
35030d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3504c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
35050d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3506c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
35070d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3508c6fd2807SJeff Garzik 
350988ff6eafSTejun Heo 	/* wait a while before checking status */
351088ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
3511c6fd2807SJeff Garzik 
3512c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3513c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3514c6fd2807SJeff Garzik 	 * pulldown resistor.
3515c6fd2807SJeff Garzik 	 */
3516150981b0SAlan Cox 	if (ata_chk_status(ap) == 0xFF)
35179b89391cSTejun Heo 		return -ENODEV;
3518c6fd2807SJeff Garzik 
3519d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3520c6fd2807SJeff Garzik }
3521c6fd2807SJeff Garzik 
3522c6fd2807SJeff Garzik /**
3523c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3524c6fd2807SJeff Garzik  *	@ap: port to reset
3525c6fd2807SJeff Garzik  *
3526c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3527c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3528c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3529c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3530c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3531c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3532c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3533c6fd2807SJeff Garzik  *
3534c6fd2807SJeff Garzik  *	LOCKING:
3535c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3536cca3974eSJeff Garzik  *	Obtains host lock.
3537c6fd2807SJeff Garzik  *
3538c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3539c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3540c6fd2807SJeff Garzik  */
3541c6fd2807SJeff Garzik 
3542c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3543c6fd2807SJeff Garzik {
35449af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3545c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3546c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3547c6fd2807SJeff Garzik 	u8 err;
3548c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
35499b89391cSTejun Heo 	int rc;
3550c6fd2807SJeff Garzik 
355144877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3552c6fd2807SJeff Garzik 
3553c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3554c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3555c6fd2807SJeff Garzik 		dev0 = 1;
3556c6fd2807SJeff Garzik 	else {
3557c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3558c6fd2807SJeff Garzik 		if (slave_possible)
3559c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3560c6fd2807SJeff Garzik 	}
3561c6fd2807SJeff Garzik 
3562c6fd2807SJeff Garzik 	if (dev0)
3563c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3564c6fd2807SJeff Garzik 	if (dev1)
3565c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3566c6fd2807SJeff Garzik 
3567c6fd2807SJeff Garzik 	/* select device 0 again */
3568c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3569c6fd2807SJeff Garzik 
3570c6fd2807SJeff Garzik 	/* issue bus reset */
35719b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
35729b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
35739b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3574c6fd2807SJeff Garzik 			goto err_out;
35759b89391cSTejun Heo 	}
3576c6fd2807SJeff Garzik 
3577c6fd2807SJeff Garzik 	/*
3578c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3579c6fd2807SJeff Garzik 	 */
35803f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3581c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
35823f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3583c6fd2807SJeff Garzik 
3584c6fd2807SJeff Garzik 	/* is double-select really necessary? */
35859af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3586c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
35879af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3588c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3589c6fd2807SJeff Garzik 
3590c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
35919af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
35929af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3593c6fd2807SJeff Garzik 		goto err_out;
3594c6fd2807SJeff Garzik 
3595c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3596c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
35970d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3598c6fd2807SJeff Garzik 	}
3599c6fd2807SJeff Garzik 
3600c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3601c6fd2807SJeff Garzik 	return;
3602c6fd2807SJeff Garzik 
3603c6fd2807SJeff Garzik err_out:
3604c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3605ac8869d5SJeff Garzik 	ata_port_disable(ap);
3606c6fd2807SJeff Garzik 
3607c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3608c6fd2807SJeff Garzik }
3609c6fd2807SJeff Garzik 
3610c6fd2807SJeff Garzik /**
3611936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3612936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3613c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3614d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3615c6fd2807SJeff Garzik  *
3616936fd732STejun Heo  *	Make sure SStatus of @link reaches a stable state, determined by
3617c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3618c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constrains the
3619d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3620d4b2bab4STejun Heo  *	some controllers after hot unplugging, this function waits
3621c6fd2807SJeff Garzik  *	until timeout and then returns 0 if DET is stable at 1.
3622c6fd2807SJeff Garzik  *
3623d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3624d4b2bab4STejun Heo  *	two is used.
3625d4b2bab4STejun Heo  *
3626c6fd2807SJeff Garzik  *	LOCKING:
3627c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3628c6fd2807SJeff Garzik  *
3629c6fd2807SJeff Garzik  *	RETURNS:
3630c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3631c6fd2807SJeff Garzik  */
3632936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3633d4b2bab4STejun Heo 		       unsigned long deadline)
3634c6fd2807SJeff Garzik {
3635c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3636d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3637d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3638c6fd2807SJeff Garzik 	u32 last, cur;
3639c6fd2807SJeff Garzik 	int rc;
3640c6fd2807SJeff Garzik 
3641d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3642d4b2bab4STejun Heo 	if (time_before(t, deadline))
3643d4b2bab4STejun Heo 		deadline = t;
3644d4b2bab4STejun Heo 
3645936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3646c6fd2807SJeff Garzik 		return rc;
3647c6fd2807SJeff Garzik 	cur &= 0xf;
3648c6fd2807SJeff Garzik 
3649c6fd2807SJeff Garzik 	last = cur;
3650c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3651c6fd2807SJeff Garzik 
3652c6fd2807SJeff Garzik 	while (1) {
3653c6fd2807SJeff Garzik 		msleep(interval_msec);
3654936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3655c6fd2807SJeff Garzik 			return rc;
3656c6fd2807SJeff Garzik 		cur &= 0xf;
3657c6fd2807SJeff Garzik 
3658c6fd2807SJeff Garzik 		/* DET stable? */
3659c6fd2807SJeff Garzik 		if (cur == last) {
3660d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3661c6fd2807SJeff Garzik 				continue;
3662c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3663c6fd2807SJeff Garzik 				return 0;
3664c6fd2807SJeff Garzik 			continue;
3665c6fd2807SJeff Garzik 		}
3666c6fd2807SJeff Garzik 
3667c6fd2807SJeff Garzik 		/* unstable, start over */
3668c6fd2807SJeff Garzik 		last = cur;
3669c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3670c6fd2807SJeff Garzik 
3671f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3672f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3673f1545154STejun Heo 		 */
3674d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3675f1545154STejun Heo 			return -EPIPE;
3676c6fd2807SJeff Garzik 	}
3677c6fd2807SJeff Garzik }
3678c6fd2807SJeff Garzik 
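/*
 * Illustrative only: @params follows the { interval, duration, timeout }
 * layout of the sata_deb_timing_* tables, so a caller might do
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_hotplug,
 *				jiffies + 5 * HZ);
 */
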
3679c6fd2807SJeff Garzik /**
3680936fd732STejun Heo  *	sata_link_resume - resume SATA link
3681936fd732STejun Heo  *	@link: ATA link to resume SATA
3682c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duration, timeout } in msec
3683d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3684c6fd2807SJeff Garzik  *
3685936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3686c6fd2807SJeff Garzik  *
3687c6fd2807SJeff Garzik  *	LOCKING:
3688c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3689c6fd2807SJeff Garzik  *
3690c6fd2807SJeff Garzik  *	RETURNS:
3691c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3692c6fd2807SJeff Garzik  */
3693936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3694d4b2bab4STejun Heo 		     unsigned long deadline)
3695c6fd2807SJeff Garzik {
3696c6fd2807SJeff Garzik 	u32 scontrol;
3697c6fd2807SJeff Garzik 	int rc;
3698c6fd2807SJeff Garzik 
3699936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3700c6fd2807SJeff Garzik 		return rc;
3701c6fd2807SJeff Garzik 
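	/* DET = 0 (no action), IPM = 3 (no partial/slumber transitions);
	 * SPD field is preserved.
	 */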
3702c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3703c6fd2807SJeff Garzik 
3704936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3705c6fd2807SJeff Garzik 		return rc;
3706c6fd2807SJeff Garzik 
3707c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3708c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3709c6fd2807SJeff Garzik 	 */
3710c6fd2807SJeff Garzik 	msleep(200);
3711c6fd2807SJeff Garzik 
3712936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3713c6fd2807SJeff Garzik }
3714c6fd2807SJeff Garzik 
3715c6fd2807SJeff Garzik /**
3716c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3717cc0680a5STejun Heo  *	@link: ATA link to be reset
3718d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3719c6fd2807SJeff Garzik  *
3720cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  Failure from
3721b8cffc6aSTejun Heo  *	prereset makes libata abort whole reset sequence and give up
3722b8cffc6aSTejun Heo  *	that port, so prereset should be best-effort.  It does its
3723b8cffc6aSTejun Heo  *	best to prepare for reset sequence but if things go wrong, it
3724b8cffc6aSTejun Heo  *	should just whine, not fail.
3725c6fd2807SJeff Garzik  *
3726c6fd2807SJeff Garzik  *	LOCKING:
3727c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3728c6fd2807SJeff Garzik  *
3729c6fd2807SJeff Garzik  *	RETURNS:
3730c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3731c6fd2807SJeff Garzik  */
3732cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3733c6fd2807SJeff Garzik {
3734cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3735936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3736c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3737c6fd2807SJeff Garzik 	int rc;
3738c6fd2807SJeff Garzik 
373931daabdaSTejun Heo 	/* handle link resume */
3740c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
37410c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3742c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3743c6fd2807SJeff Garzik 
3744633273a3STejun Heo 	/* Some PMPs don't work with SRST alone; force hardreset if PMP
3745633273a3STejun Heo 	 * is supported.
3746633273a3STejun Heo 	 */
3747633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP)
3748633273a3STejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
3749633273a3STejun Heo 
3750c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3751c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3752c6fd2807SJeff Garzik 		return 0;
3753c6fd2807SJeff Garzik 
3754936fd732STejun Heo 	/* if SATA, resume link */
3755a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3756936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3757b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3758b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3759cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3760c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3761c6fd2807SJeff Garzik 	}
3762c6fd2807SJeff Garzik 
3763c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3764c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3765c6fd2807SJeff Garzik 	 */
37660c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3767b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
37686dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3769cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3770b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3771b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3772b8cffc6aSTejun Heo 		}
3773b8cffc6aSTejun Heo 	}
3774c6fd2807SJeff Garzik 
3775c6fd2807SJeff Garzik 	return 0;
3776c6fd2807SJeff Garzik }
3777c6fd2807SJeff Garzik 
3778c6fd2807SJeff Garzik /**
3779c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3780cc0680a5STejun Heo  *	@link: ATA link to reset
3781c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3782d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3783c6fd2807SJeff Garzik  *
3784c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3785c6fd2807SJeff Garzik  *
3786c6fd2807SJeff Garzik  *	LOCKING:
3787c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3788c6fd2807SJeff Garzik  *
3789c6fd2807SJeff Garzik  *	RETURNS:
3790c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3791c6fd2807SJeff Garzik  */
3792cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3793d4b2bab4STejun Heo 		      unsigned long deadline)
3794c6fd2807SJeff Garzik {
3795cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3796c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3797d4b2bab4STejun Heo 	unsigned int devmask = 0;
3798d4b2bab4STejun Heo 	int rc;
3799c6fd2807SJeff Garzik 	u8 err;
3800c6fd2807SJeff Garzik 
3801c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3802c6fd2807SJeff Garzik 
3803936fd732STejun Heo 	if (ata_link_offline(link)) {
3804c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3805c6fd2807SJeff Garzik 		goto out;
3806c6fd2807SJeff Garzik 	}
3807c6fd2807SJeff Garzik 
3808c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3809c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3810c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3811c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3812c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3813c6fd2807SJeff Garzik 
3814c6fd2807SJeff Garzik 	/* select device 0 again */
3815c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3816c6fd2807SJeff Garzik 
3817c6fd2807SJeff Garzik 	/* issue bus reset */
3818c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3819d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
38209b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
3821936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3822cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3823d4b2bab4STejun Heo 		return rc;
3824c6fd2807SJeff Garzik 	}
3825c6fd2807SJeff Garzik 
3826c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
38273f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
38283f19859eSTejun Heo 					  devmask & (1 << 0), &err);
3829c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
38303f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
38313f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3832c6fd2807SJeff Garzik 
3833c6fd2807SJeff Garzik  out:
3834c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3835c6fd2807SJeff Garzik 	return 0;
3836c6fd2807SJeff Garzik }
3837c6fd2807SJeff Garzik 
3838c6fd2807SJeff Garzik /**
3839cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
3840cc0680a5STejun Heo  *	@link: link to reset
3841b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duration, timeout } in msec
3842d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3843c6fd2807SJeff Garzik  *
3844cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
3845c6fd2807SJeff Garzik  *
3846c6fd2807SJeff Garzik  *	LOCKING:
3847c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3848c6fd2807SJeff Garzik  *
3849c6fd2807SJeff Garzik  *	RETURNS:
3850c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3851c6fd2807SJeff Garzik  */
3852cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3853d4b2bab4STejun Heo 			unsigned long deadline)
3854c6fd2807SJeff Garzik {
3855c6fd2807SJeff Garzik 	u32 scontrol;
3856c6fd2807SJeff Garzik 	int rc;
3857c6fd2807SJeff Garzik 
3858c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3859c6fd2807SJeff Garzik 
3860936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
3861c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3862c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3863c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3864c6fd2807SJeff Garzik 		 * and Sil3124.
3865c6fd2807SJeff Garzik 		 */
3866936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3867b6103f6dSTejun Heo 			goto out;
3868c6fd2807SJeff Garzik 
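		/* DET = 4: take the phy offline while the speed limit is
		 * being reprogrammed.
		 */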
3869cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3870c6fd2807SJeff Garzik 
3871936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3872b6103f6dSTejun Heo 			goto out;
3873c6fd2807SJeff Garzik 
3874936fd732STejun Heo 		sata_set_spd(link);
3875c6fd2807SJeff Garzik 	}
3876c6fd2807SJeff Garzik 
3877c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3878936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3879b6103f6dSTejun Heo 		goto out;
3880c6fd2807SJeff Garzik 
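	/* DET = 1: request COMRESET; IPM = 3: no partial/slumber transitions */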
3881c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3882c6fd2807SJeff Garzik 
3883936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3884b6103f6dSTejun Heo 		goto out;
3885c6fd2807SJeff Garzik 
3886c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3887c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3888c6fd2807SJeff Garzik 	 */
3889c6fd2807SJeff Garzik 	msleep(1);
3890c6fd2807SJeff Garzik 
3891936fd732STejun Heo 	/* bring link back */
3892936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
3893b6103f6dSTejun Heo  out:
3894b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3895b6103f6dSTejun Heo 	return rc;
3896b6103f6dSTejun Heo }
3897b6103f6dSTejun Heo 
3898b6103f6dSTejun Heo /**
3899b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3900cc0680a5STejun Heo  *	@link: link to reset
3901b6103f6dSTejun Heo  *	@class: resulting class of attached device
3902d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3903b6103f6dSTejun Heo  *
3904b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3905b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3906b6103f6dSTejun Heo  *
3907b6103f6dSTejun Heo  *	LOCKING:
3908b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3909b6103f6dSTejun Heo  *
3910b6103f6dSTejun Heo  *	RETURNS:
3911b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3912b6103f6dSTejun Heo  */
3913cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3914d4b2bab4STejun Heo 		       unsigned long deadline)
3915b6103f6dSTejun Heo {
3916cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3917936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3918b6103f6dSTejun Heo 	int rc;
3919b6103f6dSTejun Heo 
3920b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3921b6103f6dSTejun Heo 
3922b6103f6dSTejun Heo 	/* do hardreset */
3923cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
3924b6103f6dSTejun Heo 	if (rc) {
3925cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3926b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3927b6103f6dSTejun Heo 		return rc;
3928b6103f6dSTejun Heo 	}
3929c6fd2807SJeff Garzik 
3930c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3931936fd732STejun Heo 	if (ata_link_offline(link)) {
3932c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3933c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3934c6fd2807SJeff Garzik 		return 0;
3935c6fd2807SJeff Garzik 	}
3936c6fd2807SJeff Garzik 
393788ff6eafSTejun Heo 	/* wait a while before checking status */
393888ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
393934fee227STejun Heo 
3940633273a3STejun Heo 	/* If PMP is supported, we have to do follow-up SRST.  Note
3941633273a3STejun Heo 	 * that some PMPs don't send D2H Reg FIS after hardreset at
3942633273a3STejun Heo 	 * all if the first port is empty.  Wait for it just for a
3943633273a3STejun Heo 	 * second and request follow-up SRST.
3944633273a3STejun Heo 	 */
3945633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP) {
3946633273a3STejun Heo 		ata_wait_ready(ap, jiffies + HZ);
3947633273a3STejun Heo 		return -EAGAIN;
3948633273a3STejun Heo 	}
3949633273a3STejun Heo 
3950d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
39519b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
39529b89391cSTejun Heo 	if (rc) {
3953cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3954d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3955d4b2bab4STejun Heo 		return rc;
3956c6fd2807SJeff Garzik 	}
3957c6fd2807SJeff Garzik 
3958c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3959c6fd2807SJeff Garzik 
39603f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
3961c6fd2807SJeff Garzik 
3962c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3963c6fd2807SJeff Garzik 	return 0;
3964c6fd2807SJeff Garzik }
3965c6fd2807SJeff Garzik 
3966c6fd2807SJeff Garzik /**
3967c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3968cc0680a5STejun Heo  *	@link: the target ata_link
3969c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3970c6fd2807SJeff Garzik  *
3971c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3972c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3973c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3974c6fd2807SJeff Garzik  *
3975c6fd2807SJeff Garzik  *	LOCKING:
3976c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3977c6fd2807SJeff Garzik  */
3978cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3979c6fd2807SJeff Garzik {
3980cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3981c6fd2807SJeff Garzik 	u32 serror;
3982c6fd2807SJeff Garzik 
3983c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3984c6fd2807SJeff Garzik 
3985c6fd2807SJeff Garzik 	/* print link status */
3986936fd732STejun Heo 	sata_print_link_status(link);
3987c6fd2807SJeff Garzik 
3988c6fd2807SJeff Garzik 	/* clear SError */
3989936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3990936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
3991c6fd2807SJeff Garzik 
3992c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3993c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3994c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3995c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3996c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3997c6fd2807SJeff Garzik 
3998c6fd2807SJeff Garzik 	/* bail out if no device is present */
3999c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4000c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
4001c6fd2807SJeff Garzik 		return;
4002c6fd2807SJeff Garzik 	}
4003c6fd2807SJeff Garzik 
4004c6fd2807SJeff Garzik 	/* set up device control */
40050d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
40060d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
4007c6fd2807SJeff Garzik 
4008c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
4009c6fd2807SJeff Garzik }
4010c6fd2807SJeff Garzik 
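/*
 * Illustrative only (hypothetical driver code): the standard prereset,
 * softreset, hardreset and postreset callbacks above are normally handed
 * to the EH core as a set, roughly
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
 *			  sata_std_hardreset, ata_std_postreset);
 *	}
 */
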
4011c6fd2807SJeff Garzik /**
4012c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
4013c6fd2807SJeff Garzik  *	@dev: device to compare against
4014c6fd2807SJeff Garzik  *	@new_class: class of the new device
4015c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
4016c6fd2807SJeff Garzik  *
4017c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
4018c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
4019c6fd2807SJeff Garzik  *	@new_id.
4020c6fd2807SJeff Garzik  *
4021c6fd2807SJeff Garzik  *	LOCKING:
4022c6fd2807SJeff Garzik  *	None.
4023c6fd2807SJeff Garzik  *
4024c6fd2807SJeff Garzik  *	RETURNS:
4025c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
4026c6fd2807SJeff Garzik  */
4027c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4028c6fd2807SJeff Garzik 			       const u16 *new_id)
4029c6fd2807SJeff Garzik {
4030c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
4031a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
4032a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4033c6fd2807SJeff Garzik 
4034c6fd2807SJeff Garzik 	if (dev->class != new_class) {
4035c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4036c6fd2807SJeff Garzik 			       dev->class, new_class);
4037c6fd2807SJeff Garzik 		return 0;
4038c6fd2807SJeff Garzik 	}
4039c6fd2807SJeff Garzik 
4040a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4041a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4042a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4043a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4044c6fd2807SJeff Garzik 
4045c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
4046c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4047c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
4048c6fd2807SJeff Garzik 		return 0;
4049c6fd2807SJeff Garzik 	}
4050c6fd2807SJeff Garzik 
4051c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
4052c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4053c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
4054c6fd2807SJeff Garzik 		return 0;
4055c6fd2807SJeff Garzik 	}
4056c6fd2807SJeff Garzik 
4057c6fd2807SJeff Garzik 	return 1;
4058c6fd2807SJeff Garzik }
4059c6fd2807SJeff Garzik 
4060c6fd2807SJeff Garzik /**
4061fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
40623fae450cSHenrik Kretzschmar  *	@dev: target ATA device
4063bff04647STejun Heo  *	@readid_flags: read ID flags
4064c6fd2807SJeff Garzik  *
4065c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
4066c6fd2807SJeff Garzik  *	the port.
4067c6fd2807SJeff Garzik  *
4068c6fd2807SJeff Garzik  *	LOCKING:
4069c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4070c6fd2807SJeff Garzik  *
4071c6fd2807SJeff Garzik  *	RETURNS:
4072c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
4073c6fd2807SJeff Garzik  */
4074fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4075c6fd2807SJeff Garzik {
4076c6fd2807SJeff Garzik 	unsigned int class = dev->class;
40779af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
4078c6fd2807SJeff Garzik 	int rc;
4079c6fd2807SJeff Garzik 
4080c6fd2807SJeff Garzik 	/* read ID data */
4081bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4082c6fd2807SJeff Garzik 	if (rc)
4083fe30911bSTejun Heo 		return rc;
4084c6fd2807SJeff Garzik 
4085c6fd2807SJeff Garzik 	/* is the device still there? */
4086fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
4087fe30911bSTejun Heo 		return -ENODEV;
4088c6fd2807SJeff Garzik 
4089c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4090fe30911bSTejun Heo 	return 0;
4091fe30911bSTejun Heo }
4092fe30911bSTejun Heo 
4093fe30911bSTejun Heo /**
4094fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
4095fe30911bSTejun Heo  *	@dev: device to revalidate
4096422c9daaSTejun Heo  *	@new_class: new class code
4097fe30911bSTejun Heo  *	@readid_flags: read ID flags
4098fe30911bSTejun Heo  *
4099fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4100fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
4101fe30911bSTejun Heo  *
4102fe30911bSTejun Heo  *	LOCKING:
4103fe30911bSTejun Heo  *	Kernel thread context (may sleep)
4104fe30911bSTejun Heo  *
4105fe30911bSTejun Heo  *	RETURNS:
4106fe30911bSTejun Heo  *	0 on success, negative errno otherwise
4107fe30911bSTejun Heo  */
4108422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4109422c9daaSTejun Heo 		       unsigned int readid_flags)
4110fe30911bSTejun Heo {
41116ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
4112fe30911bSTejun Heo 	int rc;
4113fe30911bSTejun Heo 
4114fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
4115fe30911bSTejun Heo 		return -ENODEV;
4116fe30911bSTejun Heo 
4117422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4118422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
4119422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4120422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4121422c9daaSTejun Heo 			       dev->class, new_class);
4122422c9daaSTejun Heo 		rc = -ENODEV;
4123422c9daaSTejun Heo 		goto fail;
4124422c9daaSTejun Heo 	}
4125422c9daaSTejun Heo 
4126fe30911bSTejun Heo 	/* re-read ID */
4127fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
4128fe30911bSTejun Heo 	if (rc)
4129fe30911bSTejun Heo 		goto fail;
4130c6fd2807SJeff Garzik 
4131c6fd2807SJeff Garzik 	/* configure device according to the new ID */
4132efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
41336ddcd3b0STejun Heo 	if (rc)
41346ddcd3b0STejun Heo 		goto fail;
41356ddcd3b0STejun Heo 
41366ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
4137b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4138b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
41396ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
41406ddcd3b0STejun Heo 			       "%llu != %llu\n",
41416ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
41426ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
41438270bec4STejun Heo 
41448270bec4STejun Heo 		/* restore original n_sectors */
41458270bec4STejun Heo 		dev->n_sectors = n_sectors;
41468270bec4STejun Heo 
41476ddcd3b0STejun Heo 		rc = -ENODEV;
41486ddcd3b0STejun Heo 		goto fail;
41496ddcd3b0STejun Heo 	}
41506ddcd3b0STejun Heo 
4151c6fd2807SJeff Garzik 	return 0;
4152c6fd2807SJeff Garzik 
4153c6fd2807SJeff Garzik  fail:
4154c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4155c6fd2807SJeff Garzik 	return rc;
4156c6fd2807SJeff Garzik }
4157c6fd2807SJeff Garzik 
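/*
 * Illustrative only: the EH core revalidates a device after reset
 * roughly as
 *
 *	rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
 *				ATA_READID_POSTRESET);
 */
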
41586919a0a6SAlan Cox struct ata_blacklist_entry {
41596919a0a6SAlan Cox 	const char *model_num;
41606919a0a6SAlan Cox 	const char *model_rev;
41616919a0a6SAlan Cox 	unsigned long horkage;
41626919a0a6SAlan Cox };
41636919a0a6SAlan Cox 
41646919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
41656919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
41666919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
41676919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
41686919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
41696919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
41706919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
41716919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
41726919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
41736919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
41746919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
41756919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
41766919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
41776919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
41786919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
41796919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
41806919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
41816919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
41826919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
41836919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
41846919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
41856919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
41866919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
41876919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
41886919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
41896919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
41906919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
41916919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
41926919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
41936919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
41946919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
419539f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
41963af9a77aSTejun Heo 	/* Odd clown on sil3726/4726 PMPs */
41973af9a77aSTejun Heo 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
41983af9a77aSTejun Heo 						ATA_HORKAGE_SKIP_PM },
41996919a0a6SAlan Cox 
420018d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
420140a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
420218d6e9d5SAlbert Lee 
42036919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
42046919a0a6SAlan Cox 
42056919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
42066919a0a6SAlan Cox 	/* NCQ is slow */
42076919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
420809125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
420909125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
42107acfaf30SPaul Rolland 	/* NCQ is broken */
4211539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
42120e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
42130b0a43e0SDavid Milburn 	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
42140b0a43e0SDavid Milburn 	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
4215da6f0ec2SPaolo Ornati 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4216539cc7c7SJeff Garzik 
421736e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
421836e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
421936e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
422036e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
422136e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
4222bd9c5a39STejun Heo 	/* Drives which do spurious command completion */
4223bd9c5a39STejun Heo 	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
42242f8fcebbSTejun Heo 	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
422570edb185STejun Heo 	{ "HDT722516DLA380",	"V43OA96A",	ATA_HORKAGE_NONCQ, },
4226e14cbfa6STejun Heo 	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
42270c173174STejun Heo 	{ "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
42282f8fcebbSTejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
42297f567620STejun Heo 	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
4230a520f261STejun Heo 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
42317f567620STejun Heo 	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
42323fb6589cSTejun Heo 	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
4233954bb005STejun Heo 	{ "ST9160821AS",	"3.ALD",	ATA_HORKAGE_NONCQ, },
423413587960STejun Heo 	{ "ST9160821AS",	"3.CCD",	ATA_HORKAGE_NONCQ, },
42357f567620STejun Heo 	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
42367f567620STejun Heo 	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
42375d6aca8dSTejun Heo 	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
423812850ffeSTejun Heo 	{ "Maxtor 7V300F0",	"VA111900",	ATA_HORKAGE_NONCQ, },
42396919a0a6SAlan Cox 
424016c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
424116c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
424216c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
424316c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
424416c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
42456919a0a6SAlan Cox 
424693328e11SAlan Cox 	/* Devices which report 1 sector over size HPA */
424793328e11SAlan Cox 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
424893328e11SAlan Cox 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
424993328e11SAlan Cox 
42506bbfd53dSAlan Cox 	/* Devices which get the IVB wrong */
42516bbfd53dSAlan Cox 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
42526bbfd53dSAlan Cox 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
42536bbfd53dSAlan Cox 
42546919a0a6SAlan Cox 	/* End Marker */
42556919a0a6SAlan Cox 	{ }
4256c6fd2807SJeff Garzik };
4257c6fd2807SJeff Garzik 
4258741b7763SAdrian Bunk static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4259539cc7c7SJeff Garzik {
4260539cc7c7SJeff Garzik 	const char *p;
4261539cc7c7SJeff Garzik 	int len;
4262539cc7c7SJeff Garzik 
4263539cc7c7SJeff Garzik 	/*
4264539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
4265539cc7c7SJeff Garzik 	 */
4266539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
4267539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
4268539cc7c7SJeff Garzik 		len = p - patt;
4269317b50b8SAndrew Paprocki 	else {
4270539cc7c7SJeff Garzik 		len = strlen(name);
4271317b50b8SAndrew Paprocki 		if (!len) {
4272317b50b8SAndrew Paprocki 			if (!*patt)
4273317b50b8SAndrew Paprocki 				return 0;
4274317b50b8SAndrew Paprocki 			return -1;
4275317b50b8SAndrew Paprocki 		}
4276317b50b8SAndrew Paprocki 	}
4277539cc7c7SJeff Garzik 
4278539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
4279539cc7c7SJeff Garzik }
4280539cc7c7SJeff Garzik 
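/*
 * Illustrative only: strn_pattern_cmp() understands a single trailing
 * wildcard, so for the blacklist table above
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*')	== 0
 *	strn_pattern_cmp("ST9160821AS", "ST9120822AS", '*')	!= 0
 */
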
428175683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4282c6fd2807SJeff Garzik {
42838bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
42848bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
42856919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4286c6fd2807SJeff Garzik 
42878bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
42888bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4289c6fd2807SJeff Garzik 
42906919a0a6SAlan Cox 	while (ad->model_num) {
4291539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
42926919a0a6SAlan Cox 			if (ad->model_rev == NULL)
42936919a0a6SAlan Cox 				return ad->horkage;
4294539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
42956919a0a6SAlan Cox 				return ad->horkage;
4296c6fd2807SJeff Garzik 		}
42976919a0a6SAlan Cox 		ad++;
4298c6fd2807SJeff Garzik 	}
4299c6fd2807SJeff Garzik 	return 0;
4300c6fd2807SJeff Garzik }
4301c6fd2807SJeff Garzik 
43026919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
43036919a0a6SAlan Cox {
43046919a0a6SAlan Cox 	/* We don't support polling DMA.
43056919a0a6SAlan Cox 	 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO instead)
43066919a0a6SAlan Cox 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
43076919a0a6SAlan Cox 	 */
43089af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
43096919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
43106919a0a6SAlan Cox 		return 1;
431175683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
43126919a0a6SAlan Cox }
43136919a0a6SAlan Cox 
4314c6fd2807SJeff Garzik /**
43156bbfd53dSAlan Cox  *	ata_is_40wire		-	check drive side detection
43166bbfd53dSAlan Cox  *	@dev: device
43176bbfd53dSAlan Cox  *
43186bbfd53dSAlan Cox  *	Perform drive side detection decoding, allowing for device vendors
43196bbfd53dSAlan Cox  *	who can't follow the documentation.
43206bbfd53dSAlan Cox  */
43216bbfd53dSAlan Cox 
43226bbfd53dSAlan Cox static int ata_is_40wire(struct ata_device *dev)
43236bbfd53dSAlan Cox {
43246bbfd53dSAlan Cox 	if (dev->horkage & ATA_HORKAGE_IVB)
43256bbfd53dSAlan Cox 		return ata_drive_40wire_relaxed(dev->id);
43266bbfd53dSAlan Cox 	return ata_drive_40wire(dev->id);
43276bbfd53dSAlan Cox }
43286bbfd53dSAlan Cox 
43296bbfd53dSAlan Cox /**
4330c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
4331c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
4332c6fd2807SJeff Garzik  *
4333c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
4334c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
4335c6fd2807SJeff Garzik  *	known limits including host controller limits, device
4336c6fd2807SJeff Garzik  *	blacklist, etc...
4337c6fd2807SJeff Garzik  *
4338c6fd2807SJeff Garzik  *	LOCKING:
4339c6fd2807SJeff Garzik  *	None.
4340c6fd2807SJeff Garzik  */
4341c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
4342c6fd2807SJeff Garzik {
43439af5c9c9STejun Heo 	struct ata_link *link = dev->link;
43449af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
4345cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
4346c6fd2807SJeff Garzik 	unsigned long xfer_mask;
4347c6fd2807SJeff Garzik 
4348c6fd2807SJeff Garzik 	/* controller modes available */
4349c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4350c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
4351c6fd2807SJeff Garzik 
43528343f889SRobert Hancock 	/* drive modes available */
4353c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4354c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
4355c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
4356c6fd2807SJeff Garzik 
4357b352e57dSAlan Cox 	/*
4358b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4359b352e57dSAlan Cox 	 *	cable
4360b352e57dSAlan Cox 	 */
4361b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
4362b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
4363b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4364b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
4365b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4366b352e57dSAlan Cox 	}
4367b352e57dSAlan Cox 
4368c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
4369c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4370c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4371c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4372c6fd2807SJeff Garzik 	}
4373c6fd2807SJeff Garzik 
437414d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
437514d66ab7SPetr Vandrovec 	    host->simplex_claimed && host->simplex_claimed != ap) {
4376c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4377c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4378c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4379c6fd2807SJeff Garzik 	}
4380c6fd2807SJeff Garzik 
4381e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4382e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4383e424675fSJeff Garzik 
4384c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4385a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4386c6fd2807SJeff Garzik 
43878343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
43888343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
43898343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
43908343f889SRobert Hancock 	 * solely limited by the cable.
43918343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
43928343f889SRobert Hancock 	 * drive side as well. Cases where we know a 40wire cable
43938343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
43948343f889SRobert Hancock 	 */
43958343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
43968343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
43978343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
43986bbfd53dSAlan Cox 		    (ata_is_40wire(dev) &&
43998343f889SRobert Hancock 		    (ap->cbl == ATA_CBL_PATA_UNK ||
44008343f889SRobert Hancock 		     ap->cbl == ATA_CBL_PATA80))) {
44018343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
44028343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
44038343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
44048343f889SRobert Hancock 		}
44058343f889SRobert Hancock 
4406c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4407c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4408c6fd2807SJeff Garzik }
4409c6fd2807SJeff Garzik 
4410c6fd2807SJeff Garzik /**
4411c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4412c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4413c6fd2807SJeff Garzik  *
4414c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4415c6fd2807SJeff Garzik  *	on port @ap.
4416c6fd2807SJeff Garzik  *
4417c6fd2807SJeff Garzik  *	LOCKING:
4418c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4419c6fd2807SJeff Garzik  *
4420c6fd2807SJeff Garzik  *	RETURNS:
4421c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4422c6fd2807SJeff Garzik  */
4423c6fd2807SJeff Garzik 
4424c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4425c6fd2807SJeff Garzik {
4426c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4427c6fd2807SJeff Garzik 	unsigned int err_mask;
4428c6fd2807SJeff Garzik 
4429c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4430c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4431c6fd2807SJeff Garzik 
4432464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4433464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4434464cf177STejun Heo 	 */
4435c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4436c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4437c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4438464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4439c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4440c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
4441c6fd2807SJeff Garzik 
44422b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4443c6fd2807SJeff Garzik 
4444c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4445c6fd2807SJeff Garzik 	return err_mask;
4446c6fd2807SJeff Garzik }
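
/*
 * Illustrative only: dev->xfer_mode holds an XFER_* opcode (e.g.
 * XFER_UDMA_5 == 0x45), which the device receives in the sector count
 * register of the SET FEATURES - XFER MODE command above.
 */
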
4447c6fd2807SJeff Garzik /**
4448218f3d30SJeff Garzik  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
44499f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
44509f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
4451218f3d30SJeff Garzik  *	@feature: The feature to set, passed in the sector count register
44529f45cbd3SKristen Carlson Accardi  *
44539f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4454218f3d30SJeff Garzik  *	on port @ap, with the sector count set to @feature.
44559f45cbd3SKristen Carlson Accardi  *
44569f45cbd3SKristen Carlson Accardi  *	LOCKING:
44579f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
44589f45cbd3SKristen Carlson Accardi  *
44599f45cbd3SKristen Carlson Accardi  *	RETURNS:
44609f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
44619f45cbd3SKristen Carlson Accardi  */
4462218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4463218f3d30SJeff Garzik 					u8 feature)
44649f45cbd3SKristen Carlson Accardi {
44659f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
44669f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
44679f45cbd3SKristen Carlson Accardi 
44689f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
44699f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
44709f45cbd3SKristen Carlson Accardi 
44719f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
44729f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
44739f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
44749f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
44759f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
4476218f3d30SJeff Garzik 	tf.nsect = feature;
44779f45cbd3SKristen Carlson Accardi 
44782b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
44799f45cbd3SKristen Carlson Accardi 
44809f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
44819f45cbd3SKristen Carlson Accardi 	return err_mask;
44829f45cbd3SKristen Carlson Accardi }
44839f45cbd3SKristen Carlson Accardi 
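/*
 * Illustrative only: enabling a SATA feature such as asynchronous
 * notification would look roughly like
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_AN);
 */
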
44849f45cbd3SKristen Carlson Accardi /**
4485c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4486c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4487c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4488c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4489c6fd2807SJeff Garzik  *
4490c6fd2807SJeff Garzik  *	LOCKING:
4491c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4492c6fd2807SJeff Garzik  *
4493c6fd2807SJeff Garzik  *	RETURNS:
4494c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4495c6fd2807SJeff Garzik  */
4496c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4497c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4498c6fd2807SJeff Garzik {
4499c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4500c6fd2807SJeff Garzik 	unsigned int err_mask;
4501c6fd2807SJeff Garzik 
4502c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4503c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4504c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4505c6fd2807SJeff Garzik 
4506c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4507c6fd2807SJeff Garzik 	DPRINTK("init dev params\n");
4508c6fd2807SJeff Garzik 
4509c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4510c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4511c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4512c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4513c6fd2807SJeff Garzik 	tf.nsect = sectors;
4514c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4515c6fd2807SJeff Garzik 
45162b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
451718b2466cSAlan Cox 	/* A clean abort indicates an original or just-out-of-spec drive,
451818b2466cSAlan Cox 	   and we should continue since we issue the setup based on the
451918b2466cSAlan Cox 	   drive-reported working geometry */
452018b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
452118b2466cSAlan Cox 		err_mask = 0;
4522c6fd2807SJeff Garzik 
4523c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4524c6fd2807SJeff Garzik 	return err_mask;
4525c6fd2807SJeff Garzik }
4526c6fd2807SJeff Garzik 
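/*
 * Worked example, illustrative only: for a legacy drive reporting 16 heads
 * and 63 sectors per track, the taskfile built above carries
 *
 *	ata_dev_init_params(dev, 16, 63);
 *	  tf.nsect   = 63
 *	  tf.device |= (16 - 1) & 0x0f	-> low nibble 0x0f (max head index)
 */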
4527c6fd2807SJeff Garzik /**
4528c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4529c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4530c6fd2807SJeff Garzik  *
4531c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4532c6fd2807SJeff Garzik  *
4533c6fd2807SJeff Garzik  *	LOCKING:
4534cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4535c6fd2807SJeff Garzik  */
453670e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4537c6fd2807SJeff Garzik {
4538c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4539c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4540c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4541c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4542c6fd2807SJeff Garzik 
4543c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4544c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4545c6fd2807SJeff Garzik 
4546c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4547c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4548c6fd2807SJeff Garzik 
4549c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4550c6fd2807SJeff Garzik 
4551c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
4552c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4553c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4554c6fd2807SJeff Garzik 	 */
4555c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4556c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4557c6fd2807SJeff Garzik 
4558c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4559c6fd2807SJeff Garzik 		if (qc->n_elem)
4560c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4561c6fd2807SJeff Garzik 		/* restore last sg */
456287260216SJens Axboe 		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4563c6fd2807SJeff Garzik 		if (pad_buf) {
4564c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
456545711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4566c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4567c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4568c6fd2807SJeff Garzik 		}
4569c6fd2807SJeff Garzik 	} else {
4570c6fd2807SJeff Garzik 		if (qc->n_elem)
4571c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4572c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4573c6fd2807SJeff Garzik 				dir);
4574c6fd2807SJeff Garzik 		/* restore sg */
4575c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4576c6fd2807SJeff Garzik 		if (pad_buf)
4577c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4578c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4579c6fd2807SJeff Garzik 	}
4580c6fd2807SJeff Garzik 
4581c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4582c6fd2807SJeff Garzik 	qc->__sg = NULL;
4583c6fd2807SJeff Garzik }
4584c6fd2807SJeff Garzik 
4585c6fd2807SJeff Garzik /**
4586c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4587c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4588c6fd2807SJeff Garzik  *
4589c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4590c6fd2807SJeff Garzik  *	associated with the current disk command.
4591c6fd2807SJeff Garzik  *
4592c6fd2807SJeff Garzik  *	LOCKING:
4593cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4594c6fd2807SJeff Garzik  *
4595c6fd2807SJeff Garzik  */
4596c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4597c6fd2807SJeff Garzik {
4598c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4599c6fd2807SJeff Garzik 	struct scatterlist *sg;
4600c6fd2807SJeff Garzik 	unsigned int idx;
4601c6fd2807SJeff Garzik 
4602c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4603c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4604c6fd2807SJeff Garzik 
4605c6fd2807SJeff Garzik 	idx = 0;
4606c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4607c6fd2807SJeff Garzik 		u32 addr, offset;
4608c6fd2807SJeff Garzik 		u32 sg_len, len;
4609c6fd2807SJeff Garzik 
4610c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4611c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4612c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4613c6fd2807SJeff Garzik 		 */
4614c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4615c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4616c6fd2807SJeff Garzik 
4617c6fd2807SJeff Garzik 		while (sg_len) {
4618c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4619c6fd2807SJeff Garzik 			len = sg_len;
4620c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4621c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4622c6fd2807SJeff Garzik 
4623c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4624c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4625c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4626c6fd2807SJeff Garzik 
4627c6fd2807SJeff Garzik 			idx++;
4628c6fd2807SJeff Garzik 			sg_len -= len;
4629c6fd2807SJeff Garzik 			addr += len;
4630c6fd2807SJeff Garzik 		}
4631c6fd2807SJeff Garzik 	}
4632c6fd2807SJeff Garzik 
4633c6fd2807SJeff Garzik 	if (idx)
4634c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4635c6fd2807SJeff Garzik }
4636b9a4197eSTejun Heo 
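/*
 * Worked example, illustrative only: one S/G element of 12KB at bus
 * address 0x1FFFF000 crosses a 64K boundary, so the loop above emits two
 * PRD entries:
 *
 *	PRD[0] = { .addr = 0x1FFFF000, .flags_len = 0x1000 }	4KB up to the boundary
 *	PRD[1] = { .addr = 0x20000000, .flags_len = 0x2000 }	remaining 8KB
 *
 * with ATA_PRD_EOT OR'd into flags_len of the final entry.
 */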
4637c6fd2807SJeff Garzik /**
4638d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4639d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4640d26fc955SAlan Cox  *
4641d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4642d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4643d26fc955SAlan Cox  *	so that we avoid writing any 64K-length records (length field
4644d26fc955SAlan Cox  *	0x0000) for controllers that don't follow the spec.
4645d26fc955SAlan Cox  *
4646d26fc955SAlan Cox  *	LOCKING:
4647d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4648d26fc955SAlan Cox  *
4649d26fc955SAlan Cox  */
4650d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4651d26fc955SAlan Cox {
4652d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4653d26fc955SAlan Cox 	struct scatterlist *sg;
4654d26fc955SAlan Cox 	unsigned int idx;
4655d26fc955SAlan Cox 
4656d26fc955SAlan Cox 	WARN_ON(qc->__sg == NULL);
4657d26fc955SAlan Cox 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4658d26fc955SAlan Cox 
4659d26fc955SAlan Cox 	idx = 0;
4660d26fc955SAlan Cox 	ata_for_each_sg(sg, qc) {
4661d26fc955SAlan Cox 		u32 addr, offset;
4662d26fc955SAlan Cox 		u32 sg_len, len, blen;
4663d26fc955SAlan Cox 
4664d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4665d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4666d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4667d26fc955SAlan Cox 		 */
4668d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4669d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4670d26fc955SAlan Cox 
4671d26fc955SAlan Cox 		while (sg_len) {
4672d26fc955SAlan Cox 			offset = addr & 0xffff;
4673d26fc955SAlan Cox 			len = sg_len;
4674d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4675d26fc955SAlan Cox 				len = 0x10000 - offset;
4676d26fc955SAlan Cox 
4677d26fc955SAlan Cox 			blen = len & 0xffff;
4678d26fc955SAlan Cox 			ap->prd[idx].addr = cpu_to_le32(addr);
4679d26fc955SAlan Cox 			if (blen == 0) {
4680d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4681d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4682d26fc955SAlan Cox 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4683d26fc955SAlan Cox 				blen = 0x8000;
4684d26fc955SAlan Cox 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4685d26fc955SAlan Cox 			}
4686d26fc955SAlan Cox 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4687d26fc955SAlan Cox 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4688d26fc955SAlan Cox 
4689d26fc955SAlan Cox 			idx++;
4690d26fc955SAlan Cox 			sg_len -= len;
4691d26fc955SAlan Cox 			addr += len;
4692d26fc955SAlan Cox 		}
4693d26fc955SAlan Cox 	}
4694d26fc955SAlan Cox 
4695d26fc955SAlan Cox 	if (idx)
4696d26fc955SAlan Cox 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4697d26fc955SAlan Cox }
4698d26fc955SAlan Cox 
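/*
 * Worked example, illustrative only: a 64K-aligned, 64K-long chunk would
 * normally need a single PRD whose length field is 0x0000 (the spec's
 * encoding of 64K).  The dumb variant above splits it into two 32K
 * entries instead:
 *
 *	PRD[0] = { .addr = 0x20000000, .flags_len = 0x8000 }
 *	PRD[1] = { .addr = 0x20008000, .flags_len = 0x8000 }
 *
 * so chipsets such as the CS5530 never see a zero length field.
 */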
4699d26fc955SAlan Cox /**
4700c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4701c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4702c6fd2807SJeff Garzik  *
4703c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4704c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4705c6fd2807SJeff Garzik  *	supplied PACKET command.
4706c6fd2807SJeff Garzik  *
4707c6fd2807SJeff Garzik  *	LOCKING:
4708cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4709c6fd2807SJeff Garzik  *
4710c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4711c6fd2807SJeff Garzik  *               nonzero otherwise
4712c6fd2807SJeff Garzik  */
4713c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4714c6fd2807SJeff Garzik {
4715c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4716c6fd2807SJeff Garzik 
4717b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4718b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4719b9a4197eSTejun Heo 	 */
4720b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
47216f23a31dSAlbert Lee 		return 1;
47226f23a31dSAlbert Lee 
4723c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4724b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4725c6fd2807SJeff Garzik 
4726b9a4197eSTejun Heo 	return 0;
4727c6fd2807SJeff Garzik }
4728b9a4197eSTejun Heo 
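/*
 * Illustrative example: a 510-byte ATAPI transfer has (nbytes & 15) == 14,
 * so the check above forces it back to PIO even when the LLD's
 * ->check_atapi_dma() callback would have allowed DMA.
 */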
4729c6fd2807SJeff Garzik /**
473031cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
473131cc23b3STejun Heo  *	@qc: ATA command in question
473231cc23b3STejun Heo  *
473331cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
473431cc23b3STejun Heo  *	not.  As upper layer only knows the queue depth, we are
473431cc23b3STejun Heo  *	not.  As the upper layer only knows the queue depth, we are
473631cc23b3STejun Heo  *	whether a new command @qc can be issued.
473731cc23b3STejun Heo  *
473831cc23b3STejun Heo  *	LOCKING:
473931cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
474031cc23b3STejun Heo  *
474131cc23b3STejun Heo  *	RETURNS:
474231cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
474331cc23b3STejun Heo  */
474431cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
474531cc23b3STejun Heo {
474631cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
474731cc23b3STejun Heo 
474831cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
474931cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
475031cc23b3STejun Heo 			return 0;
475131cc23b3STejun Heo 	} else {
475231cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
475331cc23b3STejun Heo 			return 0;
475431cc23b3STejun Heo 	}
475531cc23b3STejun Heo 
475631cc23b3STejun Heo 	return ATA_DEFER_LINK;
475731cc23b3STejun Heo }
475831cc23b3STejun Heo 
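/*
 * Illustrative summary of the checks above:
 *
 *	incoming qc	non-NCQ cmd active	NCQ cmds queued		result
 *	NCQ		no			don't care		issue (0)
 *	NCQ		yes			-			ATA_DEFER_LINK
 *	non-NCQ		no			no			issue (0)
 *	non-NCQ		any other combination				ATA_DEFER_LINK
 */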
475931cc23b3STejun Heo /**
4760c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4761c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4762c6fd2807SJeff Garzik  *
4763c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4764c6fd2807SJeff Garzik  *
4765c6fd2807SJeff Garzik  *	LOCKING:
4766cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4767c6fd2807SJeff Garzik  */
4768c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4769c6fd2807SJeff Garzik {
4770c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4771c6fd2807SJeff Garzik 		return;
4772c6fd2807SJeff Garzik 
4773c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4774c6fd2807SJeff Garzik }
4775c6fd2807SJeff Garzik 
4776d26fc955SAlan Cox /**
4777d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4778d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4779d26fc955SAlan Cox  *
4780d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4781d26fc955SAlan Cox  *
4782d26fc955SAlan Cox  *	LOCKING:
4783d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4784d26fc955SAlan Cox  */
4785d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4786d26fc955SAlan Cox {
4787d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4788d26fc955SAlan Cox 		return;
4789d26fc955SAlan Cox 
4790d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4791d26fc955SAlan Cox }
4792d26fc955SAlan Cox 
4793c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4794c6fd2807SJeff Garzik 
4795c6fd2807SJeff Garzik /**
4796c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4797c6fd2807SJeff Garzik  *	@qc: Command to be associated
4798c6fd2807SJeff Garzik  *	@buf: Memory buffer
4799c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4800c6fd2807SJeff Garzik  *
4801c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4802c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4803c6fd2807SJeff Garzik  *
4804c6fd2807SJeff Garzik  *	LOCKING:
4805cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4806c6fd2807SJeff Garzik  */
4807c6fd2807SJeff Garzik 
4808c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4809c6fd2807SJeff Garzik {
4810c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4811c6fd2807SJeff Garzik 
4812c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4813c6fd2807SJeff Garzik 	qc->n_elem = 1;
4814c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4815c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4816c6fd2807SJeff Garzik 	qc->nbytes = buflen;
481787260216SJens Axboe 	qc->cursg = qc->__sg;
4818c6fd2807SJeff Garzik 
481961c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4820c6fd2807SJeff Garzik }
4821c6fd2807SJeff Garzik 
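/*
 * Illustrative sketch, not part of the driver: a caller owning a single
 * kernel buffer pairs this with a DMA direction before issuing, e.g. for
 * a device-to-memory transfer:
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 */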
4822c6fd2807SJeff Garzik /**
4823c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4824c6fd2807SJeff Garzik  *	@qc: Command to be associated
4825c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4826c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4827c6fd2807SJeff Garzik  *
4828c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4829c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4830c6fd2807SJeff Garzik  *	elements.
4831c6fd2807SJeff Garzik  *
4832c6fd2807SJeff Garzik  *	LOCKING:
4833cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4834c6fd2807SJeff Garzik  */
4835c6fd2807SJeff Garzik 
4836c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4837c6fd2807SJeff Garzik 		 unsigned int n_elem)
4838c6fd2807SJeff Garzik {
4839c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4840c6fd2807SJeff Garzik 	qc->__sg = sg;
4841c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4842c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
484387260216SJens Axboe 	qc->cursg = qc->__sg;
4844c6fd2807SJeff Garzik }
4845c6fd2807SJeff Garzik 
4846c6fd2807SJeff Garzik /**
4847c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4848c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4849c6fd2807SJeff Garzik  *
4850c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4851c6fd2807SJeff Garzik  *
4852c6fd2807SJeff Garzik  *	LOCKING:
4853cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4854c6fd2807SJeff Garzik  *
4855c6fd2807SJeff Garzik  *	RETURNS:
4856c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4857c6fd2807SJeff Garzik  */
4858c6fd2807SJeff Garzik 
4859c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4860c6fd2807SJeff Garzik {
4861c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4862c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4863c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4864c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4865c6fd2807SJeff Garzik 	int trim_sg = 0;
4866c6fd2807SJeff Garzik 
4867c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4868c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4869c6fd2807SJeff Garzik 	if (qc->pad_len) {
4870c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4871c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4872c6fd2807SJeff Garzik 
4873c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4874c6fd2807SJeff Garzik 
4875c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4876c6fd2807SJeff Garzik 
4877c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4878c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4879c6fd2807SJeff Garzik 			       qc->pad_len);
4880c6fd2807SJeff Garzik 
4881c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4882c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4883c6fd2807SJeff Garzik 		/* trim sg */
4884c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4885c6fd2807SJeff Garzik 		if (sg->length == 0)
4886c6fd2807SJeff Garzik 			trim_sg = 1;
4887c6fd2807SJeff Garzik 
4888c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4889c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4890c6fd2807SJeff Garzik 	}
4891c6fd2807SJeff Garzik 
4892c6fd2807SJeff Garzik 	if (trim_sg) {
4893c6fd2807SJeff Garzik 		qc->n_elem--;
4894c6fd2807SJeff Garzik 		goto skip_map;
4895c6fd2807SJeff Garzik 	}
4896c6fd2807SJeff Garzik 
4897c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4898c6fd2807SJeff Garzik 				     sg->length, dir);
4899c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4900c6fd2807SJeff Garzik 		/* restore sg */
4901c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4902c6fd2807SJeff Garzik 		return -1;
4903c6fd2807SJeff Garzik 	}
4904c6fd2807SJeff Garzik 
4905c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4906c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4907c6fd2807SJeff Garzik 
4908c6fd2807SJeff Garzik skip_map:
4909c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4910c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4911c6fd2807SJeff Garzik 
4912c6fd2807SJeff Garzik 	return 0;
4913c6fd2807SJeff Garzik }
4914c6fd2807SJeff Garzik 
4915c6fd2807SJeff Garzik /**
4916c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4917c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4918c6fd2807SJeff Garzik  *
4919c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4920c6fd2807SJeff Garzik  *
4921c6fd2807SJeff Garzik  *	LOCKING:
4922cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4923c6fd2807SJeff Garzik  *
4924c6fd2807SJeff Garzik  *	RETURNS:
4925c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4926c6fd2807SJeff Garzik  *
4927c6fd2807SJeff Garzik  */
4928c6fd2807SJeff Garzik 
4929c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4930c6fd2807SJeff Garzik {
4931c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4932c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
493387260216SJens Axboe 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4934c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4935c6fd2807SJeff Garzik 
493644877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4937c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4938c6fd2807SJeff Garzik 
4939c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4940c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4941c6fd2807SJeff Garzik 	if (qc->pad_len) {
4942c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4943c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4944c6fd2807SJeff Garzik 		unsigned int offset;
4945c6fd2807SJeff Garzik 
4946c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4947c6fd2807SJeff Garzik 
4948c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4949c6fd2807SJeff Garzik 
4950c6fd2807SJeff Garzik 		/*
4951c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4952c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4953c6fd2807SJeff Garzik 		 */
4954c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4955acd054a5SAnton Blanchard 		sg_init_table(psg, 1);
4956642f1490SJens Axboe 		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4957642f1490SJens Axboe 				qc->pad_len, offset_in_page(offset));
4958c6fd2807SJeff Garzik 
4959c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
496045711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4961c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4962c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4963c6fd2807SJeff Garzik 		}
4964c6fd2807SJeff Garzik 
4965c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4966c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4967c6fd2807SJeff Garzik 		/* trim last sg */
4968c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4969c6fd2807SJeff Garzik 		if (lsg->length == 0)
4970c6fd2807SJeff Garzik 			trim_sg = 1;
4971c6fd2807SJeff Garzik 
4972c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4973c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4974c6fd2807SJeff Garzik 	}
4975c6fd2807SJeff Garzik 
4976c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4977c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4978c6fd2807SJeff Garzik 		pre_n_elem--;
4979c6fd2807SJeff Garzik 
4980c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4981c6fd2807SJeff Garzik 		n_elem = 0;
4982c6fd2807SJeff Garzik 		goto skip_map;
4983c6fd2807SJeff Garzik 	}
4984c6fd2807SJeff Garzik 
4985c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4986c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4987c6fd2807SJeff Garzik 	if (n_elem < 1) {
4988c6fd2807SJeff Garzik 		/* restore last sg */
4989c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4990c6fd2807SJeff Garzik 		return -1;
4991c6fd2807SJeff Garzik 	}
4992c6fd2807SJeff Garzik 
4993c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4994c6fd2807SJeff Garzik 
4995c6fd2807SJeff Garzik skip_map:
4996c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4997c6fd2807SJeff Garzik 
4998c6fd2807SJeff Garzik 	return 0;
4999c6fd2807SJeff Garzik }
5000c6fd2807SJeff Garzik 
5001c6fd2807SJeff Garzik /**
5002c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
5003c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
5004c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
5005c6fd2807SJeff Garzik  *
5006c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
5007c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
5008c6fd2807SJeff Garzik  *	vice-versa.
5009c6fd2807SJeff Garzik  *
5010c6fd2807SJeff Garzik  *	LOCKING:
5011c6fd2807SJeff Garzik  *	Inherited from caller.
5012c6fd2807SJeff Garzik  */
5013c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
5014c6fd2807SJeff Garzik {
5015c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
5016c6fd2807SJeff Garzik 	unsigned int i;
5017c6fd2807SJeff Garzik 
5018c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
5019c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
5020c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
5021c6fd2807SJeff Garzik }
5022c6fd2807SJeff Garzik 
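/*
 * Illustrative example: IDENTIFY data arrives as 256 little-endian words,
 * so the device-ID paths apply
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On a big-endian CPU a word read as 0x3412 becomes 0x1234; on
 * little-endian machines the loop compiles away entirely.
 */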
5023c6fd2807SJeff Garzik /**
50240d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
5025c6fd2807SJeff Garzik  *	@adev: device to target
5026c6fd2807SJeff Garzik  *	@buf: data buffer
5027c6fd2807SJeff Garzik  *	@buflen: buffer length
5028c6fd2807SJeff Garzik  *	@write_data: read/write
5029c6fd2807SJeff Garzik  *
5030c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
5031c6fd2807SJeff Garzik  *
5032c6fd2807SJeff Garzik  *	LOCKING:
5033c6fd2807SJeff Garzik  *	Inherited from caller.
5034c6fd2807SJeff Garzik  */
50350d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
5036c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
5037c6fd2807SJeff Garzik {
50389af5c9c9STejun Heo 	struct ata_port *ap = adev->link->ap;
5039c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
5040c6fd2807SJeff Garzik 
5041c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
5042c6fd2807SJeff Garzik 	if (write_data)
50430d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
5044c6fd2807SJeff Garzik 	else
50450d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
5046c6fd2807SJeff Garzik 
5047c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
5048c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
5049c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
5050c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
5051c6fd2807SJeff Garzik 
5052c6fd2807SJeff Garzik 		if (write_data) {
5053c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
50540d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
5055c6fd2807SJeff Garzik 		} else {
50560d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
5057c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
5058c6fd2807SJeff Garzik 		}
5059c6fd2807SJeff Garzik 	}
5060c6fd2807SJeff Garzik }
5061c6fd2807SJeff Garzik 
5062c6fd2807SJeff Garzik /**
50630d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
5064c6fd2807SJeff Garzik  *	@adev: device to target
5065c6fd2807SJeff Garzik  *	@buf: data buffer
5066c6fd2807SJeff Garzik  *	@buflen: buffer length
5067c6fd2807SJeff Garzik  *	@write_data: read/write
5068c6fd2807SJeff Garzik  *
5069c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
5070c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
5071c6fd2807SJeff Garzik  *
5072c6fd2807SJeff Garzik  *	LOCKING:
5073c6fd2807SJeff Garzik  *	Inherited from caller.
5074c6fd2807SJeff Garzik  */
50750d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
5076c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
5077c6fd2807SJeff Garzik {
5078c6fd2807SJeff Garzik 	unsigned long flags;
5079c6fd2807SJeff Garzik 	local_irq_save(flags);
50800d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
5081c6fd2807SJeff Garzik 	local_irq_restore(flags);
5082c6fd2807SJeff Garzik }
5083c6fd2807SJeff Garzik 
5084c6fd2807SJeff Garzik 
5085c6fd2807SJeff Garzik /**
50865a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
5087c6fd2807SJeff Garzik  *	@qc: Command on going
5088c6fd2807SJeff Garzik  *
50895a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
5090c6fd2807SJeff Garzik  *
5091c6fd2807SJeff Garzik  *	LOCKING:
5092c6fd2807SJeff Garzik  *	Inherited from caller.
5093c6fd2807SJeff Garzik  */
5094c6fd2807SJeff Garzik 
5095c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
5096c6fd2807SJeff Garzik {
5097c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5098c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5099c6fd2807SJeff Garzik 	struct page *page;
5100c6fd2807SJeff Garzik 	unsigned int offset;
5101c6fd2807SJeff Garzik 	unsigned char *buf;
5102c6fd2807SJeff Garzik 
51035a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
5104c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5105c6fd2807SJeff Garzik 
510645711f1aSJens Axboe 	page = sg_page(qc->cursg);
510787260216SJens Axboe 	offset = qc->cursg->offset + qc->cursg_ofs;
5108c6fd2807SJeff Garzik 
5109c6fd2807SJeff Garzik 	/* get the current page and offset */
5110c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5111c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5112c6fd2807SJeff Garzik 
5113c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5114c6fd2807SJeff Garzik 
5115c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5116c6fd2807SJeff Garzik 		unsigned long flags;
5117c6fd2807SJeff Garzik 
5118c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
5119c6fd2807SJeff Garzik 		local_irq_save(flags);
5120c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5121c6fd2807SJeff Garzik 
5122c6fd2807SJeff Garzik 		/* do the actual data transfer */
51235a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5124c6fd2807SJeff Garzik 
5125c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5126c6fd2807SJeff Garzik 		local_irq_restore(flags);
5127c6fd2807SJeff Garzik 	} else {
5128c6fd2807SJeff Garzik 		buf = page_address(page);
51295a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5130c6fd2807SJeff Garzik 	}
5131c6fd2807SJeff Garzik 
51325a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
51335a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
5134c6fd2807SJeff Garzik 
513587260216SJens Axboe 	if (qc->cursg_ofs == qc->cursg->length) {
513687260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5137c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5138c6fd2807SJeff Garzik 	}
5139c6fd2807SJeff Garzik }
5140c6fd2807SJeff Garzik 
5141c6fd2807SJeff Garzik /**
51425a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
5143c6fd2807SJeff Garzik  *	@qc: Command on going
5144c6fd2807SJeff Garzik  *
51455a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
5146c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
5147c6fd2807SJeff Garzik  *
5148c6fd2807SJeff Garzik  *	LOCKING:
5149c6fd2807SJeff Garzik  *	Inherited from caller.
5150c6fd2807SJeff Garzik  */
5151c6fd2807SJeff Garzik 
5152c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
5153c6fd2807SJeff Garzik {
5154c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
5155c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
5156c6fd2807SJeff Garzik 		unsigned int nsect;
5157c6fd2807SJeff Garzik 
5158c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
5159c6fd2807SJeff Garzik 
51605a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5161726f0785STejun Heo 			    qc->dev->multi_count);
5162c6fd2807SJeff Garzik 		while (nsect--)
5163c6fd2807SJeff Garzik 			ata_pio_sector(qc);
5164c6fd2807SJeff Garzik 	} else
5165c6fd2807SJeff Garzik 		ata_pio_sector(qc);
51664cc980b3SAlbert Lee 
51674cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
5168c6fd2807SJeff Garzik }
5169c6fd2807SJeff Garzik 
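/*
 * Worked example, illustrative only: with 24KB left to move, a 512-byte
 * sector size and multi_count == 16, the computation above yields
 * min(24576 / 512, 16) == 16, so this DRQ block transfers 16 sectors
 * (8KB) before the next status check.
 */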
5170c6fd2807SJeff Garzik /**
5171c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
5172c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
5173c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
5174c6fd2807SJeff Garzik  *
5175c6fd2807SJeff Garzik  *	When device has indicated its readiness to accept
5176c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
5177c6fd2807SJeff Garzik  *
5178c6fd2807SJeff Garzik  *	LOCKING:
5179c6fd2807SJeff Garzik  *	Inherited from caller.
5180c6fd2807SJeff Garzik  */
5181c6fd2807SJeff Garzik 
5182c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5183c6fd2807SJeff Garzik {
5184c6fd2807SJeff Garzik 	/* send SCSI cdb */
5185c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
5186c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
5187c6fd2807SJeff Garzik 
5188c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5189c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
5190c6fd2807SJeff Garzik 
5191c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5192c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5193c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
5194c6fd2807SJeff Garzik 		break;
5195c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5196c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5197c6fd2807SJeff Garzik 		break;
5198c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5199c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5200c6fd2807SJeff Garzik 		/* initiate bmdma */
5201c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
5202c6fd2807SJeff Garzik 		break;
5203c6fd2807SJeff Garzik 	}
5204c6fd2807SJeff Garzik }
5205c6fd2807SJeff Garzik 
5206c6fd2807SJeff Garzik /**
5207c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
5208c6fd2807SJeff Garzik  *	@qc: Command on going
5209c6fd2807SJeff Garzik  *	@bytes: number of bytes
5210c6fd2807SJeff Garzik  *
5211c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5212c6fd2807SJeff Garzik  *
5213c6fd2807SJeff Garzik  *	LOCKING:
5214c6fd2807SJeff Garzik  *	Inherited from caller.
5215c6fd2807SJeff Garzik  *
5216c6fd2807SJeff Garzik  */
5217c6fd2807SJeff Garzik 
5218c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5219c6fd2807SJeff Garzik {
5220c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5221c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
52220874ee76SFUJITA Tomonori 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
5223c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5224c6fd2807SJeff Garzik 	struct page *page;
5225c6fd2807SJeff Garzik 	unsigned char *buf;
5226c6fd2807SJeff Garzik 	unsigned int offset, count;
52270874ee76SFUJITA Tomonori 	int no_more_sg = 0;
5228c6fd2807SJeff Garzik 
5229c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
5230c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5231c6fd2807SJeff Garzik 
5232c6fd2807SJeff Garzik next_sg:
52330874ee76SFUJITA Tomonori 	if (unlikely(no_more_sg)) {
5234c6fd2807SJeff Garzik 		/*
5235c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
5236c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
5237c6fd2807SJeff Garzik 		 * and fulfill length specified in the byte count register,
5238c6fd2807SJeff Garzik 		 *    - for read case, discard trailing data from the device
5239c6fd2807SJeff Garzik 		 *    - for write case, padding zero data to the device
5240c6fd2807SJeff Garzik 		 */
5241c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
5242c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
5243c6fd2807SJeff Garzik 		unsigned int i;
5244c6fd2807SJeff Garzik 
5245c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
5246c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
5247c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
5248c6fd2807SJeff Garzik 
5249c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
5250c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5251c6fd2807SJeff Garzik 
5252c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5253c6fd2807SJeff Garzik 		return;
5254c6fd2807SJeff Garzik 	}
5255c6fd2807SJeff Garzik 
525687260216SJens Axboe 	sg = qc->cursg;
5257c6fd2807SJeff Garzik 
525845711f1aSJens Axboe 	page = sg_page(sg);
5259c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
5260c6fd2807SJeff Garzik 
5261c6fd2807SJeff Garzik 	/* get the current page and offset */
5262c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5263c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5264c6fd2807SJeff Garzik 
5265c6fd2807SJeff Garzik 	/* don't overrun current sg */
5266c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
5267c6fd2807SJeff Garzik 
5268c6fd2807SJeff Garzik 	/* don't cross page boundaries */
5269c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
5270c6fd2807SJeff Garzik 
5271c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5272c6fd2807SJeff Garzik 
5273c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5274c6fd2807SJeff Garzik 		unsigned long flags;
5275c6fd2807SJeff Garzik 
5276c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
5277c6fd2807SJeff Garzik 		local_irq_save(flags);
5278c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5279c6fd2807SJeff Garzik 
5280c6fd2807SJeff Garzik 		/* do the actual data transfer */
5281c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5282c6fd2807SJeff Garzik 
5283c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5284c6fd2807SJeff Garzik 		local_irq_restore(flags);
5285c6fd2807SJeff Garzik 	} else {
5286c6fd2807SJeff Garzik 		buf = page_address(page);
5287c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5288c6fd2807SJeff Garzik 	}
5289c6fd2807SJeff Garzik 
5290c6fd2807SJeff Garzik 	bytes -= count;
5291c6fd2807SJeff Garzik 	qc->curbytes += count;
5292c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
5293c6fd2807SJeff Garzik 
5294c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
52950874ee76SFUJITA Tomonori 		if (qc->cursg == lsg)
52960874ee76SFUJITA Tomonori 			no_more_sg = 1;
52970874ee76SFUJITA Tomonori 
529887260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5299c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5300c6fd2807SJeff Garzik 	}
5301c6fd2807SJeff Garzik 
5302c6fd2807SJeff Garzik 	if (bytes)
5303c6fd2807SJeff Garzik 		goto next_sg;
5304c6fd2807SJeff Garzik }
5305c6fd2807SJeff Garzik 
5306c6fd2807SJeff Garzik /**
5307c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
5308c6fd2807SJeff Garzik  *	@qc: Command on going
5309c6fd2807SJeff Garzik  *
5310c6fd2807SJeff Garzik  *	Transfer data from/to the ATAPI device.
5311c6fd2807SJeff Garzik  *
5312c6fd2807SJeff Garzik  *	LOCKING:
5313c6fd2807SJeff Garzik  *	Inherited from caller.
5314c6fd2807SJeff Garzik  */
5315c6fd2807SJeff Garzik 
5316c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5317c6fd2807SJeff Garzik {
5318c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5319c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
5320c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
5321c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5322c6fd2807SJeff Garzik 
5323c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
5324c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
5325c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
5326c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
5327c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
5328c6fd2807SJeff Garzik 	 */
5329c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
5330c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
5331c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
5332c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
5333c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
5334c6fd2807SJeff Garzik 
5335c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
5336c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
5337c6fd2807SJeff Garzik 		goto err_out;
5338c6fd2807SJeff Garzik 
5339c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
5340c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5341c6fd2807SJeff Garzik 	if (do_write != i_write)
5342c6fd2807SJeff Garzik 		goto err_out;
5343c6fd2807SJeff Garzik 
534444877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5345c6fd2807SJeff Garzik 
5346c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
53474cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
5348c6fd2807SJeff Garzik 
5349c6fd2807SJeff Garzik 	return;
5350c6fd2807SJeff Garzik 
5351c6fd2807SJeff Garzik err_out:
5352c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5353c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
5354c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
5355c6fd2807SJeff Garzik }
5356c6fd2807SJeff Garzik 
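/*
 * Worked example, illustrative only: for a 512-byte ATAPI read the device
 * reports bc_hi = 0x02 and bc_lo = 0x00, giving bytes = 0x0200.  ireason
 * bit 0 (CoD) is 0 for a data phase and bit 1 (IO) is 1 for
 * device-to-host, which the check above maps to i_write == 0.
 */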
5357c6fd2807SJeff Garzik /**
5358c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5359c6fd2807SJeff Garzik  *	@ap: the target ata_port
5360c6fd2807SJeff Garzik  *	@qc: qc on going
5361c6fd2807SJeff Garzik  *
5362c6fd2807SJeff Garzik  *	RETURNS:
5363c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
5364c6fd2807SJeff Garzik  */
5365c6fd2807SJeff Garzik 
5366c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5367c6fd2807SJeff Garzik {
5368c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
5369c6fd2807SJeff Garzik 		return 1;
5370c6fd2807SJeff Garzik 
5371c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
5372c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
5373c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
5374c6fd2807SJeff Garzik 		    return 1;
5375c6fd2807SJeff Garzik 
5376c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
5377c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5378c6fd2807SJeff Garzik 			return 1;
5379c6fd2807SJeff Garzik 	}
5380c6fd2807SJeff Garzik 
5381c6fd2807SJeff Garzik 	return 0;
5382c6fd2807SJeff Garzik }
5383c6fd2807SJeff Garzik 
5384c6fd2807SJeff Garzik /**
5385c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5386c6fd2807SJeff Garzik  *	@qc: Command to complete
5387c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5388c6fd2807SJeff Garzik  *
5389c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5390c6fd2807SJeff Garzik  *
5391c6fd2807SJeff Garzik  *	LOCKING:
5392cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5393c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5394c6fd2807SJeff Garzik  */
5395c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5396c6fd2807SJeff Garzik {
5397c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5398c6fd2807SJeff Garzik 	unsigned long flags;
5399c6fd2807SJeff Garzik 
5400c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5401c6fd2807SJeff Garzik 		if (in_wq) {
5402c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5403c6fd2807SJeff Garzik 
5404cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5405cca3974eSJeff Garzik 			 * released.
5406c6fd2807SJeff Garzik 			 */
5407c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5408c6fd2807SJeff Garzik 			if (qc) {
5409c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
541083625006SAkira Iguchi 					ap->ops->irq_on(ap);
5411c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5412c6fd2807SJeff Garzik 				} else
5413c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5414c6fd2807SJeff Garzik 			}
5415c6fd2807SJeff Garzik 
5416c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5417c6fd2807SJeff Garzik 		} else {
5418c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5419c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5420c6fd2807SJeff Garzik 			else
5421c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5422c6fd2807SJeff Garzik 		}
5423c6fd2807SJeff Garzik 	} else {
5424c6fd2807SJeff Garzik 		if (in_wq) {
5425c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
542683625006SAkira Iguchi 			ap->ops->irq_on(ap);
5427c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5428c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5429c6fd2807SJeff Garzik 		} else
5430c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5431c6fd2807SJeff Garzik 	}
5432c6fd2807SJeff Garzik }
5433c6fd2807SJeff Garzik 
5434c6fd2807SJeff Garzik /**
5435c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5436c6fd2807SJeff Garzik  *	@ap: the target ata_port
5437c6fd2807SJeff Garzik  *	@qc: qc on going
5438c6fd2807SJeff Garzik  *	@status: current device status
5439c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5440c6fd2807SJeff Garzik  *
5441c6fd2807SJeff Garzik  *	RETURNS:
5442c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5443c6fd2807SJeff Garzik  */
5444c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5445c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5446c6fd2807SJeff Garzik {
5447c6fd2807SJeff Garzik 	unsigned long flags = 0;
5448c6fd2807SJeff Garzik 	int poll_next;
5449c6fd2807SJeff Garzik 
5450c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5451c6fd2807SJeff Garzik 
5452c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5453c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5454c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5455c6fd2807SJeff Garzik 	 */
5456c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5457c6fd2807SJeff Garzik 
5458c6fd2807SJeff Garzik fsm_start:
5459c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
546044877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5461c6fd2807SJeff Garzik 
5462c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5463c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5464c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5465c6fd2807SJeff Garzik 
5466c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5467c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5468c6fd2807SJeff Garzik 		 * takes over after sending the data.
5469c6fd2807SJeff Garzik 		 */
5470c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5471c6fd2807SJeff Garzik 
5472c6fd2807SJeff Garzik 		/* check device status */
5473c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5474c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5475c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5476c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5477c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5478c6fd2807SJeff Garzik 			else
5479c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5480c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5481c6fd2807SJeff Garzik 
5482c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5483c6fd2807SJeff Garzik 			goto fsm_start;
5484c6fd2807SJeff Garzik 		}
5485c6fd2807SJeff Garzik 
5486c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5487c6fd2807SJeff Garzik 		 * when it finds something wrong.
5488c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5489c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5490c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5491c6fd2807SJeff Garzik 		 */
5492c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
549344877b4eSTejun Heo 			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
549444877b4eSTejun Heo 					"error, dev_stat 0x%X\n", status);
5495c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_HSM;
5496c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5497c6fd2807SJeff Garzik 			goto fsm_start;
5498c6fd2807SJeff Garzik 		}
5499c6fd2807SJeff Garzik 
5500c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5501c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5502c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5503c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5504c6fd2807SJeff Garzik 		 */
5505c6fd2807SJeff Garzik 		if (in_wq)
5506c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5507c6fd2807SJeff Garzik 
5508c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5509c6fd2807SJeff Garzik 			/* PIO data out protocol.
5510c6fd2807SJeff Garzik 			 * send first data block.
5511c6fd2807SJeff Garzik 			 */
5512c6fd2807SJeff Garzik 
5513c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5514c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5515c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5516c6fd2807SJeff Garzik 			 */
5517c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5518c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5519c6fd2807SJeff Garzik 		} else
5520c6fd2807SJeff Garzik 			/* send CDB */
5521c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5522c6fd2807SJeff Garzik 
5523c6fd2807SJeff Garzik 		if (in_wq)
5524c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5525c6fd2807SJeff Garzik 
5526c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5527c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5528c6fd2807SJeff Garzik 		 */
5529c6fd2807SJeff Garzik 		break;
5530c6fd2807SJeff Garzik 
5531c6fd2807SJeff Garzik 	case HSM_ST:
5532c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
5533c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
5534c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5535c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5536c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5537c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5538c6fd2807SJeff Garzik 				 */
5539c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5540c6fd2807SJeff Garzik 				goto fsm_start;
5541c6fd2807SJeff Garzik 			}
5542c6fd2807SJeff Garzik 
5543c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5544c6fd2807SJeff Garzik 			 * when it finds something wrong.
5545c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5546c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5547c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5548c6fd2807SJeff Garzik 			 */
5549c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
555044877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
555144877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
555244877b4eSTejun Heo 						status);
5553c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5554c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5555c6fd2807SJeff Garzik 				goto fsm_start;
5556c6fd2807SJeff Garzik 			}
5557c6fd2807SJeff Garzik 
5558c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5559c6fd2807SJeff Garzik 
5560c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5561c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5562c6fd2807SJeff Garzik 				goto fsm_start;
5563c6fd2807SJeff Garzik 
5564c6fd2807SJeff Garzik 		} else {
5565c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5566c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5567c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5568c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5569c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5570c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5571c6fd2807SJeff Garzik 				else
557255a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
557355a8e2c8STejun Heo 					 * Phantom devices also trigger this
557555a8e2c8STejun Heo 					 * condition, so mark the NODEV hint.
557555a8e2c8STejun Heo 					 */
557655a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
557755a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5578c6fd2807SJeff Garzik 
5579c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5580c6fd2807SJeff Garzik 				goto fsm_start;
5581c6fd2807SJeff Garzik 			}
5582c6fd2807SJeff Garzik 
5583c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5584c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) along with ERR=1.
5585c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5586c6fd2807SJeff Garzik 			 * block of junk data before changing the
5587c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5588c6fd2807SJeff Garzik 			 *
5589c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5590c6fd2807SJeff Garzik 			 * sense since the data block has been
5591c6fd2807SJeff Garzik 			 * transferred to the device.
5592c6fd2807SJeff Garzik 			 */
5593c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5594c6fd2807SJeff Garzik 				/* data might be corrupted */
5595c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5596c6fd2807SJeff Garzik 
5597c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5598c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5599c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5600c6fd2807SJeff Garzik 				}
5601c6fd2807SJeff Garzik 
5602c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5603c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5604c6fd2807SJeff Garzik 
5605c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5606c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5607c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5608c6fd2807SJeff Garzik 				 */
5609c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5610c6fd2807SJeff Garzik 				goto fsm_start;
5611c6fd2807SJeff Garzik 			}
5612c6fd2807SJeff Garzik 
5613c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5614c6fd2807SJeff Garzik 
5615c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5616c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5617c6fd2807SJeff Garzik 				/* all data read */
5618c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5619c6fd2807SJeff Garzik 				goto fsm_start;
5620c6fd2807SJeff Garzik 			}
5621c6fd2807SJeff Garzik 		}
5622c6fd2807SJeff Garzik 
5623c6fd2807SJeff Garzik 		poll_next = 1;
5624c6fd2807SJeff Garzik 		break;
5625c6fd2807SJeff Garzik 
5626c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5627c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5628c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5629c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5630c6fd2807SJeff Garzik 			goto fsm_start;
5631c6fd2807SJeff Garzik 		}
5632c6fd2807SJeff Garzik 
5633c6fd2807SJeff Garzik 		/* no more data to transfer */
5634c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
563544877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5636c6fd2807SJeff Garzik 
5637c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5638c6fd2807SJeff Garzik 
5639c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5640c6fd2807SJeff Garzik 
5641c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5642c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5643c6fd2807SJeff Garzik 
5644c6fd2807SJeff Garzik 		poll_next = 0;
5645c6fd2807SJeff Garzik 		break;
5646c6fd2807SJeff Garzik 
5647c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5648c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5649c6fd2807SJeff Garzik 		 * know what's wrong and recover
5650c6fd2807SJeff Garzik 		 */
5651c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5652c6fd2807SJeff Garzik 
5653c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5654c6fd2807SJeff Garzik 
5655c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5656c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5657c6fd2807SJeff Garzik 
5658c6fd2807SJeff Garzik 		poll_next = 0;
5659c6fd2807SJeff Garzik 		break;
5660c6fd2807SJeff Garzik 	default:
5661c6fd2807SJeff Garzik 		poll_next = 0;
5662c6fd2807SJeff Garzik 		BUG();
5663c6fd2807SJeff Garzik 	}
5664c6fd2807SJeff Garzik 
5665c6fd2807SJeff Garzik 	return poll_next;
5666c6fd2807SJeff Garzik }
5667c6fd2807SJeff Garzik 
566865f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5669c6fd2807SJeff Garzik {
567065f27f38SDavid Howells 	struct ata_port *ap =
567165f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
567265f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5673c6fd2807SJeff Garzik 	u8 status;
5674c6fd2807SJeff Garzik 	int poll_next;
5675c6fd2807SJeff Garzik 
5676c6fd2807SJeff Garzik fsm_start:
5677c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5678c6fd2807SJeff Garzik 
5679c6fd2807SJeff Garzik 	/*
5680c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5681c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5682c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5683c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5684c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5685c6fd2807SJeff Garzik 	 */
5686c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5687c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5688c6fd2807SJeff Garzik 		msleep(2);
5689c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5690c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5691c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5692c6fd2807SJeff Garzik 			return;
5693c6fd2807SJeff Garzik 		}
5694c6fd2807SJeff Garzik 	}
5695c6fd2807SJeff Garzik 
5696c6fd2807SJeff Garzik 	/* move the HSM */
5697c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5698c6fd2807SJeff Garzik 
5699c6fd2807SJeff Garzik 	/* another command or interrupt handler
5700c6fd2807SJeff Garzik 	 * may be running at this point.
5701c6fd2807SJeff Garzik 	 */
5702c6fd2807SJeff Garzik 	if (poll_next)
5703c6fd2807SJeff Garzik 		goto fsm_start;
5704c6fd2807SJeff Garzik }
5705c6fd2807SJeff Garzik 
5706c6fd2807SJeff Garzik /**
5707c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5708c6fd2807SJeff Garzik  *	@ap: Port to allocate the command structure from
5710c6fd2807SJeff Garzik  *
5711c6fd2807SJeff Garzik  *	LOCKING:
5712c6fd2807SJeff Garzik  *	None.
5713c6fd2807SJeff Garzik  */
5714c6fd2807SJeff Garzik 
5715c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5716c6fd2807SJeff Garzik {
5717c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5718c6fd2807SJeff Garzik 	unsigned int i;
5719c6fd2807SJeff Garzik 
5720c6fd2807SJeff Garzik 	/* no command while frozen */
5721c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5722c6fd2807SJeff Garzik 		return NULL;
5723c6fd2807SJeff Garzik 
5724c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5725c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5726c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5727c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5728c6fd2807SJeff Garzik 			break;
5729c6fd2807SJeff Garzik 		}
5730c6fd2807SJeff Garzik 
5731c6fd2807SJeff Garzik 	if (qc)
5732c6fd2807SJeff Garzik 		qc->tag = i;
5733c6fd2807SJeff Garzik 
5734c6fd2807SJeff Garzik 	return qc;
5735c6fd2807SJeff Garzik }
5736c6fd2807SJeff Garzik 
5737c6fd2807SJeff Garzik /**
5738c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5739c6fd2807SJeff Garzik  *	@dev: Device from which we request an available command structure
5740c6fd2807SJeff Garzik  *
5741c6fd2807SJeff Garzik  *	LOCKING:
5742c6fd2807SJeff Garzik  *	None.
5743c6fd2807SJeff Garzik  */
5744c6fd2807SJeff Garzik 
5745c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5746c6fd2807SJeff Garzik {
57479af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5748c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5749c6fd2807SJeff Garzik 
5750c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5751c6fd2807SJeff Garzik 	if (qc) {
5752c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5753c6fd2807SJeff Garzik 		qc->ap = ap;
5754c6fd2807SJeff Garzik 		qc->dev = dev;
5755c6fd2807SJeff Garzik 
5756c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5757c6fd2807SJeff Garzik 	}
5758c6fd2807SJeff Garzik 
5759c6fd2807SJeff Garzik 	return qc;
5760c6fd2807SJeff Garzik }
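
/*
 * Example usage (illustrative sketch, not part of libata): the typical
 * lifecycle of a qc obtained from ata_qc_new_init().  The caller fills
 * in the taskfile, sets a completion callback and issues the command
 * with the host lock held.  A NULL return means the port is frozen or
 * out of tags.  "my_complete_fn" is a hypothetical callback.
 *
 *	struct ata_port *ap = dev->link->ap;
 *	struct ata_queued_cmd *qc;
 *	unsigned long flags;
 *
 *	qc = ata_qc_new_init(dev);
 *	if (!qc)
 *		return -ENOMEM;
 *
 *	qc->tf.command = ATA_CMD_FLUSH;
 *	qc->tf.flags |= ATA_TFLAG_DEVICE;
 *	qc->tf.protocol = ATA_PROT_NODATA;
 *	qc->complete_fn = my_complete_fn;
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	ata_qc_issue(qc);
 *	spin_unlock_irqrestore(ap->lock, flags);
 */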
5761c6fd2807SJeff Garzik 
5762c6fd2807SJeff Garzik /**
5763c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5764c6fd2807SJeff Garzik  *	@qc: Command to complete
5765c6fd2807SJeff Garzik  *
5766c6fd2807SJeff Garzik  *	Designed to free unused ata_queued_cmd object
5767c6fd2807SJeff Garzik  *	in case something prevents using it.
5768c6fd2807SJeff Garzik  *
5769c6fd2807SJeff Garzik  *	LOCKING:
5770cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5771c6fd2807SJeff Garzik  */
5772c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5773c6fd2807SJeff Garzik {
5774c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5775c6fd2807SJeff Garzik 	unsigned int tag;
5776c6fd2807SJeff Garzik 
5777c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5778c6fd2807SJeff Garzik 
5779c6fd2807SJeff Garzik 	qc->flags = 0;
5780c6fd2807SJeff Garzik 	tag = qc->tag;
5781c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5782c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5783c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5784c6fd2807SJeff Garzik 	}
5785c6fd2807SJeff Garzik }
5786c6fd2807SJeff Garzik 
5787c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5788c6fd2807SJeff Garzik {
5789c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
57909af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5791c6fd2807SJeff Garzik 
5792c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5793c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5794c6fd2807SJeff Garzik 
5795c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5796c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5797c6fd2807SJeff Garzik 
5798c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5799da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
58009af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5801da917d69STejun Heo 		if (!link->sactive)
5802da917d69STejun Heo 			ap->nr_active_links--;
5803da917d69STejun Heo 	} else {
58049af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5805da917d69STejun Heo 		ap->nr_active_links--;
5806da917d69STejun Heo 	}
5807da917d69STejun Heo 
5808da917d69STejun Heo 	/* clear exclusive status */
5809da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5810da917d69STejun Heo 		     ap->excl_link == link))
5811da917d69STejun Heo 		ap->excl_link = NULL;
5812c6fd2807SJeff Garzik 
5813c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5814c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5815c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5816c6fd2807SJeff Garzik 	 */
5817c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5818c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5819c6fd2807SJeff Garzik 
5820c6fd2807SJeff Garzik 	/* call completion callback */
5821c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5822c6fd2807SJeff Garzik }
5823c6fd2807SJeff Garzik 
582439599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
582539599a53STejun Heo {
582639599a53STejun Heo 	struct ata_port *ap = qc->ap;
582739599a53STejun Heo 
582839599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
58294742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
583039599a53STejun Heo }
583139599a53STejun Heo 
5832c6fd2807SJeff Garzik /**
5833c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5834c6fd2807SJeff Garzik  *	@qc: Command to complete
5836c6fd2807SJeff Garzik  *
5837c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5838c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5839c6fd2807SJeff Garzik  *
5840c6fd2807SJeff Garzik  *	LOCKING:
5841cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5842c6fd2807SJeff Garzik  */
5843c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5844c6fd2807SJeff Garzik {
5845c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5846c6fd2807SJeff Garzik 
5847c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5848c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5849c6fd2807SJeff Garzik 	 *
5850c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5851c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5852c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5853c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5854c6fd2807SJeff Garzik 	 *
5855c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5856c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5857c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5858c6fd2807SJeff Garzik 	 * taken care of.
5859c6fd2807SJeff Garzik 	 */
5860c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
58614dbfa39bSTejun Heo 		struct ata_device *dev = qc->dev;
58624dbfa39bSTejun Heo 		struct ata_eh_info *ehi = &dev->link->eh_info;
58634dbfa39bSTejun Heo 
5864c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5865c6fd2807SJeff Garzik 
5866c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5867c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5868c6fd2807SJeff Garzik 
5869c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5870c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5871c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
587239599a53STejun Heo 				fill_result_tf(qc);
5873c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5874c6fd2807SJeff Garzik 				return;
5875c6fd2807SJeff Garzik 			}
5876c6fd2807SJeff Garzik 		}
5877c6fd2807SJeff Garzik 
5878c6fd2807SJeff Garzik 		/* read result TF if requested */
5879c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
588039599a53STejun Heo 			fill_result_tf(qc);
5881c6fd2807SJeff Garzik 
58824dbfa39bSTejun Heo 		/* Some commands need post-processing after successful
58834dbfa39bSTejun Heo 		 * completion.
58844dbfa39bSTejun Heo 		 */
58854dbfa39bSTejun Heo 		switch (qc->tf.command) {
58864dbfa39bSTejun Heo 		case ATA_CMD_SET_FEATURES:
58874dbfa39bSTejun Heo 			if (qc->tf.feature != SETFEATURES_WC_ON &&
58884dbfa39bSTejun Heo 			    qc->tf.feature != SETFEATURES_WC_OFF)
58894dbfa39bSTejun Heo 				break;
58904dbfa39bSTejun Heo 			/* fall through */
58914dbfa39bSTejun Heo 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
58924dbfa39bSTejun Heo 		case ATA_CMD_SET_MULTI: /* multi_count changed */
58934dbfa39bSTejun Heo 			/* revalidate device */
58944dbfa39bSTejun Heo 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
58954dbfa39bSTejun Heo 			ata_port_schedule_eh(ap);
58964dbfa39bSTejun Heo 			break;
5897054a5fbaSTejun Heo 
5898054a5fbaSTejun Heo 		case ATA_CMD_SLEEP:
5899054a5fbaSTejun Heo 			dev->flags |= ATA_DFLAG_SLEEPING;
5900054a5fbaSTejun Heo 			break;
59014dbfa39bSTejun Heo 		}
59024dbfa39bSTejun Heo 
5903c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5904c6fd2807SJeff Garzik 	} else {
5905c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5906c6fd2807SJeff Garzik 			return;
5907c6fd2807SJeff Garzik 
5908c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5909c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
591039599a53STejun Heo 			fill_result_tf(qc);
5911c6fd2807SJeff Garzik 
5912c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5913c6fd2807SJeff Garzik 	}
5914c6fd2807SJeff Garzik }
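
/*
 * Example usage (illustrative sketch): a low-level driver typically
 * completes the active command from its interrupt handler once the
 * hardware signals completion.  "status" is the Status register value
 * already read by the driver; error bits are folded into qc->err_mask
 * before completion so that EH can see them.
 *
 *	struct ata_queued_cmd *qc;
 *
 *	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 *	if (qc) {
 *		qc->err_mask |= ac_err_mask(status);
 *		ata_qc_complete(qc);
 *	}
 */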
5915c6fd2807SJeff Garzik 
5916c6fd2807SJeff Garzik /**
5917c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5918c6fd2807SJeff Garzik  *	@ap: port in question
5919c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5920c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5921c6fd2807SJeff Garzik  *
5922c6fd2807SJeff Garzik  *	Complete in-flight commands.  This function is meant to be
5923c6fd2807SJeff Garzik  *	called from the low-level driver's interrupt routine to complete
5924c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active are compared
5925c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5926c6fd2807SJeff Garzik  *
5927c6fd2807SJeff Garzik  *	LOCKING:
5928cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5929c6fd2807SJeff Garzik  *
5930c6fd2807SJeff Garzik  *	RETURNS:
5931c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5932c6fd2807SJeff Garzik  */
5933c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5934c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5935c6fd2807SJeff Garzik {
5936c6fd2807SJeff Garzik 	int nr_done = 0;
5937c6fd2807SJeff Garzik 	u32 done_mask;
5938c6fd2807SJeff Garzik 	int i;
5939c6fd2807SJeff Garzik 
5940c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5941c6fd2807SJeff Garzik 
5942c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5943c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5944c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5945c6fd2807SJeff Garzik 		return -EINVAL;
5946c6fd2807SJeff Garzik 	}
5947c6fd2807SJeff Garzik 
5948c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5949c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5950c6fd2807SJeff Garzik 
5951c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5952c6fd2807SJeff Garzik 			continue;
5953c6fd2807SJeff Garzik 
5954c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5955c6fd2807SJeff Garzik 			if (finish_qc)
5956c6fd2807SJeff Garzik 				finish_qc(qc);
5957c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5958c6fd2807SJeff Garzik 			nr_done++;
5959c6fd2807SJeff Garzik 		}
5960c6fd2807SJeff Garzik 	}
5961c6fd2807SJeff Garzik 
5962c6fd2807SJeff Garzik 	return nr_done;
5963c6fd2807SJeff Garzik }
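
/*
 * Example usage (illustrative sketch; PORT_ACTIVE_TAGS is a hypothetical
 * register name): an NCQ-capable LLD usually reads the controller's
 * "still active tags" register in its interrupt handler and lets libata
 * complete whatever finished since the last interrupt.  A negative
 * return means the hardware reported an impossible transition and the
 * port should be frozen for EH.
 *
 *	u32 qc_active = readl(port_mmio + PORT_ACTIVE_TAGS);
 *	int rc = ata_qc_complete_multiple(ap, qc_active, NULL);
 *
 *	if (rc < 0) {
 *		ap->link.eh_info.err_mask |= AC_ERR_HSM;
 *		ata_port_freeze(ap);
 *	}
 */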
5964c6fd2807SJeff Garzik 
5965c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5966c6fd2807SJeff Garzik {
5967c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5968c6fd2807SJeff Garzik 
5969c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5970c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5971c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5972c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5973c6fd2807SJeff Garzik 		return 1;
5974c6fd2807SJeff Garzik 
5975c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5976c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5977c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5978c6fd2807SJeff Garzik 			return 1;
5979c6fd2807SJeff Garzik 
5980c6fd2807SJeff Garzik 		/* fall through */
5981c6fd2807SJeff Garzik 
5982c6fd2807SJeff Garzik 	default:
5983c6fd2807SJeff Garzik 		return 0;
5984c6fd2807SJeff Garzik 	}
5985c6fd2807SJeff Garzik 
5986c6fd2807SJeff Garzik 	/* never reached */
5987c6fd2807SJeff Garzik }
5988c6fd2807SJeff Garzik 
5989c6fd2807SJeff Garzik /**
5990c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5991c6fd2807SJeff Garzik  *	@qc: command to issue to device
5992c6fd2807SJeff Garzik  *
5993c6fd2807SJeff Garzik  *	Prepare an ATA command for submission to the device.
5994c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5995c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5996c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5997c6fd2807SJeff Garzik  *
5998c6fd2807SJeff Garzik  *	LOCKING:
5999cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6000c6fd2807SJeff Garzik  */
6001c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
6002c6fd2807SJeff Garzik {
6003c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
60049af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
6005c6fd2807SJeff Garzik 
6006c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
6007c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
6008c6fd2807SJeff Garzik 	 * request ATAPI sense.
6009c6fd2807SJeff Garzik 	 */
60109af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
6011c6fd2807SJeff Garzik 
6012c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
60139af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
6014da917d69STejun Heo 
6015da917d69STejun Heo 		if (!link->sactive)
6016da917d69STejun Heo 			ap->nr_active_links++;
60179af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
6018c6fd2807SJeff Garzik 	} else {
60199af5c9c9STejun Heo 		WARN_ON(link->sactive);
6020da917d69STejun Heo 
6021da917d69STejun Heo 		ap->nr_active_links++;
60229af5c9c9STejun Heo 		link->active_tag = qc->tag;
6023c6fd2807SJeff Garzik 	}
6024c6fd2807SJeff Garzik 
6025c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
6026c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
6027c6fd2807SJeff Garzik 
6028c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
6029c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
6030c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
6031c6fd2807SJeff Garzik 				goto sg_err;
6032c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
6033c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
6034c6fd2807SJeff Garzik 				goto sg_err;
6035c6fd2807SJeff Garzik 		}
6036c6fd2807SJeff Garzik 	} else {
6037c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
6038c6fd2807SJeff Garzik 	}
6039c6fd2807SJeff Garzik 
6040054a5fbaSTejun Heo 	/* if device is sleeping, schedule softreset and abort the link */
6041054a5fbaSTejun Heo 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6042054a5fbaSTejun Heo 		link->eh_info.action |= ATA_EH_SOFTRESET;
6043054a5fbaSTejun Heo 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6044054a5fbaSTejun Heo 		ata_link_abort(link);
6045054a5fbaSTejun Heo 		return;
6046054a5fbaSTejun Heo 	}
6047054a5fbaSTejun Heo 
6048c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
6049c6fd2807SJeff Garzik 
6050c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
6051c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
6052c6fd2807SJeff Garzik 		goto err;
6053c6fd2807SJeff Garzik 	return;
6054c6fd2807SJeff Garzik 
6055c6fd2807SJeff Garzik sg_err:
6056c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
6057c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
6058c6fd2807SJeff Garzik err:
6059c6fd2807SJeff Garzik 	ata_qc_complete(qc);
6060c6fd2807SJeff Garzik }
6061c6fd2807SJeff Garzik 
6062c6fd2807SJeff Garzik /**
6063c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6064c6fd2807SJeff Garzik  *	@qc: command to issue to device
6065c6fd2807SJeff Garzik  *
6066c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
6067c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
6068c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
6069c6fd2807SJeff Garzik  *	is slightly different.
6070c6fd2807SJeff Garzik  *
6071c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
6072c6fd2807SJeff Garzik  *
6073c6fd2807SJeff Garzik  *	LOCKING:
6074cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6075c6fd2807SJeff Garzik  *
6076c6fd2807SJeff Garzik  *	RETURNS:
6077c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
6078c6fd2807SJeff Garzik  */
6079c6fd2807SJeff Garzik 
6080c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6081c6fd2807SJeff Garzik {
6082c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
6083c6fd2807SJeff Garzik 
6084c6fd2807SJeff Garzik 	/* Use polling PIO if the LLD doesn't handle
6085c6fd2807SJeff Garzik 	 * interrupt-driven PIO and the ATAPI CDB interrupt.
6086c6fd2807SJeff Garzik 	 */
6087c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
6088c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
6089c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
6090e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
6091c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
6092c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
6093c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
6094c6fd2807SJeff Garzik 			break;
6095c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
6096c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6097c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
6098c6fd2807SJeff Garzik 				BUG();
6099c6fd2807SJeff Garzik 			break;
6100c6fd2807SJeff Garzik 		default:
6101c6fd2807SJeff Garzik 			break;
6102c6fd2807SJeff Garzik 		}
6103c6fd2807SJeff Garzik 	}
6104c6fd2807SJeff Garzik 
6105c6fd2807SJeff Garzik 	/* select the device */
6106c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
6107c6fd2807SJeff Garzik 
6108c6fd2807SJeff Garzik 	/* start the command */
6109c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
6110c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
6111c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6112c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6113c6fd2807SJeff Garzik 
6114c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6115c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6116c6fd2807SJeff Garzik 
6117c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6118c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6119c6fd2807SJeff Garzik 
6120c6fd2807SJeff Garzik 		break;
6121c6fd2807SJeff Garzik 
6122c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
6123c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6124c6fd2807SJeff Garzik 
6125c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6126c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6127c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
6128c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6129c6fd2807SJeff Garzik 		break;
6130c6fd2807SJeff Garzik 
6131c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
6132c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6133c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6134c6fd2807SJeff Garzik 
6135c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6136c6fd2807SJeff Garzik 
6137c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
6138c6fd2807SJeff Garzik 			/* PIO data out protocol */
6139c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
6140c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6141c6fd2807SJeff Garzik 
6142c6fd2807SJeff Garzik 			/* always send first data block using
6143c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
6144c6fd2807SJeff Garzik 			 */
6145c6fd2807SJeff Garzik 		} else {
6146c6fd2807SJeff Garzik 			/* PIO data in protocol */
6147c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
6148c6fd2807SJeff Garzik 
6149c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
6150c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
6151c6fd2807SJeff Garzik 
6152c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
6153c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
6154c6fd2807SJeff Garzik 			 */
6155c6fd2807SJeff Garzik 		}
6156c6fd2807SJeff Garzik 
6157c6fd2807SJeff Garzik 		break;
6158c6fd2807SJeff Garzik 
6159c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
6160c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
6161c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6162c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6163c6fd2807SJeff Garzik 
6164c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6165c6fd2807SJeff Garzik 
6166c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6167c6fd2807SJeff Garzik 
6168c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6169c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6170c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
6171c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6172c6fd2807SJeff Garzik 		break;
6173c6fd2807SJeff Garzik 
6174c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
6175c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6176c6fd2807SJeff Garzik 
6177c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6178c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6179c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6180c6fd2807SJeff Garzik 
6181c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6182c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6183c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6184c6fd2807SJeff Garzik 		break;
6185c6fd2807SJeff Garzik 
6186c6fd2807SJeff Garzik 	default:
6187c6fd2807SJeff Garzik 		WARN_ON(1);
6188c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
6189c6fd2807SJeff Garzik 	}
6190c6fd2807SJeff Garzik 
6191c6fd2807SJeff Garzik 	return 0;
6192c6fd2807SJeff Garzik }
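
/*
 * Example usage (illustrative sketch, abbreviated): a conventional
 * SFF/BMDMA driver can plug ata_qc_issue_prot() straight into its port
 * operations and let libata drive the taskfile/PIO/BMDMA state machine
 * above.  A real driver fills in many more hooks (BMDMA, reset, etc.).
 *
 *	static const struct ata_port_operations my_port_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.exec_command	= ata_exec_command,
 *		.check_status	= ata_check_status,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *	};
 */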
6193c6fd2807SJeff Garzik 
6194c6fd2807SJeff Garzik /**
6195c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
6196c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
6197c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
6198c6fd2807SJeff Garzik  *
6199c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
6200c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
6201c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
6202c6fd2807SJeff Garzik  *
6203c6fd2807SJeff Garzik  *	LOCKING:
6204cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6205c6fd2807SJeff Garzik  *
6206c6fd2807SJeff Garzik  *	RETURNS:
6207c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
6208c6fd2807SJeff Garzik  */
6209c6fd2807SJeff Garzik 
6210c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap,
6211c6fd2807SJeff Garzik 				  struct ata_queued_cmd *qc)
6212c6fd2807SJeff Garzik {
62139af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
6214c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
6215c6fd2807SJeff Garzik 
6216c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
621744877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6218c6fd2807SJeff Garzik 
6219c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
6220c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
6221c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
6222c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
6223c6fd2807SJeff Garzik 		 * in this state when ready to receive the CDB.
6224c6fd2807SJeff Garzik 		 */
6225c6fd2807SJeff Garzik 
6226c6fd2807SJeff Garzik 		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6227c6fd2807SJeff Garzik 		 * The flag was turned on only for atapi devices.
6228c6fd2807SJeff Garzik 		 * No need to check is_atapi_taskfile(&qc->tf) again.
6229c6fd2807SJeff Garzik 		 */
6230c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6231c6fd2807SJeff Garzik 			goto idle_irq;
6232c6fd2807SJeff Garzik 		break;
6233c6fd2807SJeff Garzik 	case HSM_ST_LAST:
6234c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
6235c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6236c6fd2807SJeff Garzik 			/* check status of DMA engine */
6237c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
623844877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
623944877b4eSTejun Heo 				ap->print_id, host_stat);
6240c6fd2807SJeff Garzik 
6241c6fd2807SJeff Garzik 			/* if it's not our irq... */
6242c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
6243c6fd2807SJeff Garzik 				goto idle_irq;
6244c6fd2807SJeff Garzik 
6245c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
6246c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
6247c6fd2807SJeff Garzik 
6248c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
6249c6fd2807SJeff Garzik 				/* error when transferring data to/from memory */
6250c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
6251c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
6252c6fd2807SJeff Garzik 			}
6253c6fd2807SJeff Garzik 		}
6254c6fd2807SJeff Garzik 		break;
6255c6fd2807SJeff Garzik 	case HSM_ST:
6256c6fd2807SJeff Garzik 		break;
6257c6fd2807SJeff Garzik 	default:
6258c6fd2807SJeff Garzik 		goto idle_irq;
6259c6fd2807SJeff Garzik 	}
6260c6fd2807SJeff Garzik 
6261c6fd2807SJeff Garzik 	/* check altstatus */
6262c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
6263c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
6264c6fd2807SJeff Garzik 		goto idle_irq;
6265c6fd2807SJeff Garzik 
6266c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
6267c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
6268c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
6269c6fd2807SJeff Garzik 		goto idle_irq;
6270c6fd2807SJeff Garzik 
6271c6fd2807SJeff Garzik 	/* ack bmdma irq events */
6272c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
6273c6fd2807SJeff Garzik 
6274c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
6275ea54763fSTejun Heo 
6276ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6277ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6278ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6279ea54763fSTejun Heo 
6280c6fd2807SJeff Garzik 	return 1;	/* irq handled */
6281c6fd2807SJeff Garzik 
6282c6fd2807SJeff Garzik idle_irq:
6283c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
6284c6fd2807SJeff Garzik 
6285c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6286c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
62876d32d30fSJeff Garzik 		ata_chk_status(ap);
62886d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
6289c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6290c6fd2807SJeff Garzik 		return 1;
6291c6fd2807SJeff Garzik 	}
6292c6fd2807SJeff Garzik #endif
6293c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
6294c6fd2807SJeff Garzik }
6295c6fd2807SJeff Garzik 
6296c6fd2807SJeff Garzik /**
6297c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
6298c6fd2807SJeff Garzik  *	@irq: irq line (unused)
6299cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
6300c6fd2807SJeff Garzik  *
6301c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
6302c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
6303c6fd2807SJeff Garzik  *
6304c6fd2807SJeff Garzik  *	LOCKING:
6305cca3974eSJeff Garzik  *	Obtains host lock during operation.
6306c6fd2807SJeff Garzik  *
6307c6fd2807SJeff Garzik  *	RETURNS:
6308c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
6309c6fd2807SJeff Garzik  */
6310c6fd2807SJeff Garzik 
63117d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance)
6312c6fd2807SJeff Garzik {
6313cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
6314c6fd2807SJeff Garzik 	unsigned int i;
6315c6fd2807SJeff Garzik 	unsigned int handled = 0;
6316c6fd2807SJeff Garzik 	unsigned long flags;
6317c6fd2807SJeff Garzik 
6318c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6319cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
6320c6fd2807SJeff Garzik 
6321cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6322c6fd2807SJeff Garzik 		struct ata_port *ap;
6323c6fd2807SJeff Garzik 
6324cca3974eSJeff Garzik 		ap = host->ports[i];
6325c6fd2807SJeff Garzik 		if (ap &&
6326c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
6327c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
6328c6fd2807SJeff Garzik 
63299af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
6330c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6331c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
6332c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
6333c6fd2807SJeff Garzik 		}
6334c6fd2807SJeff Garzik 	}
6335c6fd2807SJeff Garzik 
6336cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
6337c6fd2807SJeff Garzik 
6338c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
6339c6fd2807SJeff Garzik }
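
/*
 * Example usage (illustrative sketch): ata_interrupt() is normally
 * wired up when the LLD activates its host; "my_sht" is the driver's
 * scsi_host_template and the ata_host pointer is passed back as
 * dev_instance above.
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_interrupt,
 *			       IRQF_SHARED, &my_sht);
 */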
6340c6fd2807SJeff Garzik 
6341c6fd2807SJeff Garzik /**
6342c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
6343936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
6344c6fd2807SJeff Garzik  *
6345936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
6346c6fd2807SJeff Garzik  *
6347c6fd2807SJeff Garzik  *	LOCKING:
6348c6fd2807SJeff Garzik  *	None.
6349c6fd2807SJeff Garzik  *
6350c6fd2807SJeff Garzik  *	RETURNS:
6351c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
6352c6fd2807SJeff Garzik  */
6353936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
6354c6fd2807SJeff Garzik {
6355936fd732STejun Heo 	struct ata_port *ap = link->ap;
6356936fd732STejun Heo 
6357a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6358c6fd2807SJeff Garzik }
6359c6fd2807SJeff Garzik 
6360c6fd2807SJeff Garzik /**
6361c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
6362936fd732STejun Heo  *	@link: ATA link to read SCR for
6363c6fd2807SJeff Garzik  *	@reg: SCR to read
6364c6fd2807SJeff Garzik  *	@val: Place to store read value
6365c6fd2807SJeff Garzik  *
6366936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
6367633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6368633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6369c6fd2807SJeff Garzik  *
6370c6fd2807SJeff Garzik  *	LOCKING:
6371633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6372c6fd2807SJeff Garzik  *
6373c6fd2807SJeff Garzik  *	RETURNS:
6374c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6375c6fd2807SJeff Garzik  */
6376936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6377c6fd2807SJeff Garzik {
6378633273a3STejun Heo 	if (ata_is_host_link(link)) {
6379936fd732STejun Heo 		struct ata_port *ap = link->ap;
6380936fd732STejun Heo 
6381936fd732STejun Heo 		if (sata_scr_valid(link))
6382da3dbb17STejun Heo 			return ap->ops->scr_read(ap, reg, val);
6383c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6384c6fd2807SJeff Garzik 	}
6385c6fd2807SJeff Garzik 
6386633273a3STejun Heo 	return sata_pmp_scr_read(link, reg, val);
6387633273a3STejun Heo }
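
/*
 * Example usage (illustrative sketch): reading SStatus to report the
 * negotiated interface speed.  sata_scr_read() returns -EOPNOTSUPP on
 * ports without SCR access (e.g. PATA), so the return value must be
 * checked before trusting *val.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0) {
 *		u32 spd = (sstatus >> 4) & 0xf;
 *
 *		if (spd)
 *			ata_link_printk(link, KERN_INFO,
 *					"SATA link speed gen%u\n", spd);
 *	}
 */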
6388633273a3STejun Heo 
6389c6fd2807SJeff Garzik /**
6390c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
6391936fd732STejun Heo  *	@link: ATA link to write SCR for
6392c6fd2807SJeff Garzik  *	@reg: SCR to write
6393c6fd2807SJeff Garzik  *	@val: value to write
6394c6fd2807SJeff Garzik  *
6395936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
6396633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6397633273a3STejun Heo  *	the port is SATA and the port implements ->scr_write.
6398c6fd2807SJeff Garzik  *
6399c6fd2807SJeff Garzik  *	LOCKING:
6400633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6401c6fd2807SJeff Garzik  *
6402c6fd2807SJeff Garzik  *	RETURNS:
6403c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6404c6fd2807SJeff Garzik  */
6405936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
6406c6fd2807SJeff Garzik {
6407633273a3STejun Heo 	if (ata_is_host_link(link)) {
6408936fd732STejun Heo 		struct ata_port *ap = link->ap;
6409936fd732STejun Heo 
6410936fd732STejun Heo 		if (sata_scr_valid(link))
6411da3dbb17STejun Heo 			return ap->ops->scr_write(ap, reg, val);
6412c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6413c6fd2807SJeff Garzik 	}
6414c6fd2807SJeff Garzik 
6415633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6416633273a3STejun Heo }
6417633273a3STejun Heo 
6418c6fd2807SJeff Garzik /**
6419c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6420936fd732STejun Heo  *	@link: ATA link to write SCR for
6421c6fd2807SJeff Garzik  *	@reg: SCR to write
6422c6fd2807SJeff Garzik  *	@val: value to write
6423c6fd2807SJeff Garzik  *
6424c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
6425c6fd2807SJeff Garzik  *	function performs flush after writing to the register.
6426c6fd2807SJeff Garzik  *
6427c6fd2807SJeff Garzik  *	LOCKING:
6428633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6429c6fd2807SJeff Garzik  *
6430c6fd2807SJeff Garzik  *	RETURNS:
6431c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6432c6fd2807SJeff Garzik  */
6433936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6434c6fd2807SJeff Garzik {
6435633273a3STejun Heo 	if (ata_is_host_link(link)) {
6436936fd732STejun Heo 		struct ata_port *ap = link->ap;
6437da3dbb17STejun Heo 		int rc;
6438da3dbb17STejun Heo 
6439936fd732STejun Heo 		if (sata_scr_valid(link)) {
6440da3dbb17STejun Heo 			rc = ap->ops->scr_write(ap, reg, val);
6441da3dbb17STejun Heo 			if (rc == 0)
6442da3dbb17STejun Heo 				rc = ap->ops->scr_read(ap, reg, &val);
6443da3dbb17STejun Heo 			return rc;
6444c6fd2807SJeff Garzik 		}
6445c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6446c6fd2807SJeff Garzik 	}
6447c6fd2807SJeff Garzik 
6448633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6449633273a3STejun Heo }
6450633273a3STejun Heo 
6451c6fd2807SJeff Garzik /**
6452936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6453936fd732STejun Heo  *	@link: ATA link to test
6454c6fd2807SJeff Garzik  *
6455936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6456936fd732STejun Heo  *	0 if online status of @link cannot be obtained, so
6457936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6458c6fd2807SJeff Garzik  *
6459c6fd2807SJeff Garzik  *	LOCKING:
6460c6fd2807SJeff Garzik  *	None.
6461c6fd2807SJeff Garzik  *
6462c6fd2807SJeff Garzik  *	RETURNS:
6463c6fd2807SJeff Garzik  *	1 if the link online status is available and the link is online.
6464c6fd2807SJeff Garzik  */
6465936fd732STejun Heo int ata_link_online(struct ata_link *link)
6466c6fd2807SJeff Garzik {
6467c6fd2807SJeff Garzik 	u32 sstatus;
6468c6fd2807SJeff Garzik 
6469936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6470936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6471c6fd2807SJeff Garzik 		return 1;
6472c6fd2807SJeff Garzik 	return 0;
6473c6fd2807SJeff Garzik }
6474c6fd2807SJeff Garzik 
6475c6fd2807SJeff Garzik /**
6476936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6477936fd732STejun Heo  *	@link: ATA link to test
6478c6fd2807SJeff Garzik  *
6479936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6480936fd732STejun Heo  *	returns 0 if offline status of @link cannot be obtained, so
6481936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6482c6fd2807SJeff Garzik  *
6483c6fd2807SJeff Garzik  *	LOCKING:
6484c6fd2807SJeff Garzik  *	None.
6485c6fd2807SJeff Garzik  *
6486c6fd2807SJeff Garzik  *	RETURNS:
6487c6fd2807SJeff Garzik  *	1 if the link offline status is available and the link is offline.
6488c6fd2807SJeff Garzik  */
6489936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6490c6fd2807SJeff Garzik {
6491c6fd2807SJeff Garzik 	u32 sstatus;
6492c6fd2807SJeff Garzik 
6493936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6494936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6495c6fd2807SJeff Garzik 		return 1;
6496c6fd2807SJeff Garzik 	return 0;
6497c6fd2807SJeff Garzik }
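
/*
 * Note (illustrative): because both helpers return 0 when the link
 * status cannot be read, "not online" is not the same as "offline".
 * Code that needs to distinguish the two should check both:
 *
 *	if (ata_link_online(link))
 *		// device is definitely present
 *	else if (ata_link_offline(link))
 *		// definitely nothing attached
 *	else
 *		// unknown (e.g. no SCR access) - don't assume either
 */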
6498c6fd2807SJeff Garzik 
6499c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6500c6fd2807SJeff Garzik {
6501c6fd2807SJeff Garzik 	unsigned int err_mask;
6502c6fd2807SJeff Garzik 	u8 cmd;
6503c6fd2807SJeff Garzik 
6504c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6505c6fd2807SJeff Garzik 		return 0;
6506c6fd2807SJeff Garzik 
65076fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6508c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6509c6fd2807SJeff Garzik 	else
6510c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6511c6fd2807SJeff Garzik 
65124f34337bSAlan Cox 	/* This is wrong. On a failed flush we get back the LBA of the lost
65134f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
65144f34337bSAlan Cox 	   a further flush command to continue the writeback until it
65154f34337bSAlan Cox 	   does not error */
6516c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6517c6fd2807SJeff Garzik 	if (err_mask) {
6518c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6519c6fd2807SJeff Garzik 		return -EIO;
6520c6fd2807SJeff Garzik 	}
6521c6fd2807SJeff Garzik 
6522c6fd2807SJeff Garzik 	return 0;
6523c6fd2807SJeff Garzik }
6524c6fd2807SJeff Garzik 
65256ffa01d8STejun Heo #ifdef CONFIG_PM
6526cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6527cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6528cca3974eSJeff Garzik 			       int wait)
6529c6fd2807SJeff Garzik {
6530c6fd2807SJeff Garzik 	unsigned long flags;
6531c6fd2807SJeff Garzik 	int i, rc;
6532c6fd2807SJeff Garzik 
6533cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6534cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6535e3667ebfSTejun Heo 		struct ata_link *link;
6536c6fd2807SJeff Garzik 
6537c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6538c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6539c6fd2807SJeff Garzik 		 */
6540c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6541c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6542c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6543c6fd2807SJeff Garzik 		}
6544c6fd2807SJeff Garzik 
6545c6fd2807SJeff Garzik 		/* request PM ops to EH */
6546c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6547c6fd2807SJeff Garzik 
6548c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6549c6fd2807SJeff Garzik 		if (wait) {
6550c6fd2807SJeff Garzik 			rc = 0;
6551c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6552c6fd2807SJeff Garzik 		}
6553c6fd2807SJeff Garzik 
6554c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6555e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6556e3667ebfSTejun Heo 			link->eh_info.action |= action;
6557e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6558e3667ebfSTejun Heo 		}
6559c6fd2807SJeff Garzik 
6560c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6561c6fd2807SJeff Garzik 
6562c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6563c6fd2807SJeff Garzik 
6564c6fd2807SJeff Garzik 		/* wait and check result */
6565c6fd2807SJeff Garzik 		if (wait) {
6566c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6567c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6568c6fd2807SJeff Garzik 			if (rc)
6569c6fd2807SJeff Garzik 				return rc;
6570c6fd2807SJeff Garzik 		}
6571c6fd2807SJeff Garzik 	}
6572c6fd2807SJeff Garzik 
6573c6fd2807SJeff Garzik 	return 0;
6574c6fd2807SJeff Garzik }
6575c6fd2807SJeff Garzik 
6576c6fd2807SJeff Garzik /**
6577cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6578cca3974eSJeff Garzik  *	@host: host to suspend
6579c6fd2807SJeff Garzik  *	@mesg: PM message
6580c6fd2807SJeff Garzik  *
6581cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6582c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6583c6fd2807SJeff Garzik  *	to finish.
6584c6fd2807SJeff Garzik  *
6585c6fd2807SJeff Garzik  *	LOCKING:
6586c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6587c6fd2807SJeff Garzik  *
6588c6fd2807SJeff Garzik  *	RETURNS:
6589c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6590c6fd2807SJeff Garzik  */
6591cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6592c6fd2807SJeff Garzik {
65939666f400STejun Heo 	int rc;
6594c6fd2807SJeff Garzik 
6595ca77329fSKristen Carlson Accardi 	/*
6596ca77329fSKristen Carlson Accardi 	 * disable link pm on all ports before requesting
6597ca77329fSKristen Carlson Accardi 	 * any pm activity
6598ca77329fSKristen Carlson Accardi 	 */
6599ca77329fSKristen Carlson Accardi 	ata_lpm_enable(host);
6600ca77329fSKristen Carlson Accardi 
6601cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
66029666f400STejun Heo 	if (rc == 0)
6603cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
6604c6fd2807SJeff Garzik 	return rc;
6605c6fd2807SJeff Garzik }
6606c6fd2807SJeff Garzik 
6607c6fd2807SJeff Garzik /**
6608cca3974eSJeff Garzik  *	ata_host_resume - resume host
6609cca3974eSJeff Garzik  *	@host: host to resume
6610c6fd2807SJeff Garzik  *
6611cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6612c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6613c6fd2807SJeff Garzik  *	Note that all resume operations are performed in parallel.
6614c6fd2807SJeff Garzik  *
6615c6fd2807SJeff Garzik  *	LOCKING:
6616c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6617c6fd2807SJeff Garzik  */
6618cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6619c6fd2807SJeff Garzik {
6620cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6621c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6622cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6623ca77329fSKristen Carlson Accardi 
6624ca77329fSKristen Carlson Accardi 	/* reenable link pm */
6625ca77329fSKristen Carlson Accardi 	ata_lpm_disable(host);
6626c6fd2807SJeff Garzik }
66276ffa01d8STejun Heo #endif
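
/*
 * Example usage (illustrative sketch): a PCI LLD's power management
 * hooks usually just delegate to these helpers; libata also provides
 * ata_pci_device_suspend()/ata_pci_device_resume() wrappers which do
 * roughly this plus the PCI state save/restore.
 *
 *	static int my_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		return ata_host_suspend(host, mesg);
 *	}
 *
 *	static int my_pci_resume(struct pci_dev *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_resume(host);
 *		return 0;
 *	}
 */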
6628c6fd2807SJeff Garzik 
6629c6fd2807SJeff Garzik /**
6630c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6631c6fd2807SJeff Garzik  *	@ap: Port to initialize
6632c6fd2807SJeff Garzik  *
6633c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6634c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
6635c6fd2807SJeff Garzik  *
6636c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6637c6fd2807SJeff Garzik  *
6638c6fd2807SJeff Garzik  *	LOCKING:
6639c6fd2807SJeff Garzik  *	Inherited from caller.
6640c6fd2807SJeff Garzik  */
6641c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6642c6fd2807SJeff Garzik {
6643c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6644c6fd2807SJeff Garzik 	int rc;
6645c6fd2807SJeff Garzik 
6646f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6647f0d36efdSTejun Heo 				      GFP_KERNEL);
6648c6fd2807SJeff Garzik 	if (!ap->prd)
6649c6fd2807SJeff Garzik 		return -ENOMEM;
6650c6fd2807SJeff Garzik 
6651c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
6652f0d36efdSTejun Heo 	if (rc)
6653c6fd2807SJeff Garzik 		return rc;
6654c6fd2807SJeff Garzik 
6655f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6656f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
6657c6fd2807SJeff Garzik 	return 0;
6658c6fd2807SJeff Garzik }
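
/*
 * Example usage (illustrative sketch): a driver that needs per-port
 * private data can wrap ata_port_start() in its own port_start hook;
 * "struct my_port_priv" is hypothetical.
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		struct my_port_priv *pp;
 *		int rc;
 *
 *		rc = ata_port_start(ap);	// PRD table etc.
 *		if (rc)
 *			return rc;
 *
 *		pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */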
6659c6fd2807SJeff Garzik 
6660c6fd2807SJeff Garzik /**
6661c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6662c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6663c6fd2807SJeff Garzik  *
6664c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6665c6fd2807SJeff Garzik  *
6666c6fd2807SJeff Garzik  *	LOCKING:
6667c6fd2807SJeff Garzik  *	Inherited from caller.
6668c6fd2807SJeff Garzik  */
6669c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6670c6fd2807SJeff Garzik {
66719af5c9c9STejun Heo 	struct ata_link *link = dev->link;
66729af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6673c6fd2807SJeff Garzik 	unsigned long flags;
6674c6fd2807SJeff Garzik 
6675c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
66769af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
66779af5c9c9STejun Heo 	link->sata_spd = 0;
6678c6fd2807SJeff Garzik 
6679c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6680c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6681cca3974eSJeff Garzik 	 * host lock.
6682c6fd2807SJeff Garzik 	 */
6683c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6684c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
66853dcc323fSTejun Heo 	dev->horkage = 0;
6686c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6687c6fd2807SJeff Garzik 
6688c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6689c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6690c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6691c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6692c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6693c6fd2807SJeff Garzik }
6694c6fd2807SJeff Garzik 
6695c6fd2807SJeff Garzik /**
66964fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
66974fb37a25STejun Heo  *	@ap: ATA port link is attached to
66984fb37a25STejun Heo  *	@link: Link structure to initialize
66998989805dSTejun Heo  *	@pmp: Port multiplier port number
67004fb37a25STejun Heo  *
67014fb37a25STejun Heo  *	Initialize @link.
67024fb37a25STejun Heo  *
67034fb37a25STejun Heo  *	LOCKING:
67044fb37a25STejun Heo  *	Kernel thread context (may sleep)
67054fb37a25STejun Heo  */
6706fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
67074fb37a25STejun Heo {
67084fb37a25STejun Heo 	int i;
67094fb37a25STejun Heo 
67104fb37a25STejun Heo 	/* clear everything except for devices */
67114fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
67124fb37a25STejun Heo 
67134fb37a25STejun Heo 	link->ap = ap;
67148989805dSTejun Heo 	link->pmp = pmp;
67154fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
67164fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
67174fb37a25STejun Heo 
67184fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
67194fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
67204fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
67214fb37a25STejun Heo 
67224fb37a25STejun Heo 		dev->link = link;
67234fb37a25STejun Heo 		dev->devno = dev - link->device;
67244fb37a25STejun Heo 		ata_dev_init(dev);
67254fb37a25STejun Heo 	}
67264fb37a25STejun Heo }
67274fb37a25STejun Heo 
67284fb37a25STejun Heo /**
67294fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
67304fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
67314fb37a25STejun Heo  *
67324fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
67334fb37a25STejun Heo  *	configured value.
67344fb37a25STejun Heo  *
67354fb37a25STejun Heo  *	LOCKING:
67364fb37a25STejun Heo  *	Kernel thread context (may sleep).
67374fb37a25STejun Heo  *
67384fb37a25STejun Heo  *	RETURNS:
67394fb37a25STejun Heo  *	0 on success, -errno on failure.
67404fb37a25STejun Heo  */
6741fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
67424fb37a25STejun Heo {
67434fb37a25STejun Heo 	u32 scontrol, spd;
67444fb37a25STejun Heo 	int rc;
67454fb37a25STejun Heo 
67464fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
67474fb37a25STejun Heo 	if (rc)
67484fb37a25STejun Heo 		return rc;
67494fb37a25STejun Heo 
67504fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
67514fb37a25STejun Heo 	if (spd)
67524fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
67534fb37a25STejun Heo 
67544fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
67554fb37a25STejun Heo 
67564fb37a25STejun Heo 	return 0;
67574fb37a25STejun Heo }
67584fb37a25STejun Heo 
67594fb37a25STejun Heo /**
6760f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6761f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6762c6fd2807SJeff Garzik  *
6763f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6764f3187195STejun Heo  *
6765f3187195STejun Heo  *	RETURNS:
6766f3187195STejun Heo  *	Allocated ATA port on success, NULL on failure.
6767c6fd2807SJeff Garzik  *
6768c6fd2807SJeff Garzik  *	LOCKING:
6769f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6770c6fd2807SJeff Garzik  */
6771f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6772c6fd2807SJeff Garzik {
6773f3187195STejun Heo 	struct ata_port *ap;
6774c6fd2807SJeff Garzik 
6775f3187195STejun Heo 	DPRINTK("ENTER\n");
6776f3187195STejun Heo 
6777f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6778f3187195STejun Heo 	if (!ap)
6779f3187195STejun Heo 		return NULL;
6780f3187195STejun Heo 
6781f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6782cca3974eSJeff Garzik 	ap->lock = &host->lock;
6783c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6784f3187195STejun Heo 	ap->print_id = -1;
6785c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6786cca3974eSJeff Garzik 	ap->host = host;
6787f3187195STejun Heo 	ap->dev = host->dev;
6788c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6789c6fd2807SJeff Garzik 
6790c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6791c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6792c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6793c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6794c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6795c6fd2807SJeff Garzik #else
6796c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6797c6fd2807SJeff Garzik #endif
6798c6fd2807SJeff Garzik 
679965f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
680065f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
680165f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6802c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6803c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
68045ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
68055ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
68065ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6807c6fd2807SJeff Garzik 
6808c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6809c6fd2807SJeff Garzik 
68108989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6811c6fd2807SJeff Garzik 
6812c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6813c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6814c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6815c6fd2807SJeff Garzik #endif
6816c6fd2807SJeff Garzik 	return ap;
6817c6fd2807SJeff Garzik }
6818c6fd2807SJeff Garzik 
6819f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6820f0d36efdSTejun Heo {
6821f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6822f0d36efdSTejun Heo 	int i;
6823f0d36efdSTejun Heo 
6824f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6825f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6826f0d36efdSTejun Heo 
6827ecef7253STejun Heo 		if (!ap)
6828ecef7253STejun Heo 			continue;
6829ecef7253STejun Heo 
6830ecef7253STejun Heo 		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6831f0d36efdSTejun Heo 			ap->ops->port_stop(ap);
6832f0d36efdSTejun Heo 	}
6833f0d36efdSTejun Heo 
6834ecef7253STejun Heo 	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6835f0d36efdSTejun Heo 		host->ops->host_stop(host);
68361aa56ccaSTejun Heo 
68371aa506e4STejun Heo 	for (i = 0; i < host->n_ports; i++) {
68381aa506e4STejun Heo 		struct ata_port *ap = host->ports[i];
68391aa506e4STejun Heo 
68404911487aSTejun Heo 		if (!ap)
68414911487aSTejun Heo 			continue;
68424911487aSTejun Heo 
68434911487aSTejun Heo 		if (ap->scsi_host)
68441aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
68451aa506e4STejun Heo 
6846633273a3STejun Heo 		kfree(ap->pmp_link);
68474911487aSTejun Heo 		kfree(ap);
68481aa506e4STejun Heo 		host->ports[i] = NULL;
68491aa506e4STejun Heo 	}
68501aa506e4STejun Heo 
68511aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6852f0d36efdSTejun Heo }
6853f0d36efdSTejun Heo 
6854c6fd2807SJeff Garzik /**
6855f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6856f3187195STejun Heo  *	@dev: generic device this host is associated with
6857f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6858f3187195STejun Heo  *
6859f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  An LLD calls
6860f3187195STejun Heo  *	this function to allocate a host, initializes it fully, and then
6861f3187195STejun Heo  *	attaches it using ata_host_register().
6862f3187195STejun Heo  *
6863f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6864f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6865f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6866f3187195STejun Heo  *	ports will be automatically freed on registration.
6867f3187195STejun Heo  *
6868f3187195STejun Heo  *	RETURNS:
6869f3187195STejun Heo  *	Allocated ATA host on success, NULL on failure.
6870f3187195STejun Heo  *
6871f3187195STejun Heo  *	LOCKING:
6872f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6873f3187195STejun Heo  */
6874f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6875f3187195STejun Heo {
6876f3187195STejun Heo 	struct ata_host *host;
6877f3187195STejun Heo 	size_t sz;
6878f3187195STejun Heo 	int i;
6879f3187195STejun Heo 
6880f3187195STejun Heo 	DPRINTK("ENTER\n");
6881f3187195STejun Heo 
6882f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6883f3187195STejun Heo 		return NULL;
6884f3187195STejun Heo 
6885f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6886f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6888f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6889f3187195STejun Heo 	if (!host)
6890f3187195STejun Heo 		goto err_out;
6891f3187195STejun Heo 
6892f3187195STejun Heo 	devres_add(dev, host);
6893f3187195STejun Heo 	dev_set_drvdata(dev, host);
6894f3187195STejun Heo 
6895f3187195STejun Heo 	spin_lock_init(&host->lock);
6896f3187195STejun Heo 	host->dev = dev;
6897f3187195STejun Heo 	host->n_ports = max_ports;
6898f3187195STejun Heo 
6899f3187195STejun Heo 	/* allocate ports bound to this host */
6900f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6901f3187195STejun Heo 		struct ata_port *ap;
6902f3187195STejun Heo 
6903f3187195STejun Heo 		ap = ata_port_alloc(host);
6904f3187195STejun Heo 		if (!ap)
6905f3187195STejun Heo 			goto err_out;
6906f3187195STejun Heo 
6907f3187195STejun Heo 		ap->port_no = i;
6908f3187195STejun Heo 		host->ports[i] = ap;
6909f3187195STejun Heo 	}
6910f3187195STejun Heo 
6911f3187195STejun Heo 	devres_remove_group(dev, NULL);
6912f3187195STejun Heo 	return host;
6913f3187195STejun Heo 
6914f3187195STejun Heo  err_out:
6915f3187195STejun Heo 	devres_release_group(dev, NULL);
6916f3187195STejun Heo 	return NULL;
6917f3187195STejun Heo }
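/*
 * Illustrative sketch, not part of libata: an LLD that only discovers the
 * real port count while probing can trim host->n_ports before registration;
 * the surplus ports are freed by ata_host_register().  MY_MAX_PORTS and
 * n_found are hypothetical driver symbols.
 *
 *	host = ata_host_alloc(&pdev->dev, MY_MAX_PORTS);
 *	if (!host)
 *		return -ENOMEM;
 *	... (probe hardware, find that only n_found ports are wired up)
 *	host->n_ports = n_found;
 *	... (then start and register the host as usual)
 */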
6918f3187195STejun Heo 
6919f3187195STejun Heo /**
6920f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6921f5cda257STejun Heo  *	@dev: generic device this host is associated with
6922f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6923f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6924f5cda257STejun Heo  *
6925f5cda257STejun Heo  *	Allocate ATA host and initialize it with info from @ppi.  If @ppi
6926f5cda257STejun Heo  *	is NULL terminated, it may contain fewer entries than @n_ports;
6927f5cda257STejun Heo  *	the last entry is then used for the remaining ports.
6928f5cda257STejun Heo  *
6929f5cda257STejun Heo  *	RETURNS:
6930f5cda257STejun Heo  *	Allocated ATA host on success, NULL on failure.
6931f5cda257STejun Heo  *
6932f5cda257STejun Heo  *	LOCKING:
6933f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6934f5cda257STejun Heo  */
6935f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6936f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6937f5cda257STejun Heo 				      int n_ports)
6938f5cda257STejun Heo {
6939f5cda257STejun Heo 	const struct ata_port_info *pi;
6940f5cda257STejun Heo 	struct ata_host *host;
6941f5cda257STejun Heo 	int i, j;
6942f5cda257STejun Heo 
6943f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6944f5cda257STejun Heo 	if (!host)
6945f5cda257STejun Heo 		return NULL;
6946f5cda257STejun Heo 
6947f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6948f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6949f5cda257STejun Heo 
6950f5cda257STejun Heo 		if (ppi[j])
6951f5cda257STejun Heo 			pi = ppi[j++];
6952f5cda257STejun Heo 
6953f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6954f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6955f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6956f5cda257STejun Heo 		ap->flags |= pi->flags;
69570c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6958f5cda257STejun Heo 		ap->ops = pi->port_ops;
6959f5cda257STejun Heo 
6960f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6961f5cda257STejun Heo 			host->ops = pi->port_ops;
6962f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6963f5cda257STejun Heo 			host->private_data = pi->private_data;
6964f5cda257STejun Heo 	}
6965f5cda257STejun Heo 
6966f5cda257STejun Heo 	return host;
6967f5cda257STejun Heo }
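/*
 * Illustrative sketch, not part of libata: a typical LLD describes its
 * ports once and lets ata_host_alloc_pinfo() replicate the last entry
 * across all ports.  my_port_info, my_port_ops and my_probe are
 * hypothetical driver symbols.
 *
 *	static const struct ata_port_info my_port_info = {
 *		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 *		.pio_mask	= 0x1f,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *		struct ata_host *host;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *		...
 *	}
 */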
6968f5cda257STejun Heo 
6969f5cda257STejun Heo /**
6970ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6971ecef7253STejun Heo  *	@host: ATA host to start ports for
6972ecef7253STejun Heo  *
6973ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6974ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6975ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6976f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, it's set to the
6977f3187195STejun Heo  *	first non-dummy port ops.
6978ecef7253STejun Heo  *
6979ecef7253STejun Heo  *	LOCKING:
6980ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6981ecef7253STejun Heo  *
6982ecef7253STejun Heo  *	RETURNS:
6983ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6984ecef7253STejun Heo  */
6985ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6986ecef7253STejun Heo {
6987ecef7253STejun Heo 	int i, rc;
6988ecef7253STejun Heo 
6989ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6990ecef7253STejun Heo 		return 0;
6991ecef7253STejun Heo 
6992ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6993ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6994ecef7253STejun Heo 
6995f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6996f3187195STejun Heo 			host->ops = ap->ops;
6997f3187195STejun Heo 
6998ecef7253STejun Heo 		if (ap->ops->port_start) {
6999ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
7000ecef7253STejun Heo 			if (rc) {
7001ecef7253STejun Heo 				ata_port_printk(ap, KERN_ERR, "failed to "
7002ecef7253STejun Heo 						"start port (errno=%d)\n", rc);
7003ecef7253STejun Heo 				goto err_out;
7004ecef7253STejun Heo 			}
7005ecef7253STejun Heo 		}
7006ecef7253STejun Heo 
7007ecef7253STejun Heo 		ata_eh_freeze_port(ap);
7008ecef7253STejun Heo 	}
7009ecef7253STejun Heo 
7010ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
7011ecef7253STejun Heo 	return 0;
7012ecef7253STejun Heo 
7013ecef7253STejun Heo  err_out:
7014ecef7253STejun Heo 	while (--i >= 0) {
7015ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
7016ecef7253STejun Heo 
7017ecef7253STejun Heo 		if (ap->ops->port_stop)
7018ecef7253STejun Heo 			ap->ops->port_stop(ap);
7019ecef7253STejun Heo 	}
7020ecef7253STejun Heo 	return rc;
7021ecef7253STejun Heo }
7022ecef7253STejun Heo 
7023ecef7253STejun Heo /**
7024cca3974eSJeff Garzik  *	ata_host_init - Initialize a host struct
7025cca3974eSJeff Garzik  *	@host:	host to initialize
7026cca3974eSJeff Garzik  *	@dev:	device host is attached to
7027cca3974eSJeff Garzik  *	@flags:	host flags
7028c6fd2807SJeff Garzik  *	@ops:	port_ops
7029c6fd2807SJeff Garzik  *
7030c6fd2807SJeff Garzik  *	LOCKING:
7031c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
7032c6fd2807SJeff Garzik  *
7033c6fd2807SJeff Garzik  */
7034f3187195STejun Heo /* KILLME - the only user left is ipr */
7035cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
7036cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
7037c6fd2807SJeff Garzik {
7038cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
7039cca3974eSJeff Garzik 	host->dev = dev;
7040cca3974eSJeff Garzik 	host->flags = flags;
7041cca3974eSJeff Garzik 	host->ops = ops;
7042c6fd2807SJeff Garzik }
7043c6fd2807SJeff Garzik 
7044c6fd2807SJeff Garzik /**
7045f3187195STejun Heo  *	ata_host_register - register initialized ATA host
7046f3187195STejun Heo  *	@host: ATA host to register
7047f3187195STejun Heo  *	@sht: template for SCSI host
7048c6fd2807SJeff Garzik  *
7049f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
7050f3187195STejun Heo  *	ata_host_alloc() and fully initialized by the LLD.  This function
7051f3187195STejun Heo  *	starts ports, registers @host with the ATA and SCSI layers and
7052f3187195STejun Heo  *	probes registered devices.
7053c6fd2807SJeff Garzik  *
7054c6fd2807SJeff Garzik  *	LOCKING:
7055f3187195STejun Heo  *	Inherited from calling layer (may sleep).
7056c6fd2807SJeff Garzik  *
7057c6fd2807SJeff Garzik  *	RETURNS:
7058f3187195STejun Heo  *	0 on success, -errno otherwise.
7059c6fd2807SJeff Garzik  */
7060f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7061c6fd2807SJeff Garzik {
7062f3187195STejun Heo 	int i, rc;
7063c6fd2807SJeff Garzik 
7064f3187195STejun Heo 	/* host must have been started */
7065f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
7066f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
7067f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
7068f3187195STejun Heo 		WARN_ON(1);
7069f3187195STejun Heo 		return -EINVAL;
707002f076aaSAlan Cox 	}
7071f0d36efdSTejun Heo 
7072f3187195STejun Heo 	/* Blow away unused ports.  This happens when the LLD can't
7073f3187195STejun Heo 	 * determine the exact number of ports to allocate at
7074f3187195STejun Heo 	 * allocation time.
7075f3187195STejun Heo 	 */
7076f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
7077f3187195STejun Heo 		kfree(host->ports[i]);
7078f0d36efdSTejun Heo 
7079f3187195STejun Heo 	/* give ports names and add SCSI hosts */
7080f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
7081f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
7082c6fd2807SJeff Garzik 
7083f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
7084ecef7253STejun Heo 	if (rc)
7085f3187195STejun Heo 		return rc;
7086ecef7253STejun Heo 
7087fafbae87STejun Heo 	/* associate with ACPI nodes */
7088fafbae87STejun Heo 	ata_acpi_associate(host);
7089fafbae87STejun Heo 
7090f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
7091cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7092cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7093f3187195STejun Heo 		unsigned long xfer_mask;
7094f3187195STejun Heo 
7095f3187195STejun Heo 		/* set SATA cable type if still unset */
7096f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7097f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
7098c6fd2807SJeff Garzik 
7099c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
71004fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
7101c6fd2807SJeff Garzik 
7102cbcdd875STejun Heo 		/* print per-port info to dmesg */
7103f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7104f3187195STejun Heo 					      ap->udma_mask);
7105f3187195STejun Heo 
7106abf6e8edSTejun Heo 		if (!ata_port_is_dummy(ap)) {
7107cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
7108cbcdd875STejun Heo 					"%cATA max %s %s\n",
7109a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7110f3187195STejun Heo 					ata_mode_string(xfer_mask),
7111cbcdd875STejun Heo 					ap->link.eh_info.desc);
7112abf6e8edSTejun Heo 			ata_ehi_clear_desc(&ap->link.eh_info);
7113abf6e8edSTejun Heo 		} else
7114f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7115c6fd2807SJeff Garzik 	}
7116c6fd2807SJeff Garzik 
7117f3187195STejun Heo 	/* perform each probe synchronously */
7118f3187195STejun Heo 	DPRINTK("probe begin\n");
7119f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
7120f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
7121f3187195STejun Heo 		int rc;
7122f3187195STejun Heo 
7123f3187195STejun Heo 		/* probe */
7124c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
71259af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
7126c6fd2807SJeff Garzik 			unsigned long flags;
7127c6fd2807SJeff Garzik 
7128c6fd2807SJeff Garzik 			ata_port_probe(ap);
7129c6fd2807SJeff Garzik 
7130c6fd2807SJeff Garzik 			/* kick EH for boot probing */
7131c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
7132c6fd2807SJeff Garzik 
7133f58229f8STejun Heo 			ehi->probe_mask =
7134f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
7135c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
7136c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7137c6fd2807SJeff Garzik 
7138f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7139c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
7140c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
7141c6fd2807SJeff Garzik 
7142c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
7143c6fd2807SJeff Garzik 
7144c6fd2807SJeff Garzik 			/* wait for EH to finish */
7145c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
7146c6fd2807SJeff Garzik 		} else {
714744877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7148c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
714944877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
7150c6fd2807SJeff Garzik 
7151c6fd2807SJeff Garzik 			if (rc) {
7152c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
7153c6fd2807SJeff Garzik 				 * Current libata behavior will
7154c6fd2807SJeff Garzik 				 * tear down everything when
7155c6fd2807SJeff Garzik 				 * the module is removed
7156c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
7157c6fd2807SJeff Garzik 				 */
7158c6fd2807SJeff Garzik 			}
7159c6fd2807SJeff Garzik 		}
7160c6fd2807SJeff Garzik 	}
7161c6fd2807SJeff Garzik 
7162c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
7163c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
7164cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7165cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7166c6fd2807SJeff Garzik 
71671ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
7168ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
7169c6fd2807SJeff Garzik 	}
7170c6fd2807SJeff Garzik 
7171f3187195STejun Heo 	return 0;
7172f3187195STejun Heo }
7173f3187195STejun Heo 
7174f3187195STejun Heo /**
7175f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
7176f5cda257STejun Heo  *	@host: target ATA host
7177f5cda257STejun Heo  *	@irq: IRQ to request
7178f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
7179f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
7180f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
7181f5cda257STejun Heo  *
7182f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
7183f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
7184f5cda257STejun Heo  *	request IRQ and register it.  This helper takes necessasry
7185f5cda257STejun Heo  *	request IRQ and register it.  This helper takes the necessary
7186f5cda257STejun Heo  *
71873d46b2e2SPaul Mundt  *	An invalid IRQ skips the IRQ registration and expects the host to
71883d46b2e2SPaul Mundt  *	have set polling mode on the port. In this case, @irq_handler
71893d46b2e2SPaul Mundt  *	should be NULL.
71903d46b2e2SPaul Mundt  *
7191f5cda257STejun Heo  *	LOCKING:
7192f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
7193f5cda257STejun Heo  *
7194f5cda257STejun Heo  *	RETURNS:
7195f5cda257STejun Heo  *	0 on success, -errno otherwise.
7196f5cda257STejun Heo  */
7197f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
7198f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
7199f5cda257STejun Heo 		      struct scsi_host_template *sht)
7200f5cda257STejun Heo {
7201cbcdd875STejun Heo 	int i, rc;
7202f5cda257STejun Heo 
7203f5cda257STejun Heo 	rc = ata_host_start(host);
7204f5cda257STejun Heo 	if (rc)
7205f5cda257STejun Heo 		return rc;
7206f5cda257STejun Heo 
72073d46b2e2SPaul Mundt 	/* Special case for polling mode */
72083d46b2e2SPaul Mundt 	if (!irq) {
72093d46b2e2SPaul Mundt 		WARN_ON(irq_handler);
72103d46b2e2SPaul Mundt 		return ata_host_register(host, sht);
72113d46b2e2SPaul Mundt 	}
72123d46b2e2SPaul Mundt 
7213f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7214f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
7215f5cda257STejun Heo 	if (rc)
7216f5cda257STejun Heo 		return rc;
7217f5cda257STejun Heo 
7218cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
7219cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
72204031826bSTejun Heo 
7221f5cda257STejun Heo 	rc = ata_host_register(host, sht);
7222f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
7223f5cda257STejun Heo 	if (rc)
7224f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
7225f5cda257STejun Heo 
7226f5cda257STejun Heo 	return rc;
7227f5cda257STejun Heo }
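/*
 * Illustrative sketch, not part of libata: once the host is allocated and
 * its iomaps are set up, a PCI LLD can usually finish probing with a
 * single call.  my_interrupt and my_sht are hypothetical driver symbols.
 *
 *	pci_set_master(pdev);
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */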
7228f5cda257STejun Heo 
7229f5cda257STejun Heo /**
7230c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in preparation for device removal
7231c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
7232c6fd2807SJeff Garzik  *
7233c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
7234c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
7235c6fd2807SJeff Garzik  *	be quiescent on return from this function.
7236c6fd2807SJeff Garzik  *
7237c6fd2807SJeff Garzik  *	LOCKING:
7238c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
7239c6fd2807SJeff Garzik  */
7240741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap)
7241c6fd2807SJeff Garzik {
7242c6fd2807SJeff Garzik 	unsigned long flags;
724341bda9c9STejun Heo 	struct ata_link *link;
7244f58229f8STejun Heo 	struct ata_device *dev;
7245c6fd2807SJeff Garzik 
7246c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
7247c6fd2807SJeff Garzik 		goto skip_eh;
7248c6fd2807SJeff Garzik 
7249c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
7250c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7251c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
7252c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7253c6fd2807SJeff Garzik 
7254c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
7255c6fd2807SJeff Garzik 
7256c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
7257c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
7258c6fd2807SJeff Garzik 	 */
7259c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7260c6fd2807SJeff Garzik 
726141bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
726241bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
7263f58229f8STejun Heo 			ata_dev_disable(dev);
726441bda9c9STejun Heo 	}
7265c6fd2807SJeff Garzik 
7266c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7267c6fd2807SJeff Garzik 
7268c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
7269c6fd2807SJeff Garzik 	 * will be skipped and retries will be terminated with bad
7270c6fd2807SJeff Garzik 	 * target.
7271c6fd2807SJeff Garzik 	 */
7272c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7273c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
7274c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7275c6fd2807SJeff Garzik 
7276c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
727745a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
7278c6fd2807SJeff Garzik 
7279c6fd2807SJeff Garzik  skip_eh:
7280c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
7281cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
7282c6fd2807SJeff Garzik }
7283c6fd2807SJeff Garzik 
7284c6fd2807SJeff Garzik /**
72850529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
72860529c159STejun Heo  *	@host: Host to detach
72870529c159STejun Heo  *
72880529c159STejun Heo  *	Detach all ports of @host.
72890529c159STejun Heo  *
72900529c159STejun Heo  *	LOCKING:
72910529c159STejun Heo  *	Kernel thread context (may sleep).
72920529c159STejun Heo  */
72930529c159STejun Heo void ata_host_detach(struct ata_host *host)
72940529c159STejun Heo {
72950529c159STejun Heo 	int i;
72960529c159STejun Heo 
72970529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
72980529c159STejun Heo 		ata_port_detach(host->ports[i]);
72990529c159STejun Heo }
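/*
 * Illustrative sketch, not part of libata: non-PCI LLDs call
 * ata_host_detach() from their remove callback; devres takes care of the
 * rest.  my_platform_remove is a hypothetical driver symbol.
 *
 *	static int my_platform_remove(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = dev_get_drvdata(&pdev->dev);
 *
 *		ata_host_detach(host);
 *		return 0;
 *	}
 */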
73000529c159STejun Heo 
7301c6fd2807SJeff Garzik /**
7302c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
7303c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
7304c6fd2807SJeff Garzik  *
7305c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
7306c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7307c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
7308c6fd2807SJeff Garzik  *	relative to cmd_addr.
7309c6fd2807SJeff Garzik  *
7310c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7311c6fd2807SJeff Garzik  */
7312c6fd2807SJeff Garzik 
7313c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
7314c6fd2807SJeff Garzik {
7315c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7316c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7317c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7318c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7319c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7320c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7321c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7322c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7323c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7324c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7325c6fd2807SJeff Garzik }
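/*
 * Illustrative sketch, not part of libata: an LLD whose taskfile registers
 * sit at the standard offsets only fills in cmd_addr (plus the addresses
 * ata_std_ports() does not touch) and lets the helper derive the rest.
 * mmio, MY_CMD_OFS and MY_CTL_OFS are hypothetical.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = mmio + MY_CMD_OFS;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = mmio + MY_CTL_OFS;
 *	ata_std_ports(ioaddr);
 */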
7326c6fd2807SJeff Garzik 
7327c6fd2807SJeff Garzik 
7328c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7329c6fd2807SJeff Garzik 
7330c6fd2807SJeff Garzik /**
7331c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
7332c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
7333c6fd2807SJeff Garzik  *
7334b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that a hot-unplug or
7335b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
7336b878ca5dSTejun Heo  *	release is handled via devres.
7337c6fd2807SJeff Garzik  *
7338c6fd2807SJeff Garzik  *	LOCKING:
7339c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
7340c6fd2807SJeff Garzik  */
7341c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
7342c6fd2807SJeff Garzik {
73432855568bSJeff Garzik 	struct device *dev = &pdev->dev;
7344cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
7345c6fd2807SJeff Garzik 
7346f0d36efdSTejun Heo 	ata_host_detach(host);
7347c6fd2807SJeff Garzik }
7348c6fd2807SJeff Garzik 
7349c6fd2807SJeff Garzik /* move to PCI subsystem */
7350c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7351c6fd2807SJeff Garzik {
7352c6fd2807SJeff Garzik 	unsigned long tmp = 0;
7353c6fd2807SJeff Garzik 
7354c6fd2807SJeff Garzik 	switch (bits->width) {
7355c6fd2807SJeff Garzik 	case 1: {
7356c6fd2807SJeff Garzik 		u8 tmp8 = 0;
7357c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
7358c6fd2807SJeff Garzik 		tmp = tmp8;
7359c6fd2807SJeff Garzik 		break;
7360c6fd2807SJeff Garzik 	}
7361c6fd2807SJeff Garzik 	case 2: {
7362c6fd2807SJeff Garzik 		u16 tmp16 = 0;
7363c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
7364c6fd2807SJeff Garzik 		tmp = tmp16;
7365c6fd2807SJeff Garzik 		break;
7366c6fd2807SJeff Garzik 	}
7367c6fd2807SJeff Garzik 	case 4: {
7368c6fd2807SJeff Garzik 		u32 tmp32 = 0;
7369c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
7370c6fd2807SJeff Garzik 		tmp = tmp32;
7371c6fd2807SJeff Garzik 		break;
7372c6fd2807SJeff Garzik 	}
7373c6fd2807SJeff Garzik 
7374c6fd2807SJeff Garzik 	default:
7375c6fd2807SJeff Garzik 		return -EINVAL;
7376c6fd2807SJeff Garzik 	}
7377c6fd2807SJeff Garzik 
7378c6fd2807SJeff Garzik 	tmp &= bits->mask;
7379c6fd2807SJeff Garzik 
7380c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
7381c6fd2807SJeff Garzik }
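/*
 * Illustrative sketch, not part of libata: drivers typically keep a
 * per-port table of "port enable" config bits and consult it from their
 * prereset method.  The register offsets and masks below are hypothetical.
 *
 *	static const struct pci_bits my_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	(port 0)
 *		{ 0x43, 1, 0x80, 0x80 },	(port 1)
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits[ap->port_no]))
 *		return -ENOENT;		(port disabled, don't probe)
 */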
7382c6fd2807SJeff Garzik 
73836ffa01d8STejun Heo #ifdef CONFIG_PM
7384c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7385c6fd2807SJeff Garzik {
7386c6fd2807SJeff Garzik 	pci_save_state(pdev);
7387c6fd2807SJeff Garzik 	pci_disable_device(pdev);
73884c90d971STejun Heo 
73894c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
7390c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
7391c6fd2807SJeff Garzik }
7392c6fd2807SJeff Garzik 
7393553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
7394c6fd2807SJeff Garzik {
7395553c4aa6STejun Heo 	int rc;
7396553c4aa6STejun Heo 
7397c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
7398c6fd2807SJeff Garzik 	pci_restore_state(pdev);
7399553c4aa6STejun Heo 
7400f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
7401553c4aa6STejun Heo 	if (rc) {
7402553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
7403553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
7404553c4aa6STejun Heo 		return rc;
7405553c4aa6STejun Heo 	}
7406553c4aa6STejun Heo 
7407c6fd2807SJeff Garzik 	pci_set_master(pdev);
7408553c4aa6STejun Heo 	return 0;
7409c6fd2807SJeff Garzik }
7410c6fd2807SJeff Garzik 
7411c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7412c6fd2807SJeff Garzik {
7413cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7414c6fd2807SJeff Garzik 	int rc = 0;
7415c6fd2807SJeff Garzik 
7416cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
7417c6fd2807SJeff Garzik 	if (rc)
7418c6fd2807SJeff Garzik 		return rc;
7419c6fd2807SJeff Garzik 
7420c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
7421c6fd2807SJeff Garzik 
7422c6fd2807SJeff Garzik 	return 0;
7423c6fd2807SJeff Garzik }
7424c6fd2807SJeff Garzik 
7425c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
7426c6fd2807SJeff Garzik {
7427cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7428553c4aa6STejun Heo 	int rc;
7429c6fd2807SJeff Garzik 
7430553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
7431553c4aa6STejun Heo 	if (rc == 0)
7432cca3974eSJeff Garzik 		ata_host_resume(host);
7433553c4aa6STejun Heo 	return rc;
7434c6fd2807SJeff Garzik }
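/*
 * Illustrative sketch, not part of libata: LLDs that need no extra
 * suspend/resume work can point their pci_driver at the helpers above.
 * my_pci_driver, my_pci_ids, my_init_one and DRV_NAME are hypothetical
 * driver symbols.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= my_pci_ids,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */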
74356ffa01d8STejun Heo #endif /* CONFIG_PM */
74366ffa01d8STejun Heo 
7437c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7438c6fd2807SJeff Garzik 
7439c6fd2807SJeff Garzik 
7440c6fd2807SJeff Garzik static int __init ata_init(void)
7441c6fd2807SJeff Garzik {
7442c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
7443c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7444c6fd2807SJeff Garzik 	if (!ata_wq)
7445c6fd2807SJeff Garzik 		return -ENOMEM;
7446c6fd2807SJeff Garzik 
7447c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7448c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7449c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7450c6fd2807SJeff Garzik 		return -ENOMEM;
7451c6fd2807SJeff Garzik 	}
7452c6fd2807SJeff Garzik 
7453c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7454c6fd2807SJeff Garzik 	return 0;
7455c6fd2807SJeff Garzik }
7456c6fd2807SJeff Garzik 
7457c6fd2807SJeff Garzik static void __exit ata_exit(void)
7458c6fd2807SJeff Garzik {
7459c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7460c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7461c6fd2807SJeff Garzik }
7462c6fd2807SJeff Garzik 
7463a4625085SBrian King subsys_initcall(ata_init);
7464c6fd2807SJeff Garzik module_exit(ata_exit);
7465c6fd2807SJeff Garzik 
7466c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7467c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7468c6fd2807SJeff Garzik 
7469c6fd2807SJeff Garzik int ata_ratelimit(void)
7470c6fd2807SJeff Garzik {
7471c6fd2807SJeff Garzik 	int rc;
7472c6fd2807SJeff Garzik 	unsigned long flags;
7473c6fd2807SJeff Garzik 
7474c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7475c6fd2807SJeff Garzik 
7476c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7477c6fd2807SJeff Garzik 		rc = 1;
7478c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7479c6fd2807SJeff Garzik 	} else
7480c6fd2807SJeff Garzik 		rc = 0;
7481c6fd2807SJeff Garzik 
7482c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7483c6fd2807SJeff Garzik 
7484c6fd2807SJeff Garzik 	return rc;
7485c6fd2807SJeff Garzik }
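/*
 * Illustrative sketch, not part of libata: ata_ratelimit() gates messages
 * that could otherwise flood the log from interrupt context.  "spurious"
 * is a hypothetical condition.
 *
 *	if (spurious && ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
 */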
7486c6fd2807SJeff Garzik 
7487c6fd2807SJeff Garzik /**
7488c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7489c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7490c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7491c6fd2807SJeff Garzik  *	@val: Wait condition
7492c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7493c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7494c6fd2807SJeff Garzik  *
7495c6fd2807SJeff Garzik  *	Waiting for some bits of a register to change is a common
7496c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads the 32-bit
7497c6fd2807SJeff Garzik  *	LE IO-mapped register @reg and tests for the following condition:
7498c6fd2807SJeff Garzik  *
7499c6fd2807SJeff Garzik  *	(*@reg & mask) != val
7500c6fd2807SJeff Garzik  *
7501c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7502c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7503c6fd2807SJeff Garzik  *
7504c6fd2807SJeff Garzik  *	LOCKING:
7505c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7506c6fd2807SJeff Garzik  *
7507c6fd2807SJeff Garzik  *	RETURNS:
7508c6fd2807SJeff Garzik  *	The final register value.
7509c6fd2807SJeff Garzik  */
7510c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7511c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7512c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7513c6fd2807SJeff Garzik {
7514c6fd2807SJeff Garzik 	unsigned long timeout;
7515c6fd2807SJeff Garzik 	u32 tmp;
7516c6fd2807SJeff Garzik 
7517c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7518c6fd2807SJeff Garzik 
7519c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7520c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7521c6fd2807SJeff Garzik 	 * eat away the timeout.
7522c6fd2807SJeff Garzik 	 */
7523c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7524c6fd2807SJeff Garzik 
7525c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7526c6fd2807SJeff Garzik 		msleep(interval_msec);
7527c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7528c6fd2807SJeff Garzik 	}
7529c6fd2807SJeff Garzik 
7530c6fd2807SJeff Garzik 	return tmp;
7531c6fd2807SJeff Garzik }
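/*
 * Illustrative sketch, not part of libata: waiting up to 500ms, polling
 * every 1ms, for a controller to clear a busy bit.  mmio, MY_STATUS and
 * MY_BUSY are hypothetical.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(mmio + MY_STATUS, MY_BUSY, MY_BUSY, 1, 500);
 *	if (status & MY_BUSY)
 *		return -EBUSY;		(timed out, bit never cleared)
 */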
7532c6fd2807SJeff Garzik 
7533c6fd2807SJeff Garzik /*
7534c6fd2807SJeff Garzik  * Dummy port_ops
7535c6fd2807SJeff Garzik  */
7536c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7537c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7538c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7539c6fd2807SJeff Garzik 
7540c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7541c6fd2807SJeff Garzik {
7542c6fd2807SJeff Garzik 	return ATA_DRDY;
7543c6fd2807SJeff Garzik }
7544c6fd2807SJeff Garzik 
7545c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7546c6fd2807SJeff Garzik {
7547c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7548c6fd2807SJeff Garzik }
7549c6fd2807SJeff Garzik 
7550c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7551c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7552c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7553c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7554c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7555c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7556c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7557c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7558c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7559c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7560c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7561c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7562c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7563c6fd2807SJeff Garzik };
7564c6fd2807SJeff Garzik 
756521b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
756621b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
756721b0ad4fSTejun Heo };
756821b0ad4fSTejun Heo 
7569c6fd2807SJeff Garzik /*
7570c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7571c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7572c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7573c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7574c6fd2807SJeff Garzik  */
7575c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7576c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7577c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7578c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
757921b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7580c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7581c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7582cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7583f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7584f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7585ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7586f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7587f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
75880529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7589c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7590c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
7591c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7592c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7593c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7594c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7595c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7596c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7597c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7598c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
759943727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7600c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7601c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7602c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7603c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7604c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7605c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7606d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7607c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
760804351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
76090d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
76100d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
761131cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7612c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7613d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7614c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7615c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7616c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7617c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7618c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7619c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7620c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7621c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7622c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7623c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7624c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7625c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
762610305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7627c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7628936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7629936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7630c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_phy_reset);
7631c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(__sata_phy_reset);
7632c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7633c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7634c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7635cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7636c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7637c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7638c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7639c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7640c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7641c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7642c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7643c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
764488ff6eafSTejun Heo EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7645d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7646c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
7647c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7648c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7649c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7650c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7651c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7652c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7653c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7654c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7655c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7656c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7657936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7658936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
76596ffa01d8STejun Heo #ifdef CONFIG_PM
7660cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7661cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
76626ffa01d8STejun Heo #endif /* CONFIG_PM */
7663c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7664c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
766510305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7666c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7667c6fd2807SJeff Garzik 
7668c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7669c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7670c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7671c6fd2807SJeff Garzik 
7672c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7673c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7674d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
76751626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7676d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7677c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7678c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
76796ffa01d8STejun Heo #ifdef CONFIG_PM
7680c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7681c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7682c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7683c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
76846ffa01d8STejun Heo #endif /* CONFIG_PM */
7685c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7686c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7687c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7688c6fd2807SJeff Garzik 
768931f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
76903af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
76913af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
76923af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
76933af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
76943af9a77aSTejun Heo 
7695b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7696b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7697b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7698cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7699cbcdd875STejun Heo #ifdef CONFIG_PCI
7700cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7701cbcdd875STejun Heo #endif /* CONFIG_PCI */
7702c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eng_timeout);
7703c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7704dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7705c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7706c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
77077d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7708c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7709c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7710c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7711c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7712c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
771383625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7714a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7715be0d18dfSAlan Cox 
7716be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7717be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7718be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7719be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7720