xref: /openbmc/linux/drivers/ata/libata-core.c (revision f7fe7ad4)
1c6fd2807SJeff Garzik /*
2c6fd2807SJeff Garzik  *  libata-core.c - helper library for ATA
3c6fd2807SJeff Garzik  *
4c6fd2807SJeff Garzik  *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
5c6fd2807SJeff Garzik  *    		    Please ALWAYS copy linux-ide@vger.kernel.org
6c6fd2807SJeff Garzik  *		    on emails.
7c6fd2807SJeff Garzik  *
8c6fd2807SJeff Garzik  *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
9c6fd2807SJeff Garzik  *  Copyright 2003-2004 Jeff Garzik
10c6fd2807SJeff Garzik  *
11c6fd2807SJeff Garzik  *
12c6fd2807SJeff Garzik  *  This program is free software; you can redistribute it and/or modify
13c6fd2807SJeff Garzik  *  it under the terms of the GNU General Public License as published by
14c6fd2807SJeff Garzik  *  the Free Software Foundation; either version 2, or (at your option)
15c6fd2807SJeff Garzik  *  any later version.
16c6fd2807SJeff Garzik  *
17c6fd2807SJeff Garzik  *  This program is distributed in the hope that it will be useful,
18c6fd2807SJeff Garzik  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19c6fd2807SJeff Garzik  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20c6fd2807SJeff Garzik  *  GNU General Public License for more details.
21c6fd2807SJeff Garzik  *
22c6fd2807SJeff Garzik  *  You should have received a copy of the GNU General Public License
23c6fd2807SJeff Garzik  *  along with this program; see the file COPYING.  If not, write to
24c6fd2807SJeff Garzik  *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25c6fd2807SJeff Garzik  *
26c6fd2807SJeff Garzik  *
27c6fd2807SJeff Garzik  *  libata documentation is available via 'make {ps|pdf}docs',
28c6fd2807SJeff Garzik  *  as Documentation/DocBook/libata.*
29c6fd2807SJeff Garzik  *
30c6fd2807SJeff Garzik  *  Hardware documentation available from http://www.t13.org/ and
31c6fd2807SJeff Garzik  *  http://www.sata-io.org/
32c6fd2807SJeff Garzik  *
3392c52c52SAlan Cox  *  Standards documents from:
3492c52c52SAlan Cox  *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
3592c52c52SAlan Cox  *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
3692c52c52SAlan Cox  *	http://www.sata-io.org (SATA)
3792c52c52SAlan Cox  *	http://www.compactflash.org (CF)
3892c52c52SAlan Cox  *	http://www.qic.org (QIC157 - Tape and DSC)
3992c52c52SAlan Cox  *	http://www.ce-ata.org (CE-ATA: not supported)
4092c52c52SAlan Cox  *
41c6fd2807SJeff Garzik  */
42c6fd2807SJeff Garzik 
43c6fd2807SJeff Garzik #include <linux/kernel.h>
44c6fd2807SJeff Garzik #include <linux/module.h>
45c6fd2807SJeff Garzik #include <linux/pci.h>
46c6fd2807SJeff Garzik #include <linux/init.h>
47c6fd2807SJeff Garzik #include <linux/list.h>
48c6fd2807SJeff Garzik #include <linux/mm.h>
49c6fd2807SJeff Garzik #include <linux/highmem.h>
50c6fd2807SJeff Garzik #include <linux/spinlock.h>
51c6fd2807SJeff Garzik #include <linux/blkdev.h>
52c6fd2807SJeff Garzik #include <linux/delay.h>
53c6fd2807SJeff Garzik #include <linux/timer.h>
54c6fd2807SJeff Garzik #include <linux/interrupt.h>
55c6fd2807SJeff Garzik #include <linux/completion.h>
56c6fd2807SJeff Garzik #include <linux/suspend.h>
57c6fd2807SJeff Garzik #include <linux/workqueue.h>
58c6fd2807SJeff Garzik #include <linux/jiffies.h>
59c6fd2807SJeff Garzik #include <linux/scatterlist.h>
602dcb407eSJeff Garzik #include <linux/io.h>
61c6fd2807SJeff Garzik #include <scsi/scsi.h>
62c6fd2807SJeff Garzik #include <scsi/scsi_cmnd.h>
63c6fd2807SJeff Garzik #include <scsi/scsi_host.h>
64c6fd2807SJeff Garzik #include <linux/libata.h>
65c6fd2807SJeff Garzik #include <asm/semaphore.h>
66c6fd2807SJeff Garzik #include <asm/byteorder.h>
67c6fd2807SJeff Garzik 
68c6fd2807SJeff Garzik #include "libata.h"
69c6fd2807SJeff Garzik 
70fda0efc5SJeff Garzik 
71c6fd2807SJeff Garzik /* debounce timing parameters in msecs { interval, duration, timeout } */
72c6fd2807SJeff Garzik const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
73c6fd2807SJeff Garzik const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
74c6fd2807SJeff Garzik const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
75c6fd2807SJeff Garzik 
76c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
77c6fd2807SJeff Garzik 					u16 heads, u16 sectors);
78c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
79218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev,
80218f3d30SJeff Garzik 					u8 enable, u8 feature);
81c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev);
8275683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
83c6fd2807SJeff Garzik 
84f3187195STejun Heo unsigned int ata_print_id = 1;
85c6fd2807SJeff Garzik static struct workqueue_struct *ata_wq;
86c6fd2807SJeff Garzik 
87c6fd2807SJeff Garzik struct workqueue_struct *ata_aux_wq;
88c6fd2807SJeff Garzik 
89c6fd2807SJeff Garzik int atapi_enabled = 1;
90c6fd2807SJeff Garzik module_param(atapi_enabled, int, 0444);
91c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
92c6fd2807SJeff Garzik 
93c6fd2807SJeff Garzik int atapi_dmadir = 0;
94c6fd2807SJeff Garzik module_param(atapi_dmadir, int, 0444);
95c6fd2807SJeff Garzik MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
96c6fd2807SJeff Garzik 
97baf4fdfaSMark Lord int atapi_passthru16 = 1;
98baf4fdfaSMark Lord module_param(atapi_passthru16, int, 0444);
99baf4fdfaSMark Lord MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
100baf4fdfaSMark Lord 
101c6fd2807SJeff Garzik int libata_fua = 0;
102c6fd2807SJeff Garzik module_param_named(fua, libata_fua, int, 0444);
103c6fd2807SJeff Garzik MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
104c6fd2807SJeff Garzik 
1052dcb407eSJeff Garzik static int ata_ignore_hpa;
1061e999736SAlan Cox module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
1071e999736SAlan Cox MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
1081e999736SAlan Cox 
109b3a70601SAlan Cox static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
110b3a70601SAlan Cox module_param_named(dma, libata_dma_mask, int, 0444);
111b3a70601SAlan Cox MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
112b3a70601SAlan Cox 
113c6fd2807SJeff Garzik static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
114c6fd2807SJeff Garzik module_param(ata_probe_timeout, int, 0444);
115c6fd2807SJeff Garzik MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
116c6fd2807SJeff Garzik 
1176ebe9d86SJeff Garzik int libata_noacpi = 0;
118d7d0dad6SJeff Garzik module_param_named(noacpi, libata_noacpi, int, 0444);
1196ebe9d86SJeff Garzik MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
12011ef697bSKristen Carlson Accardi 
121c6fd2807SJeff Garzik MODULE_AUTHOR("Jeff Garzik");
122c6fd2807SJeff Garzik MODULE_DESCRIPTION("Library module for ATA devices");
123c6fd2807SJeff Garzik MODULE_LICENSE("GPL");
124c6fd2807SJeff Garzik MODULE_VERSION(DRV_VERSION);
125c6fd2807SJeff Garzik 
126c6fd2807SJeff Garzik 
127c6fd2807SJeff Garzik /**
128c6fd2807SJeff Garzik  *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
129c6fd2807SJeff Garzik  *	@tf: Taskfile to convert
130c6fd2807SJeff Garzik  *	@pmp: Port multiplier port
1319977126cSTejun Heo  *	@is_cmd: This FIS is for command
1329977126cSTejun Heo  *	@fis: Buffer into which data will output
133c6fd2807SJeff Garzik  *
134c6fd2807SJeff Garzik  *	Converts a standard ATA taskfile to a Serial ATA
135c6fd2807SJeff Garzik  *	FIS structure (Register - Host to Device).
136c6fd2807SJeff Garzik  *
137c6fd2807SJeff Garzik  *	LOCKING:
138c6fd2807SJeff Garzik  *	Inherited from caller.
139c6fd2807SJeff Garzik  */
1409977126cSTejun Heo void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
141c6fd2807SJeff Garzik {
142c6fd2807SJeff Garzik 	fis[0] = 0x27;			/* Register - Host to Device FIS */
1439977126cSTejun Heo 	fis[1] = pmp & 0xf;		/* Port multiplier number*/
1449977126cSTejun Heo 	if (is_cmd)
1459977126cSTejun Heo 		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */
1469977126cSTejun Heo 
147c6fd2807SJeff Garzik 	fis[2] = tf->command;
148c6fd2807SJeff Garzik 	fis[3] = tf->feature;
149c6fd2807SJeff Garzik 
150c6fd2807SJeff Garzik 	fis[4] = tf->lbal;
151c6fd2807SJeff Garzik 	fis[5] = tf->lbam;
152c6fd2807SJeff Garzik 	fis[6] = tf->lbah;
153c6fd2807SJeff Garzik 	fis[7] = tf->device;
154c6fd2807SJeff Garzik 
155c6fd2807SJeff Garzik 	fis[8] = tf->hob_lbal;
156c6fd2807SJeff Garzik 	fis[9] = tf->hob_lbam;
157c6fd2807SJeff Garzik 	fis[10] = tf->hob_lbah;
158c6fd2807SJeff Garzik 	fis[11] = tf->hob_feature;
159c6fd2807SJeff Garzik 
160c6fd2807SJeff Garzik 	fis[12] = tf->nsect;
161c6fd2807SJeff Garzik 	fis[13] = tf->hob_nsect;
162c6fd2807SJeff Garzik 	fis[14] = 0;
163c6fd2807SJeff Garzik 	fis[15] = tf->ctl;
164c6fd2807SJeff Garzik 
165c6fd2807SJeff Garzik 	fis[16] = 0;
166c6fd2807SJeff Garzik 	fis[17] = 0;
167c6fd2807SJeff Garzik 	fis[18] = 0;
168c6fd2807SJeff Garzik 	fis[19] = 0;
169c6fd2807SJeff Garzik }
170c6fd2807SJeff Garzik 
171c6fd2807SJeff Garzik /**
172c6fd2807SJeff Garzik  *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
173c6fd2807SJeff Garzik  *	@fis: Buffer from which data will be input
174c6fd2807SJeff Garzik  *	@tf: Taskfile to output
175c6fd2807SJeff Garzik  *
176c6fd2807SJeff Garzik  *	Converts a serial ATA FIS structure to a standard ATA taskfile.
177c6fd2807SJeff Garzik  *
178c6fd2807SJeff Garzik  *	LOCKING:
179c6fd2807SJeff Garzik  *	Inherited from caller.
180c6fd2807SJeff Garzik  */
181c6fd2807SJeff Garzik 
182c6fd2807SJeff Garzik void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
183c6fd2807SJeff Garzik {
184c6fd2807SJeff Garzik 	tf->command	= fis[2];	/* status */
185c6fd2807SJeff Garzik 	tf->feature	= fis[3];	/* error */
186c6fd2807SJeff Garzik 
187c6fd2807SJeff Garzik 	tf->lbal	= fis[4];
188c6fd2807SJeff Garzik 	tf->lbam	= fis[5];
189c6fd2807SJeff Garzik 	tf->lbah	= fis[6];
190c6fd2807SJeff Garzik 	tf->device	= fis[7];
191c6fd2807SJeff Garzik 
192c6fd2807SJeff Garzik 	tf->hob_lbal	= fis[8];
193c6fd2807SJeff Garzik 	tf->hob_lbam	= fis[9];
194c6fd2807SJeff Garzik 	tf->hob_lbah	= fis[10];
195c6fd2807SJeff Garzik 
196c6fd2807SJeff Garzik 	tf->nsect	= fis[12];
197c6fd2807SJeff Garzik 	tf->hob_nsect	= fis[13];
198c6fd2807SJeff Garzik }
199c6fd2807SJeff Garzik 
200c6fd2807SJeff Garzik static const u8 ata_rw_cmds[] = {
201c6fd2807SJeff Garzik 	/* pio multi */
202c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI,
203c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI,
204c6fd2807SJeff Garzik 	ATA_CMD_READ_MULTI_EXT,
205c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_EXT,
206c6fd2807SJeff Garzik 	0,
207c6fd2807SJeff Garzik 	0,
208c6fd2807SJeff Garzik 	0,
209c6fd2807SJeff Garzik 	ATA_CMD_WRITE_MULTI_FUA_EXT,
210c6fd2807SJeff Garzik 	/* pio */
211c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ,
212c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE,
213c6fd2807SJeff Garzik 	ATA_CMD_PIO_READ_EXT,
214c6fd2807SJeff Garzik 	ATA_CMD_PIO_WRITE_EXT,
215c6fd2807SJeff Garzik 	0,
216c6fd2807SJeff Garzik 	0,
217c6fd2807SJeff Garzik 	0,
218c6fd2807SJeff Garzik 	0,
219c6fd2807SJeff Garzik 	/* dma */
220c6fd2807SJeff Garzik 	ATA_CMD_READ,
221c6fd2807SJeff Garzik 	ATA_CMD_WRITE,
222c6fd2807SJeff Garzik 	ATA_CMD_READ_EXT,
223c6fd2807SJeff Garzik 	ATA_CMD_WRITE_EXT,
224c6fd2807SJeff Garzik 	0,
225c6fd2807SJeff Garzik 	0,
226c6fd2807SJeff Garzik 	0,
227c6fd2807SJeff Garzik 	ATA_CMD_WRITE_FUA_EXT
228c6fd2807SJeff Garzik };
229c6fd2807SJeff Garzik 
230c6fd2807SJeff Garzik /**
231c6fd2807SJeff Garzik  *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
232bd056d7eSTejun Heo  *	@tf: command to examine and configure
233bd056d7eSTejun Heo  *	@dev: device tf belongs to
234c6fd2807SJeff Garzik  *
235c6fd2807SJeff Garzik  *	Examine the device configuration and tf->flags to calculate
236c6fd2807SJeff Garzik  *	the proper read/write commands and protocol to use.
237c6fd2807SJeff Garzik  *
238c6fd2807SJeff Garzik  *	LOCKING:
239c6fd2807SJeff Garzik  *	caller.
240c6fd2807SJeff Garzik  */
241bd056d7eSTejun Heo static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
242c6fd2807SJeff Garzik {
243c6fd2807SJeff Garzik 	u8 cmd;
244c6fd2807SJeff Garzik 
245c6fd2807SJeff Garzik 	int index, fua, lba48, write;
246c6fd2807SJeff Garzik 
247c6fd2807SJeff Garzik 	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
248c6fd2807SJeff Garzik 	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
249c6fd2807SJeff Garzik 	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
250c6fd2807SJeff Garzik 
251c6fd2807SJeff Garzik 	if (dev->flags & ATA_DFLAG_PIO) {
252c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
253c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
2549af5c9c9STejun Heo 	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
255c6fd2807SJeff Garzik 		/* Unable to use DMA due to host limitation */
256c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_PIO;
257c6fd2807SJeff Garzik 		index = dev->multi_count ? 0 : 8;
258c6fd2807SJeff Garzik 	} else {
259c6fd2807SJeff Garzik 		tf->protocol = ATA_PROT_DMA;
260c6fd2807SJeff Garzik 		index = 16;
261c6fd2807SJeff Garzik 	}
262c6fd2807SJeff Garzik 
263c6fd2807SJeff Garzik 	cmd = ata_rw_cmds[index + fua + lba48 + write];
264c6fd2807SJeff Garzik 	if (cmd) {
265c6fd2807SJeff Garzik 		tf->command = cmd;
266c6fd2807SJeff Garzik 		return 0;
267c6fd2807SJeff Garzik 	}
268c6fd2807SJeff Garzik 	return -1;
269c6fd2807SJeff Garzik }
270c6fd2807SJeff Garzik 
271c6fd2807SJeff Garzik /**
27235b649feSTejun Heo  *	ata_tf_read_block - Read block address from ATA taskfile
27335b649feSTejun Heo  *	@tf: ATA taskfile of interest
27435b649feSTejun Heo  *	@dev: ATA device @tf belongs to
27535b649feSTejun Heo  *
27635b649feSTejun Heo  *	LOCKING:
27735b649feSTejun Heo  *	None.
27835b649feSTejun Heo  *
27935b649feSTejun Heo  *	Read block address from @tf.  This function can handle all
28035b649feSTejun Heo  *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
28135b649feSTejun Heo  *	flags select the address format to use.
28235b649feSTejun Heo  *
28335b649feSTejun Heo  *	RETURNS:
28435b649feSTejun Heo  *	Block address read from @tf.
28535b649feSTejun Heo  */
28635b649feSTejun Heo u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
28735b649feSTejun Heo {
28835b649feSTejun Heo 	u64 block = 0;
28935b649feSTejun Heo 
29035b649feSTejun Heo 	if (tf->flags & ATA_TFLAG_LBA) {
29135b649feSTejun Heo 		if (tf->flags & ATA_TFLAG_LBA48) {
29235b649feSTejun Heo 			block |= (u64)tf->hob_lbah << 40;
29335b649feSTejun Heo 			block |= (u64)tf->hob_lbam << 32;
29435b649feSTejun Heo 			block |= tf->hob_lbal << 24;
29535b649feSTejun Heo 		} else
29635b649feSTejun Heo 			block |= (tf->device & 0xf) << 24;
29735b649feSTejun Heo 
29835b649feSTejun Heo 		block |= tf->lbah << 16;
29935b649feSTejun Heo 		block |= tf->lbam << 8;
30035b649feSTejun Heo 		block |= tf->lbal;
30135b649feSTejun Heo 	} else {
30235b649feSTejun Heo 		u32 cyl, head, sect;
30335b649feSTejun Heo 
30435b649feSTejun Heo 		cyl = tf->lbam | (tf->lbah << 8);
30535b649feSTejun Heo 		head = tf->device & 0xf;
30635b649feSTejun Heo 		sect = tf->lbal;
30735b649feSTejun Heo 
30835b649feSTejun Heo 		block = (cyl * dev->heads + head) * dev->sectors + sect;
30935b649feSTejun Heo 	}
31035b649feSTejun Heo 
31135b649feSTejun Heo 	return block;
31235b649feSTejun Heo }
31335b649feSTejun Heo 
31435b649feSTejun Heo /**
315bd056d7eSTejun Heo  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
316bd056d7eSTejun Heo  *	@tf: Target ATA taskfile
317bd056d7eSTejun Heo  *	@dev: ATA device @tf belongs to
318bd056d7eSTejun Heo  *	@block: Block address
319bd056d7eSTejun Heo  *	@n_block: Number of blocks
320bd056d7eSTejun Heo  *	@tf_flags: RW/FUA etc...
321bd056d7eSTejun Heo  *	@tag: tag
322bd056d7eSTejun Heo  *
323bd056d7eSTejun Heo  *	LOCKING:
324bd056d7eSTejun Heo  *	None.
325bd056d7eSTejun Heo  *
326bd056d7eSTejun Heo  *	Build ATA taskfile @tf for read/write request described by
327bd056d7eSTejun Heo  *	@block, @n_block, @tf_flags and @tag on @dev.
328bd056d7eSTejun Heo  *
329bd056d7eSTejun Heo  *	RETURNS:
330bd056d7eSTejun Heo  *
331bd056d7eSTejun Heo  *	0 on success, -ERANGE if the request is too large for @dev,
332bd056d7eSTejun Heo  *	-EINVAL if the request is invalid.
333bd056d7eSTejun Heo  */
334bd056d7eSTejun Heo int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
335bd056d7eSTejun Heo 		    u64 block, u32 n_block, unsigned int tf_flags,
336bd056d7eSTejun Heo 		    unsigned int tag)
337bd056d7eSTejun Heo {
338bd056d7eSTejun Heo 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
339bd056d7eSTejun Heo 	tf->flags |= tf_flags;
340bd056d7eSTejun Heo 
3416d1245bfSTejun Heo 	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
342bd056d7eSTejun Heo 		/* yay, NCQ */
343bd056d7eSTejun Heo 		if (!lba_48_ok(block, n_block))
344bd056d7eSTejun Heo 			return -ERANGE;
345bd056d7eSTejun Heo 
346bd056d7eSTejun Heo 		tf->protocol = ATA_PROT_NCQ;
347bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
348bd056d7eSTejun Heo 
349bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_WRITE)
350bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_WRITE;
351bd056d7eSTejun Heo 		else
352bd056d7eSTejun Heo 			tf->command = ATA_CMD_FPDMA_READ;
353bd056d7eSTejun Heo 
354bd056d7eSTejun Heo 		tf->nsect = tag << 3;
355bd056d7eSTejun Heo 		tf->hob_feature = (n_block >> 8) & 0xff;
356bd056d7eSTejun Heo 		tf->feature = n_block & 0xff;
357bd056d7eSTejun Heo 
358bd056d7eSTejun Heo 		tf->hob_lbah = (block >> 40) & 0xff;
359bd056d7eSTejun Heo 		tf->hob_lbam = (block >> 32) & 0xff;
360bd056d7eSTejun Heo 		tf->hob_lbal = (block >> 24) & 0xff;
361bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
362bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
363bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
364bd056d7eSTejun Heo 
365bd056d7eSTejun Heo 		tf->device = 1 << 6;
366bd056d7eSTejun Heo 		if (tf->flags & ATA_TFLAG_FUA)
367bd056d7eSTejun Heo 			tf->device |= 1 << 7;
368bd056d7eSTejun Heo 	} else if (dev->flags & ATA_DFLAG_LBA) {
369bd056d7eSTejun Heo 		tf->flags |= ATA_TFLAG_LBA;
370bd056d7eSTejun Heo 
371bd056d7eSTejun Heo 		if (lba_28_ok(block, n_block)) {
372bd056d7eSTejun Heo 			/* use LBA28 */
373bd056d7eSTejun Heo 			tf->device |= (block >> 24) & 0xf;
374bd056d7eSTejun Heo 		} else if (lba_48_ok(block, n_block)) {
375bd056d7eSTejun Heo 			if (!(dev->flags & ATA_DFLAG_LBA48))
376bd056d7eSTejun Heo 				return -ERANGE;
377bd056d7eSTejun Heo 
378bd056d7eSTejun Heo 			/* use LBA48 */
379bd056d7eSTejun Heo 			tf->flags |= ATA_TFLAG_LBA48;
380bd056d7eSTejun Heo 
381bd056d7eSTejun Heo 			tf->hob_nsect = (n_block >> 8) & 0xff;
382bd056d7eSTejun Heo 
383bd056d7eSTejun Heo 			tf->hob_lbah = (block >> 40) & 0xff;
384bd056d7eSTejun Heo 			tf->hob_lbam = (block >> 32) & 0xff;
385bd056d7eSTejun Heo 			tf->hob_lbal = (block >> 24) & 0xff;
386bd056d7eSTejun Heo 		} else
387bd056d7eSTejun Heo 			/* request too large even for LBA48 */
388bd056d7eSTejun Heo 			return -ERANGE;
389bd056d7eSTejun Heo 
390bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
391bd056d7eSTejun Heo 			return -EINVAL;
392bd056d7eSTejun Heo 
393bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff;
394bd056d7eSTejun Heo 
395bd056d7eSTejun Heo 		tf->lbah = (block >> 16) & 0xff;
396bd056d7eSTejun Heo 		tf->lbam = (block >> 8) & 0xff;
397bd056d7eSTejun Heo 		tf->lbal = block & 0xff;
398bd056d7eSTejun Heo 
399bd056d7eSTejun Heo 		tf->device |= ATA_LBA;
400bd056d7eSTejun Heo 	} else {
401bd056d7eSTejun Heo 		/* CHS */
402bd056d7eSTejun Heo 		u32 sect, head, cyl, track;
403bd056d7eSTejun Heo 
404bd056d7eSTejun Heo 		/* The request -may- be too large for CHS addressing. */
405bd056d7eSTejun Heo 		if (!lba_28_ok(block, n_block))
406bd056d7eSTejun Heo 			return -ERANGE;
407bd056d7eSTejun Heo 
408bd056d7eSTejun Heo 		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
409bd056d7eSTejun Heo 			return -EINVAL;
410bd056d7eSTejun Heo 
411bd056d7eSTejun Heo 		/* Convert LBA to CHS */
412bd056d7eSTejun Heo 		track = (u32)block / dev->sectors;
413bd056d7eSTejun Heo 		cyl   = track / dev->heads;
414bd056d7eSTejun Heo 		head  = track % dev->heads;
415bd056d7eSTejun Heo 		sect  = (u32)block % dev->sectors + 1;
416bd056d7eSTejun Heo 
417bd056d7eSTejun Heo 		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
418bd056d7eSTejun Heo 			(u32)block, track, cyl, head, sect);
419bd056d7eSTejun Heo 
420bd056d7eSTejun Heo 		/* Check whether the converted CHS can fit.
421bd056d7eSTejun Heo 		   Cylinder: 0-65535
422bd056d7eSTejun Heo 		   Head: 0-15
423bd056d7eSTejun Heo 		   Sector: 1-255*/
424bd056d7eSTejun Heo 		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
425bd056d7eSTejun Heo 			return -ERANGE;
426bd056d7eSTejun Heo 
427bd056d7eSTejun Heo 		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
428bd056d7eSTejun Heo 		tf->lbal = sect;
429bd056d7eSTejun Heo 		tf->lbam = cyl;
430bd056d7eSTejun Heo 		tf->lbah = cyl >> 8;
431bd056d7eSTejun Heo 		tf->device |= head;
432bd056d7eSTejun Heo 	}
433bd056d7eSTejun Heo 
434bd056d7eSTejun Heo 	return 0;
435bd056d7eSTejun Heo }
436bd056d7eSTejun Heo 
437bd056d7eSTejun Heo /**
438c6fd2807SJeff Garzik  *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
439c6fd2807SJeff Garzik  *	@pio_mask: pio_mask
440c6fd2807SJeff Garzik  *	@mwdma_mask: mwdma_mask
441c6fd2807SJeff Garzik  *	@udma_mask: udma_mask
442c6fd2807SJeff Garzik  *
443c6fd2807SJeff Garzik  *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
444c6fd2807SJeff Garzik  *	unsigned int xfer_mask.
445c6fd2807SJeff Garzik  *
446c6fd2807SJeff Garzik  *	LOCKING:
447c6fd2807SJeff Garzik  *	None.
448c6fd2807SJeff Garzik  *
449c6fd2807SJeff Garzik  *	RETURNS:
450c6fd2807SJeff Garzik  *	Packed xfer_mask.
451c6fd2807SJeff Garzik  */
452c6fd2807SJeff Garzik static unsigned int ata_pack_xfermask(unsigned int pio_mask,
453c6fd2807SJeff Garzik 				      unsigned int mwdma_mask,
454c6fd2807SJeff Garzik 				      unsigned int udma_mask)
455c6fd2807SJeff Garzik {
456c6fd2807SJeff Garzik 	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
457c6fd2807SJeff Garzik 		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
458c6fd2807SJeff Garzik 		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
459c6fd2807SJeff Garzik }
460c6fd2807SJeff Garzik 
461c6fd2807SJeff Garzik /**
462c6fd2807SJeff Garzik  *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
463c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask to unpack
464c6fd2807SJeff Garzik  *	@pio_mask: resulting pio_mask
465c6fd2807SJeff Garzik  *	@mwdma_mask: resulting mwdma_mask
466c6fd2807SJeff Garzik  *	@udma_mask: resulting udma_mask
467c6fd2807SJeff Garzik  *
468c6fd2807SJeff Garzik  *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
469c6fd2807SJeff Garzik  *	Any NULL distination masks will be ignored.
470c6fd2807SJeff Garzik  */
471c6fd2807SJeff Garzik static void ata_unpack_xfermask(unsigned int xfer_mask,
472c6fd2807SJeff Garzik 				unsigned int *pio_mask,
473c6fd2807SJeff Garzik 				unsigned int *mwdma_mask,
474c6fd2807SJeff Garzik 				unsigned int *udma_mask)
475c6fd2807SJeff Garzik {
476c6fd2807SJeff Garzik 	if (pio_mask)
477c6fd2807SJeff Garzik 		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
478c6fd2807SJeff Garzik 	if (mwdma_mask)
479c6fd2807SJeff Garzik 		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
480c6fd2807SJeff Garzik 	if (udma_mask)
481c6fd2807SJeff Garzik 		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
482c6fd2807SJeff Garzik }
483c6fd2807SJeff Garzik 
484c6fd2807SJeff Garzik static const struct ata_xfer_ent {
485c6fd2807SJeff Garzik 	int shift, bits;
486c6fd2807SJeff Garzik 	u8 base;
487c6fd2807SJeff Garzik } ata_xfer_tbl[] = {
488c6fd2807SJeff Garzik 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
489c6fd2807SJeff Garzik 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
490c6fd2807SJeff Garzik 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
491c6fd2807SJeff Garzik 	{ -1, },
492c6fd2807SJeff Garzik };
493c6fd2807SJeff Garzik 
494c6fd2807SJeff Garzik /**
495c6fd2807SJeff Garzik  *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
496c6fd2807SJeff Garzik  *	@xfer_mask: xfer_mask of interest
497c6fd2807SJeff Garzik  *
498c6fd2807SJeff Garzik  *	Return matching XFER_* value for @xfer_mask.  Only the highest
499c6fd2807SJeff Garzik  *	bit of @xfer_mask is considered.
500c6fd2807SJeff Garzik  *
501c6fd2807SJeff Garzik  *	LOCKING:
502c6fd2807SJeff Garzik  *	None.
503c6fd2807SJeff Garzik  *
504c6fd2807SJeff Garzik  *	RETURNS:
505c6fd2807SJeff Garzik  *	Matching XFER_* value, 0 if no match found.
506c6fd2807SJeff Garzik  */
507c6fd2807SJeff Garzik static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
508c6fd2807SJeff Garzik {
509c6fd2807SJeff Garzik 	int highbit = fls(xfer_mask) - 1;
510c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
511c6fd2807SJeff Garzik 
512c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
513c6fd2807SJeff Garzik 		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
514c6fd2807SJeff Garzik 			return ent->base + highbit - ent->shift;
515c6fd2807SJeff Garzik 	return 0;
516c6fd2807SJeff Garzik }
517c6fd2807SJeff Garzik 
518c6fd2807SJeff Garzik /**
519c6fd2807SJeff Garzik  *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
520c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
521c6fd2807SJeff Garzik  *
522c6fd2807SJeff Garzik  *	Return matching xfer_mask for @xfer_mode.
523c6fd2807SJeff Garzik  *
524c6fd2807SJeff Garzik  *	LOCKING:
525c6fd2807SJeff Garzik  *	None.
526c6fd2807SJeff Garzik  *
527c6fd2807SJeff Garzik  *	RETURNS:
528c6fd2807SJeff Garzik  *	Matching xfer_mask, 0 if no match found.
529c6fd2807SJeff Garzik  */
530c6fd2807SJeff Garzik static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
531c6fd2807SJeff Garzik {
532c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
533c6fd2807SJeff Garzik 
534c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
535c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
536c6fd2807SJeff Garzik 			return 1 << (ent->shift + xfer_mode - ent->base);
537c6fd2807SJeff Garzik 	return 0;
538c6fd2807SJeff Garzik }
539c6fd2807SJeff Garzik 
540c6fd2807SJeff Garzik /**
541c6fd2807SJeff Garzik  *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
542c6fd2807SJeff Garzik  *	@xfer_mode: XFER_* of interest
543c6fd2807SJeff Garzik  *
544c6fd2807SJeff Garzik  *	Return matching xfer_shift for @xfer_mode.
545c6fd2807SJeff Garzik  *
546c6fd2807SJeff Garzik  *	LOCKING:
547c6fd2807SJeff Garzik  *	None.
548c6fd2807SJeff Garzik  *
549c6fd2807SJeff Garzik  *	RETURNS:
550c6fd2807SJeff Garzik  *	Matching xfer_shift, -1 if no match found.
551c6fd2807SJeff Garzik  */
552c6fd2807SJeff Garzik static int ata_xfer_mode2shift(unsigned int xfer_mode)
553c6fd2807SJeff Garzik {
554c6fd2807SJeff Garzik 	const struct ata_xfer_ent *ent;
555c6fd2807SJeff Garzik 
556c6fd2807SJeff Garzik 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
557c6fd2807SJeff Garzik 		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
558c6fd2807SJeff Garzik 			return ent->shift;
559c6fd2807SJeff Garzik 	return -1;
560c6fd2807SJeff Garzik }
561c6fd2807SJeff Garzik 
562c6fd2807SJeff Garzik /**
563c6fd2807SJeff Garzik  *	ata_mode_string - convert xfer_mask to string
564c6fd2807SJeff Garzik  *	@xfer_mask: mask of bits supported; only highest bit counts.
565c6fd2807SJeff Garzik  *
566c6fd2807SJeff Garzik  *	Determine string which represents the highest speed
567c6fd2807SJeff Garzik  *	(highest bit in @modemask).
568c6fd2807SJeff Garzik  *
569c6fd2807SJeff Garzik  *	LOCKING:
570c6fd2807SJeff Garzik  *	None.
571c6fd2807SJeff Garzik  *
572c6fd2807SJeff Garzik  *	RETURNS:
573c6fd2807SJeff Garzik  *	Constant C string representing highest speed listed in
574c6fd2807SJeff Garzik  *	@mode_mask, or the constant C string "<n/a>".
575c6fd2807SJeff Garzik  */
576c6fd2807SJeff Garzik static const char *ata_mode_string(unsigned int xfer_mask)
577c6fd2807SJeff Garzik {
578c6fd2807SJeff Garzik 	static const char * const xfer_mode_str[] = {
579c6fd2807SJeff Garzik 		"PIO0",
580c6fd2807SJeff Garzik 		"PIO1",
581c6fd2807SJeff Garzik 		"PIO2",
582c6fd2807SJeff Garzik 		"PIO3",
583c6fd2807SJeff Garzik 		"PIO4",
584b352e57dSAlan Cox 		"PIO5",
585b352e57dSAlan Cox 		"PIO6",
586c6fd2807SJeff Garzik 		"MWDMA0",
587c6fd2807SJeff Garzik 		"MWDMA1",
588c6fd2807SJeff Garzik 		"MWDMA2",
589b352e57dSAlan Cox 		"MWDMA3",
590b352e57dSAlan Cox 		"MWDMA4",
591c6fd2807SJeff Garzik 		"UDMA/16",
592c6fd2807SJeff Garzik 		"UDMA/25",
593c6fd2807SJeff Garzik 		"UDMA/33",
594c6fd2807SJeff Garzik 		"UDMA/44",
595c6fd2807SJeff Garzik 		"UDMA/66",
596c6fd2807SJeff Garzik 		"UDMA/100",
597c6fd2807SJeff Garzik 		"UDMA/133",
598c6fd2807SJeff Garzik 		"UDMA7",
599c6fd2807SJeff Garzik 	};
600c6fd2807SJeff Garzik 	int highbit;
601c6fd2807SJeff Garzik 
602c6fd2807SJeff Garzik 	highbit = fls(xfer_mask) - 1;
603c6fd2807SJeff Garzik 	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
604c6fd2807SJeff Garzik 		return xfer_mode_str[highbit];
605c6fd2807SJeff Garzik 	return "<n/a>";
606c6fd2807SJeff Garzik }
607c6fd2807SJeff Garzik 
608c6fd2807SJeff Garzik static const char *sata_spd_string(unsigned int spd)
609c6fd2807SJeff Garzik {
610c6fd2807SJeff Garzik 	static const char * const spd_str[] = {
611c6fd2807SJeff Garzik 		"1.5 Gbps",
612c6fd2807SJeff Garzik 		"3.0 Gbps",
613c6fd2807SJeff Garzik 	};
614c6fd2807SJeff Garzik 
615c6fd2807SJeff Garzik 	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
616c6fd2807SJeff Garzik 		return "<unknown>";
617c6fd2807SJeff Garzik 	return spd_str[spd - 1];
618c6fd2807SJeff Garzik }
619c6fd2807SJeff Garzik 
620c6fd2807SJeff Garzik void ata_dev_disable(struct ata_device *dev)
621c6fd2807SJeff Garzik {
62209d7f9b0STejun Heo 	if (ata_dev_enabled(dev)) {
6239af5c9c9STejun Heo 		if (ata_msg_drv(dev->link->ap))
624c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
6254ae72a1eSTejun Heo 		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
6264ae72a1eSTejun Heo 					     ATA_DNXFER_QUIET);
627c6fd2807SJeff Garzik 		dev->class++;
628c6fd2807SJeff Garzik 	}
629c6fd2807SJeff Garzik }
630c6fd2807SJeff Garzik 
631ca77329fSKristen Carlson Accardi static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
632ca77329fSKristen Carlson Accardi {
633ca77329fSKristen Carlson Accardi 	struct ata_link *link = dev->link;
634ca77329fSKristen Carlson Accardi 	struct ata_port *ap = link->ap;
635ca77329fSKristen Carlson Accardi 	u32 scontrol;
636ca77329fSKristen Carlson Accardi 	unsigned int err_mask;
637ca77329fSKristen Carlson Accardi 	int rc;
638ca77329fSKristen Carlson Accardi 
639ca77329fSKristen Carlson Accardi 	/*
640ca77329fSKristen Carlson Accardi 	 * disallow DIPM for drivers which haven't set
641ca77329fSKristen Carlson Accardi 	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
642ca77329fSKristen Carlson Accardi 	 * phy ready will be set in the interrupt status on
643ca77329fSKristen Carlson Accardi 	 * state changes, which will cause some drivers to
644ca77329fSKristen Carlson Accardi 	 * think there are errors - additionally drivers will
645ca77329fSKristen Carlson Accardi 	 * need to disable hot plug.
646ca77329fSKristen Carlson Accardi 	 */
647ca77329fSKristen Carlson Accardi 	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
648ca77329fSKristen Carlson Accardi 		ap->pm_policy = NOT_AVAILABLE;
649ca77329fSKristen Carlson Accardi 		return -EINVAL;
650ca77329fSKristen Carlson Accardi 	}
651ca77329fSKristen Carlson Accardi 
652ca77329fSKristen Carlson Accardi 	/*
653ca77329fSKristen Carlson Accardi 	 * For DIPM, we will only enable it for the
654ca77329fSKristen Carlson Accardi 	 * min_power setting.
655ca77329fSKristen Carlson Accardi 	 *
656ca77329fSKristen Carlson Accardi 	 * Why?  Because Disks are too stupid to know that
657ca77329fSKristen Carlson Accardi 	 * If the host rejects a request to go to SLUMBER
658ca77329fSKristen Carlson Accardi 	 * they should retry at PARTIAL, and instead it
659ca77329fSKristen Carlson Accardi 	 * just would give up.  So, for medium_power to
660ca77329fSKristen Carlson Accardi 	 * work at all, we need to only allow HIPM.
661ca77329fSKristen Carlson Accardi 	 */
662ca77329fSKristen Carlson Accardi 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
663ca77329fSKristen Carlson Accardi 	if (rc)
664ca77329fSKristen Carlson Accardi 		return rc;
665ca77329fSKristen Carlson Accardi 
666ca77329fSKristen Carlson Accardi 	switch (policy) {
667ca77329fSKristen Carlson Accardi 	case MIN_POWER:
668ca77329fSKristen Carlson Accardi 		/* no restrictions on IPM transitions */
669ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x3 << 8);
670ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
671ca77329fSKristen Carlson Accardi 		if (rc)
672ca77329fSKristen Carlson Accardi 			return rc;
673ca77329fSKristen Carlson Accardi 
674ca77329fSKristen Carlson Accardi 		/* enable DIPM */
675ca77329fSKristen Carlson Accardi 		if (dev->flags & ATA_DFLAG_DIPM)
676ca77329fSKristen Carlson Accardi 			err_mask = ata_dev_set_feature(dev,
677ca77329fSKristen Carlson Accardi 					SETFEATURES_SATA_ENABLE, SATA_DIPM);
678ca77329fSKristen Carlson Accardi 		break;
679ca77329fSKristen Carlson Accardi 	case MEDIUM_POWER:
680ca77329fSKristen Carlson Accardi 		/* allow IPM to PARTIAL */
681ca77329fSKristen Carlson Accardi 		scontrol &= ~(0x1 << 8);
682ca77329fSKristen Carlson Accardi 		scontrol |= (0x2 << 8);
683ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
684ca77329fSKristen Carlson Accardi 		if (rc)
685ca77329fSKristen Carlson Accardi 			return rc;
686ca77329fSKristen Carlson Accardi 
687f5456b63SKristen Carlson Accardi 		/*
688f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since IPM flags
689f5456b63SKristen Carlson Accardi 		 * disallow transitions to SLUMBER, which effectively
690f5456b63SKristen Carlson Accardi 		 * disable DIPM if it does not support PARTIAL
691f5456b63SKristen Carlson Accardi 		 */
692ca77329fSKristen Carlson Accardi 		break;
693ca77329fSKristen Carlson Accardi 	case NOT_AVAILABLE:
694ca77329fSKristen Carlson Accardi 	case MAX_PERFORMANCE:
695ca77329fSKristen Carlson Accardi 		/* disable all IPM transitions */
696ca77329fSKristen Carlson Accardi 		scontrol |= (0x3 << 8);
697ca77329fSKristen Carlson Accardi 		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
698ca77329fSKristen Carlson Accardi 		if (rc)
699ca77329fSKristen Carlson Accardi 			return rc;
700ca77329fSKristen Carlson Accardi 
701f5456b63SKristen Carlson Accardi 		/*
702f5456b63SKristen Carlson Accardi 		 * we don't have to disable DIPM since IPM flags
703f5456b63SKristen Carlson Accardi 		 * disallow all transitions which effectively
704f5456b63SKristen Carlson Accardi 		 * disable DIPM anyway.
705f5456b63SKristen Carlson Accardi 		 */
706ca77329fSKristen Carlson Accardi 		break;
707ca77329fSKristen Carlson Accardi 	}
708ca77329fSKristen Carlson Accardi 
709ca77329fSKristen Carlson Accardi 	/* FIXME: handle SET FEATURES failure */
710ca77329fSKristen Carlson Accardi 	(void) err_mask;
711ca77329fSKristen Carlson Accardi 
712ca77329fSKristen Carlson Accardi 	return 0;
713ca77329fSKristen Carlson Accardi }
714ca77329fSKristen Carlson Accardi 
715ca77329fSKristen Carlson Accardi /**
716ca77329fSKristen Carlson Accardi  *	ata_dev_enable_pm - enable SATA interface power management
71748166fd9SStephen Hemminger  *	@dev:  device to enable power management
71848166fd9SStephen Hemminger  *	@policy: the link power management policy
719ca77329fSKristen Carlson Accardi  *
720ca77329fSKristen Carlson Accardi  *	Enable SATA Interface power management.  This will enable
721ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) for min_power
722ca77329fSKristen Carlson Accardi  * 	policy, and then call driver specific callbacks for
723ca77329fSKristen Carlson Accardi  *	enabling Host Initiated Power management.
724ca77329fSKristen Carlson Accardi  *
725ca77329fSKristen Carlson Accardi  *	Locking: Caller.
726ca77329fSKristen Carlson Accardi  *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
727ca77329fSKristen Carlson Accardi  */
728ca77329fSKristen Carlson Accardi void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
729ca77329fSKristen Carlson Accardi {
730ca77329fSKristen Carlson Accardi 	int rc = 0;
731ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
732ca77329fSKristen Carlson Accardi 
733ca77329fSKristen Carlson Accardi 	/* set HIPM first, then DIPM */
734ca77329fSKristen Carlson Accardi 	if (ap->ops->enable_pm)
735ca77329fSKristen Carlson Accardi 		rc = ap->ops->enable_pm(ap, policy);
736ca77329fSKristen Carlson Accardi 	if (rc)
737ca77329fSKristen Carlson Accardi 		goto enable_pm_out;
738ca77329fSKristen Carlson Accardi 	rc = ata_dev_set_dipm(dev, policy);
739ca77329fSKristen Carlson Accardi 
740ca77329fSKristen Carlson Accardi enable_pm_out:
741ca77329fSKristen Carlson Accardi 	if (rc)
742ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
743ca77329fSKristen Carlson Accardi 	else
744ca77329fSKristen Carlson Accardi 		ap->pm_policy = policy;
745ca77329fSKristen Carlson Accardi 	return /* rc */;	/* hopefully we can use 'rc' eventually */
746ca77329fSKristen Carlson Accardi }
747ca77329fSKristen Carlson Accardi 
7481992a5edSStephen Rothwell #ifdef CONFIG_PM
749ca77329fSKristen Carlson Accardi /**
750ca77329fSKristen Carlson Accardi  *	ata_dev_disable_pm - disable SATA interface power management
75148166fd9SStephen Hemminger  *	@dev: device to disable power management
752ca77329fSKristen Carlson Accardi  *
753ca77329fSKristen Carlson Accardi  *	Disable SATA Interface power management.  This will disable
754ca77329fSKristen Carlson Accardi  *	Device Interface Power Management (DIPM) without changing
755ca77329fSKristen Carlson Accardi  * 	policy,  call driver specific callbacks for disabling Host
756ca77329fSKristen Carlson Accardi  * 	Initiated Power management.
757ca77329fSKristen Carlson Accardi  *
758ca77329fSKristen Carlson Accardi  *	Locking: Caller.
759ca77329fSKristen Carlson Accardi  *	Returns: void
760ca77329fSKristen Carlson Accardi  */
761ca77329fSKristen Carlson Accardi static void ata_dev_disable_pm(struct ata_device *dev)
762ca77329fSKristen Carlson Accardi {
763ca77329fSKristen Carlson Accardi 	struct ata_port *ap = dev->link->ap;
764ca77329fSKristen Carlson Accardi 
765ca77329fSKristen Carlson Accardi 	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
766ca77329fSKristen Carlson Accardi 	if (ap->ops->disable_pm)
767ca77329fSKristen Carlson Accardi 		ap->ops->disable_pm(ap);
768ca77329fSKristen Carlson Accardi }
7691992a5edSStephen Rothwell #endif	/* CONFIG_PM */
770ca77329fSKristen Carlson Accardi 
771ca77329fSKristen Carlson Accardi void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
772ca77329fSKristen Carlson Accardi {
773ca77329fSKristen Carlson Accardi 	ap->pm_policy = policy;
774ca77329fSKristen Carlson Accardi 	ap->link.eh_info.action |= ATA_EHI_LPM;
775ca77329fSKristen Carlson Accardi 	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
776ca77329fSKristen Carlson Accardi 	ata_port_schedule_eh(ap);
777ca77329fSKristen Carlson Accardi }
778ca77329fSKristen Carlson Accardi 
7791992a5edSStephen Rothwell #ifdef CONFIG_PM
780ca77329fSKristen Carlson Accardi static void ata_lpm_enable(struct ata_host *host)
781ca77329fSKristen Carlson Accardi {
782ca77329fSKristen Carlson Accardi 	struct ata_link *link;
783ca77329fSKristen Carlson Accardi 	struct ata_port *ap;
784ca77329fSKristen Carlson Accardi 	struct ata_device *dev;
785ca77329fSKristen Carlson Accardi 	int i;
786ca77329fSKristen Carlson Accardi 
787ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
788ca77329fSKristen Carlson Accardi 		ap = host->ports[i];
789ca77329fSKristen Carlson Accardi 		ata_port_for_each_link(link, ap) {
790ca77329fSKristen Carlson Accardi 			ata_link_for_each_dev(dev, link)
791ca77329fSKristen Carlson Accardi 				ata_dev_disable_pm(dev);
792ca77329fSKristen Carlson Accardi 		}
793ca77329fSKristen Carlson Accardi 	}
794ca77329fSKristen Carlson Accardi }
795ca77329fSKristen Carlson Accardi 
796ca77329fSKristen Carlson Accardi static void ata_lpm_disable(struct ata_host *host)
797ca77329fSKristen Carlson Accardi {
798ca77329fSKristen Carlson Accardi 	int i;
799ca77329fSKristen Carlson Accardi 
800ca77329fSKristen Carlson Accardi 	for (i = 0; i < host->n_ports; i++) {
801ca77329fSKristen Carlson Accardi 		struct ata_port *ap = host->ports[i];
802ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
803ca77329fSKristen Carlson Accardi 	}
804ca77329fSKristen Carlson Accardi }
8051992a5edSStephen Rothwell #endif	/* CONFIG_PM */
806ca77329fSKristen Carlson Accardi 
807ca77329fSKristen Carlson Accardi 
808c6fd2807SJeff Garzik /**
809c6fd2807SJeff Garzik  *	ata_devchk - PATA device presence detection
810c6fd2807SJeff Garzik  *	@ap: ATA channel to examine
811c6fd2807SJeff Garzik  *	@device: Device to examine (starting at zero)
812c6fd2807SJeff Garzik  *
8130d5ff566STejun Heo  *	This technique was originally described in
8140d5ff566STejun Heo  *	Hale Landis's ATADRVR (www.ata-atapi.com), and
8150d5ff566STejun Heo  *	later found its way into the ATA/ATAPI spec.
8160d5ff566STejun Heo  *
8170d5ff566STejun Heo  *	Write a pattern to the ATA shadow registers,
8180d5ff566STejun Heo  *	and if a device is present, it will respond by
8190d5ff566STejun Heo  *	correctly storing and echoing back the
8200d5ff566STejun Heo  *	ATA shadow register contents.
821c6fd2807SJeff Garzik  *
822c6fd2807SJeff Garzik  *	LOCKING:
823c6fd2807SJeff Garzik  *	caller.
824c6fd2807SJeff Garzik  */
825c6fd2807SJeff Garzik 
8260d5ff566STejun Heo static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
827c6fd2807SJeff Garzik {
8280d5ff566STejun Heo 	struct ata_ioports *ioaddr = &ap->ioaddr;
8290d5ff566STejun Heo 	u8 nsect, lbal;
8300d5ff566STejun Heo 
8310d5ff566STejun Heo 	ap->ops->dev_select(ap, device);
8320d5ff566STejun Heo 
8330d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8340d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8350d5ff566STejun Heo 
8360d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->nsect_addr);
8370d5ff566STejun Heo 	iowrite8(0x55, ioaddr->lbal_addr);
8380d5ff566STejun Heo 
8390d5ff566STejun Heo 	iowrite8(0x55, ioaddr->nsect_addr);
8400d5ff566STejun Heo 	iowrite8(0xaa, ioaddr->lbal_addr);
8410d5ff566STejun Heo 
8420d5ff566STejun Heo 	nsect = ioread8(ioaddr->nsect_addr);
8430d5ff566STejun Heo 	lbal = ioread8(ioaddr->lbal_addr);
8440d5ff566STejun Heo 
8450d5ff566STejun Heo 	if ((nsect == 0x55) && (lbal == 0xaa))
8460d5ff566STejun Heo 		return 1;	/* we found a device */
8470d5ff566STejun Heo 
8480d5ff566STejun Heo 	return 0;		/* nothing found */
849c6fd2807SJeff Garzik }
850c6fd2807SJeff Garzik 
851c6fd2807SJeff Garzik /**
852c6fd2807SJeff Garzik  *	ata_dev_classify - determine device type based on ATA-spec signature
853c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set for device to be identified
854c6fd2807SJeff Garzik  *
855c6fd2807SJeff Garzik  *	Determine from taskfile register contents whether a device is
856c6fd2807SJeff Garzik  *	ATA or ATAPI, as per "Signature and persistence" section
857c6fd2807SJeff Garzik  *	of ATA/PI spec (volume 1, sect 5.14).
858c6fd2807SJeff Garzik  *
859c6fd2807SJeff Garzik  *	LOCKING:
860c6fd2807SJeff Garzik  *	None.
861c6fd2807SJeff Garzik  *
862c6fd2807SJeff Garzik  *	RETURNS:
863633273a3STejun Heo  *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
864633273a3STejun Heo  *	%ATA_DEV_UNKNOWN the event of failure.
865c6fd2807SJeff Garzik  */
866c6fd2807SJeff Garzik unsigned int ata_dev_classify(const struct ata_taskfile *tf)
867c6fd2807SJeff Garzik {
868c6fd2807SJeff Garzik 	/* Apple's open source Darwin code hints that some devices only
869c6fd2807SJeff Garzik 	 * put a proper signature into the LBA mid/high registers,
870c6fd2807SJeff Garzik 	 * So, we only check those.  It's sufficient for uniqueness.
871633273a3STejun Heo 	 *
872633273a3STejun Heo 	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
873633273a3STejun Heo 	 * signatures for ATA and ATAPI devices attached on SerialATA,
874633273a3STejun Heo 	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
875633273a3STejun Heo 	 * spec has never mentioned about using different signatures
876633273a3STejun Heo 	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
877633273a3STejun Heo 	 * Multiplier specification began to use 0x69/0x96 to identify
878633273a3STejun Heo 	 * port multpliers and 0x3c/0xc3 to identify SEMB device.
879633273a3STejun Heo 	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
880633273a3STejun Heo 	 * 0x69/0x96 shortly and described them as reserved for
881633273a3STejun Heo 	 * SerialATA.
882633273a3STejun Heo 	 *
883633273a3STejun Heo 	 * We follow the current spec and consider that 0x69/0x96
884633273a3STejun Heo 	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
885c6fd2807SJeff Garzik 	 */
886633273a3STejun Heo 	if ((tf->lbam == 0) && (tf->lbah == 0)) {
887c6fd2807SJeff Garzik 		DPRINTK("found ATA device by sig\n");
888c6fd2807SJeff Garzik 		return ATA_DEV_ATA;
889c6fd2807SJeff Garzik 	}
890c6fd2807SJeff Garzik 
891633273a3STejun Heo 	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
892c6fd2807SJeff Garzik 		DPRINTK("found ATAPI device by sig\n");
893c6fd2807SJeff Garzik 		return ATA_DEV_ATAPI;
894c6fd2807SJeff Garzik 	}
895c6fd2807SJeff Garzik 
896633273a3STejun Heo 	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
897633273a3STejun Heo 		DPRINTK("found PMP device by sig\n");
898633273a3STejun Heo 		return ATA_DEV_PMP;
899633273a3STejun Heo 	}
900633273a3STejun Heo 
901633273a3STejun Heo 	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
9022dcb407eSJeff Garzik 		printk(KERN_INFO "ata: SEMB device ignored\n");
903633273a3STejun Heo 		return ATA_DEV_SEMB_UNSUP; /* not yet */
904633273a3STejun Heo 	}
905633273a3STejun Heo 
906c6fd2807SJeff Garzik 	DPRINTK("unknown device\n");
907c6fd2807SJeff Garzik 	return ATA_DEV_UNKNOWN;
908c6fd2807SJeff Garzik }
909c6fd2807SJeff Garzik 
910c6fd2807SJeff Garzik /**
911c6fd2807SJeff Garzik  *	ata_dev_try_classify - Parse returned ATA device signature
9123f19859eSTejun Heo  *	@dev: ATA device to classify (starting at zero)
9133f19859eSTejun Heo  *	@present: device seems present
914c6fd2807SJeff Garzik  *	@r_err: Value of error register on completion
915c6fd2807SJeff Garzik  *
916c6fd2807SJeff Garzik  *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
917c6fd2807SJeff Garzik  *	an ATA/ATAPI-defined set of values is placed in the ATA
918c6fd2807SJeff Garzik  *	shadow registers, indicating the results of device detection
919c6fd2807SJeff Garzik  *	and diagnostics.
920c6fd2807SJeff Garzik  *
921c6fd2807SJeff Garzik  *	Select the ATA device, and read the values from the ATA shadow
922c6fd2807SJeff Garzik  *	registers.  Then parse according to the Error register value,
923c6fd2807SJeff Garzik  *	and the spec-defined values examined by ata_dev_classify().
924c6fd2807SJeff Garzik  *
925c6fd2807SJeff Garzik  *	LOCKING:
926c6fd2807SJeff Garzik  *	caller.
927c6fd2807SJeff Garzik  *
928c6fd2807SJeff Garzik  *	RETURNS:
929c6fd2807SJeff Garzik  *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
930c6fd2807SJeff Garzik  */
9313f19859eSTejun Heo unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
9323f19859eSTejun Heo 				  u8 *r_err)
933c6fd2807SJeff Garzik {
9343f19859eSTejun Heo 	struct ata_port *ap = dev->link->ap;
935c6fd2807SJeff Garzik 	struct ata_taskfile tf;
936c6fd2807SJeff Garzik 	unsigned int class;
937c6fd2807SJeff Garzik 	u8 err;
938c6fd2807SJeff Garzik 
9393f19859eSTejun Heo 	ap->ops->dev_select(ap, dev->devno);
940c6fd2807SJeff Garzik 
941c6fd2807SJeff Garzik 	memset(&tf, 0, sizeof(tf));
942c6fd2807SJeff Garzik 
943c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &tf);
944c6fd2807SJeff Garzik 	err = tf.feature;
945c6fd2807SJeff Garzik 	if (r_err)
946c6fd2807SJeff Garzik 		*r_err = err;
947c6fd2807SJeff Garzik 
94893590859SAlan Cox 	/* see if device passed diags: if master then continue and warn later */
9493f19859eSTejun Heo 	if (err == 0 && dev->devno == 0)
95093590859SAlan Cox 		/* diagnostic fail : do nothing _YET_ */
9513f19859eSTejun Heo 		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
95293590859SAlan Cox 	else if (err == 1)
953c6fd2807SJeff Garzik 		/* do nothing */ ;
9543f19859eSTejun Heo 	else if ((dev->devno == 0) && (err == 0x81))
955c6fd2807SJeff Garzik 		/* do nothing */ ;
956c6fd2807SJeff Garzik 	else
957c6fd2807SJeff Garzik 		return ATA_DEV_NONE;
958c6fd2807SJeff Garzik 
959c6fd2807SJeff Garzik 	/* determine if device is ATA or ATAPI */
960c6fd2807SJeff Garzik 	class = ata_dev_classify(&tf);
961c6fd2807SJeff Garzik 
962d7fbee05STejun Heo 	if (class == ATA_DEV_UNKNOWN) {
963d7fbee05STejun Heo 		/* If the device failed diagnostic, it's likely to
964d7fbee05STejun Heo 		 * have reported incorrect device signature too.
965d7fbee05STejun Heo 		 * Assume ATA device if the device seems present but
966d7fbee05STejun Heo 		 * device signature is invalid with diagnostic
967d7fbee05STejun Heo 		 * failure.
968d7fbee05STejun Heo 		 */
969d7fbee05STejun Heo 		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
970d7fbee05STejun Heo 			class = ATA_DEV_ATA;
971d7fbee05STejun Heo 		else
972d7fbee05STejun Heo 			class = ATA_DEV_NONE;
973d7fbee05STejun Heo 	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
974d7fbee05STejun Heo 		class = ATA_DEV_NONE;
975d7fbee05STejun Heo 
976c6fd2807SJeff Garzik 	return class;
977c6fd2807SJeff Garzik }
978c6fd2807SJeff Garzik 
979c6fd2807SJeff Garzik /**
980c6fd2807SJeff Garzik  *	ata_id_string - Convert IDENTIFY DEVICE page into string
981c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
982c6fd2807SJeff Garzik  *	@s: string into which data is output
983c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
984c6fd2807SJeff Garzik  *	@len: length of string to return. must be an even number.
985c6fd2807SJeff Garzik  *
986c6fd2807SJeff Garzik  *	The strings in the IDENTIFY DEVICE page are broken up into
987c6fd2807SJeff Garzik  *	16-bit chunks.  Run through the string, and output each
988c6fd2807SJeff Garzik  *	8-bit chunk linearly, regardless of platform.
989c6fd2807SJeff Garzik  *
990c6fd2807SJeff Garzik  *	LOCKING:
991c6fd2807SJeff Garzik  *	caller.
992c6fd2807SJeff Garzik  */
993c6fd2807SJeff Garzik 
994c6fd2807SJeff Garzik void ata_id_string(const u16 *id, unsigned char *s,
995c6fd2807SJeff Garzik 		   unsigned int ofs, unsigned int len)
996c6fd2807SJeff Garzik {
997c6fd2807SJeff Garzik 	unsigned int c;
998c6fd2807SJeff Garzik 
999c6fd2807SJeff Garzik 	while (len > 0) {
1000c6fd2807SJeff Garzik 		c = id[ofs] >> 8;
1001c6fd2807SJeff Garzik 		*s = c;
1002c6fd2807SJeff Garzik 		s++;
1003c6fd2807SJeff Garzik 
1004c6fd2807SJeff Garzik 		c = id[ofs] & 0xff;
1005c6fd2807SJeff Garzik 		*s = c;
1006c6fd2807SJeff Garzik 		s++;
1007c6fd2807SJeff Garzik 
1008c6fd2807SJeff Garzik 		ofs++;
1009c6fd2807SJeff Garzik 		len -= 2;
1010c6fd2807SJeff Garzik 	}
1011c6fd2807SJeff Garzik }
1012c6fd2807SJeff Garzik 
1013c6fd2807SJeff Garzik /**
1014c6fd2807SJeff Garzik  *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1015c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE results we will examine
1016c6fd2807SJeff Garzik  *	@s: string into which data is output
1017c6fd2807SJeff Garzik  *	@ofs: offset into identify device page
1018c6fd2807SJeff Garzik  *	@len: length of string to return. must be an odd number.
1019c6fd2807SJeff Garzik  *
1020c6fd2807SJeff Garzik  *	This function is identical to ata_id_string except that it
1021c6fd2807SJeff Garzik  *	trims trailing spaces and terminates the resulting string with
1022c6fd2807SJeff Garzik  *	null.  @len must be actual maximum length (even number) + 1.
1023c6fd2807SJeff Garzik  *
1024c6fd2807SJeff Garzik  *	LOCKING:
1025c6fd2807SJeff Garzik  *	caller.
1026c6fd2807SJeff Garzik  */
1027c6fd2807SJeff Garzik void ata_id_c_string(const u16 *id, unsigned char *s,
1028c6fd2807SJeff Garzik 		     unsigned int ofs, unsigned int len)
1029c6fd2807SJeff Garzik {
1030c6fd2807SJeff Garzik 	unsigned char *p;
1031c6fd2807SJeff Garzik 
1032c6fd2807SJeff Garzik 	WARN_ON(!(len & 1));
1033c6fd2807SJeff Garzik 
1034c6fd2807SJeff Garzik 	ata_id_string(id, s, ofs, len - 1);
1035c6fd2807SJeff Garzik 
1036c6fd2807SJeff Garzik 	p = s + strnlen(s, len - 1);
1037c6fd2807SJeff Garzik 	while (p > s && p[-1] == ' ')
1038c6fd2807SJeff Garzik 		p--;
1039c6fd2807SJeff Garzik 	*p = '\0';
1040c6fd2807SJeff Garzik }
1041c6fd2807SJeff Garzik 
1042db6f8759STejun Heo static u64 ata_id_n_sectors(const u16 *id)
1043db6f8759STejun Heo {
1044db6f8759STejun Heo 	if (ata_id_has_lba(id)) {
1045db6f8759STejun Heo 		if (ata_id_has_lba48(id))
1046db6f8759STejun Heo 			return ata_id_u64(id, 100);
1047db6f8759STejun Heo 		else
1048db6f8759STejun Heo 			return ata_id_u32(id, 60);
1049db6f8759STejun Heo 	} else {
1050db6f8759STejun Heo 		if (ata_id_current_chs_valid(id))
1051db6f8759STejun Heo 			return ata_id_u32(id, 57);
1052db6f8759STejun Heo 		else
1053db6f8759STejun Heo 			return id[1] * id[3] * id[6];
1054db6f8759STejun Heo 	}
1055db6f8759STejun Heo }
1056db6f8759STejun Heo 
10571e999736SAlan Cox static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
10581e999736SAlan Cox {
10591e999736SAlan Cox 	u64 sectors = 0;
10601e999736SAlan Cox 
10611e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
10621e999736SAlan Cox 	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
10631e999736SAlan Cox 	sectors |= (tf->hob_lbal & 0xff) << 24;
10641e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10651e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10661e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10671e999736SAlan Cox 
10681e999736SAlan Cox 	return ++sectors;
10691e999736SAlan Cox }
10701e999736SAlan Cox 
10711e999736SAlan Cox static u64 ata_tf_to_lba(struct ata_taskfile *tf)
10721e999736SAlan Cox {
10731e999736SAlan Cox 	u64 sectors = 0;
10741e999736SAlan Cox 
10751e999736SAlan Cox 	sectors |= (tf->device & 0x0f) << 24;
10761e999736SAlan Cox 	sectors |= (tf->lbah & 0xff) << 16;
10771e999736SAlan Cox 	sectors |= (tf->lbam & 0xff) << 8;
10781e999736SAlan Cox 	sectors |= (tf->lbal & 0xff);
10791e999736SAlan Cox 
10801e999736SAlan Cox 	return ++sectors;
10811e999736SAlan Cox }
10821e999736SAlan Cox 
10831e999736SAlan Cox /**
1084c728a914STejun Heo  *	ata_read_native_max_address - Read native max address
1085c728a914STejun Heo  *	@dev: target device
1086c728a914STejun Heo  *	@max_sectors: out parameter for the result native max address
10871e999736SAlan Cox  *
1088c728a914STejun Heo  *	Perform an LBA48 or LBA28 native size query upon the device in
1089c728a914STejun Heo  *	question.
1090c728a914STejun Heo  *
1091c728a914STejun Heo  *	RETURNS:
1092c728a914STejun Heo  *	0 on success, -EACCES if command is aborted by the drive.
1093c728a914STejun Heo  *	-EIO on other errors.
10941e999736SAlan Cox  */
1095c728a914STejun Heo static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
10961e999736SAlan Cox {
1097c728a914STejun Heo 	unsigned int err_mask;
10981e999736SAlan Cox 	struct ata_taskfile tf;
1099c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
11001e999736SAlan Cox 
11011e999736SAlan Cox 	ata_tf_init(dev, &tf);
11021e999736SAlan Cox 
1103c728a914STejun Heo 	/* always clear all address registers */
11041e999736SAlan Cox 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1105c728a914STejun Heo 
1106c728a914STejun Heo 	if (lba48) {
1107c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1108c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
1109c728a914STejun Heo 	} else
1110c728a914STejun Heo 		tf.command = ATA_CMD_READ_NATIVE_MAX;
1111c728a914STejun Heo 
11121e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1113c728a914STejun Heo 	tf.device |= ATA_LBA;
11141e999736SAlan Cox 
11152b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1116c728a914STejun Heo 	if (err_mask) {
1117c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1118c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1119c728a914STejun Heo 		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1120c728a914STejun Heo 			return -EACCES;
1121c728a914STejun Heo 		return -EIO;
1122c728a914STejun Heo 	}
1123c728a914STejun Heo 
1124c728a914STejun Heo 	if (lba48)
1125c728a914STejun Heo 		*max_sectors = ata_tf_to_lba48(&tf);
1126c728a914STejun Heo 	else
1127c728a914STejun Heo 		*max_sectors = ata_tf_to_lba(&tf);
112893328e11SAlan Cox 	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
112993328e11SAlan Cox 		(*max_sectors)--;
11301e999736SAlan Cox 	return 0;
11311e999736SAlan Cox }
11321e999736SAlan Cox 
11331e999736SAlan Cox /**
1134c728a914STejun Heo  *	ata_set_max_sectors - Set max sectors
1135c728a914STejun Heo  *	@dev: target device
11366b38d1d1SRandy Dunlap  *	@new_sectors: new max sectors value to set for the device
11371e999736SAlan Cox  *
1138c728a914STejun Heo  *	Set max sectors of @dev to @new_sectors.
1139c728a914STejun Heo  *
1140c728a914STejun Heo  *	RETURNS:
1141c728a914STejun Heo  *	0 on success, -EACCES if command is aborted or denied (due to
1142c728a914STejun Heo  *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1143c728a914STejun Heo  *	errors.
11441e999736SAlan Cox  */
114505027adcSTejun Heo static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
11461e999736SAlan Cox {
1147c728a914STejun Heo 	unsigned int err_mask;
11481e999736SAlan Cox 	struct ata_taskfile tf;
1149c728a914STejun Heo 	int lba48 = ata_id_has_lba48(dev->id);
11501e999736SAlan Cox 
11511e999736SAlan Cox 	new_sectors--;
11521e999736SAlan Cox 
11531e999736SAlan Cox 	ata_tf_init(dev, &tf);
11541e999736SAlan Cox 
1155c728a914STejun Heo 	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
11561e999736SAlan Cox 
1157c728a914STejun Heo 	if (lba48) {
1158c728a914STejun Heo 		tf.command = ATA_CMD_SET_MAX_EXT;
1159c728a914STejun Heo 		tf.flags |= ATA_TFLAG_LBA48;
11601e999736SAlan Cox 
11611e999736SAlan Cox 		tf.hob_lbal = (new_sectors >> 24) & 0xff;
11621e999736SAlan Cox 		tf.hob_lbam = (new_sectors >> 32) & 0xff;
11631e999736SAlan Cox 		tf.hob_lbah = (new_sectors >> 40) & 0xff;
11641e582ba4STejun Heo 	} else {
11651e999736SAlan Cox 		tf.command = ATA_CMD_SET_MAX;
1166c728a914STejun Heo 
11671e582ba4STejun Heo 		tf.device |= (new_sectors >> 24) & 0xf;
11681e582ba4STejun Heo 	}
11691e582ba4STejun Heo 
11701e999736SAlan Cox 	tf.protocol |= ATA_PROT_NODATA;
1171c728a914STejun Heo 	tf.device |= ATA_LBA;
11721e999736SAlan Cox 
11731e999736SAlan Cox 	tf.lbal = (new_sectors >> 0) & 0xff;
11741e999736SAlan Cox 	tf.lbam = (new_sectors >> 8) & 0xff;
11751e999736SAlan Cox 	tf.lbah = (new_sectors >> 16) & 0xff;
11761e999736SAlan Cox 
11772b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1178c728a914STejun Heo 	if (err_mask) {
1179c728a914STejun Heo 		ata_dev_printk(dev, KERN_WARNING, "failed to set "
1180c728a914STejun Heo 			       "max address (err_mask=0x%x)\n", err_mask);
1181c728a914STejun Heo 		if (err_mask == AC_ERR_DEV &&
1182c728a914STejun Heo 		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1183c728a914STejun Heo 			return -EACCES;
1184c728a914STejun Heo 		return -EIO;
1185c728a914STejun Heo 	}
1186c728a914STejun Heo 
11871e999736SAlan Cox 	return 0;
11881e999736SAlan Cox }
11891e999736SAlan Cox 
11901e999736SAlan Cox /**
11911e999736SAlan Cox  *	ata_hpa_resize		-	Resize a device with an HPA set
11921e999736SAlan Cox  *	@dev: Device to resize
11931e999736SAlan Cox  *
11941e999736SAlan Cox  *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
11951e999736SAlan Cox  *	it if required to the full size of the media. The caller must check
11961e999736SAlan Cox  *	the drive has the HPA feature set enabled.
119705027adcSTejun Heo  *
119805027adcSTejun Heo  *	RETURNS:
119905027adcSTejun Heo  *	0 on success, -errno on failure.
12001e999736SAlan Cox  */
120105027adcSTejun Heo static int ata_hpa_resize(struct ata_device *dev)
12021e999736SAlan Cox {
120305027adcSTejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
120405027adcSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
120505027adcSTejun Heo 	u64 sectors = ata_id_n_sectors(dev->id);
120605027adcSTejun Heo 	u64 native_sectors;
1207c728a914STejun Heo 	int rc;
12081e999736SAlan Cox 
120905027adcSTejun Heo 	/* do we need to do it? */
121005027adcSTejun Heo 	if (dev->class != ATA_DEV_ATA ||
121105027adcSTejun Heo 	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
121205027adcSTejun Heo 	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1213c728a914STejun Heo 		return 0;
12141e999736SAlan Cox 
121505027adcSTejun Heo 	/* read native max address */
121605027adcSTejun Heo 	rc = ata_read_native_max_address(dev, &native_sectors);
121705027adcSTejun Heo 	if (rc) {
121805027adcSTejun Heo 		/* If HPA isn't going to be unlocked, skip HPA
121905027adcSTejun Heo 		 * resizing from the next try.
122005027adcSTejun Heo 		 */
122105027adcSTejun Heo 		if (!ata_ignore_hpa) {
122205027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
122305027adcSTejun Heo 				       "broken, will skip HPA handling\n");
122405027adcSTejun Heo 			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
122505027adcSTejun Heo 
122605027adcSTejun Heo 			/* we can continue if device aborted the command */
122705027adcSTejun Heo 			if (rc == -EACCES)
122805027adcSTejun Heo 				rc = 0;
122905027adcSTejun Heo 		}
123005027adcSTejun Heo 
123105027adcSTejun Heo 		return rc;
123205027adcSTejun Heo 	}
123305027adcSTejun Heo 
123405027adcSTejun Heo 	/* nothing to do? */
123505027adcSTejun Heo 	if (native_sectors <= sectors || !ata_ignore_hpa) {
123605027adcSTejun Heo 		if (!print_info || native_sectors == sectors)
123705027adcSTejun Heo 			return 0;
123805027adcSTejun Heo 
123905027adcSTejun Heo 		if (native_sectors > sectors)
12401e999736SAlan Cox 			ata_dev_printk(dev, KERN_INFO,
124105027adcSTejun Heo 				"HPA detected: current %llu, native %llu\n",
124205027adcSTejun Heo 				(unsigned long long)sectors,
124305027adcSTejun Heo 				(unsigned long long)native_sectors);
124405027adcSTejun Heo 		else if (native_sectors < sectors)
124505027adcSTejun Heo 			ata_dev_printk(dev, KERN_WARNING,
124605027adcSTejun Heo 				"native sectors (%llu) is smaller than "
124705027adcSTejun Heo 				"sectors (%llu)\n",
124805027adcSTejun Heo 				(unsigned long long)native_sectors,
124905027adcSTejun Heo 				(unsigned long long)sectors);
125005027adcSTejun Heo 		return 0;
12511e999736SAlan Cox 	}
125237301a55STejun Heo 
125305027adcSTejun Heo 	/* let's unlock HPA */
125405027adcSTejun Heo 	rc = ata_set_max_sectors(dev, native_sectors);
125505027adcSTejun Heo 	if (rc == -EACCES) {
125605027adcSTejun Heo 		/* if device aborted the command, skip HPA resizing */
125705027adcSTejun Heo 		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
125805027adcSTejun Heo 			       "(%llu -> %llu), skipping HPA handling\n",
125905027adcSTejun Heo 			       (unsigned long long)sectors,
126005027adcSTejun Heo 			       (unsigned long long)native_sectors);
126105027adcSTejun Heo 		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
126205027adcSTejun Heo 		return 0;
126305027adcSTejun Heo 	} else if (rc)
126405027adcSTejun Heo 		return rc;
126505027adcSTejun Heo 
126605027adcSTejun Heo 	/* re-read IDENTIFY data */
126705027adcSTejun Heo 	rc = ata_dev_reread_id(dev, 0);
126805027adcSTejun Heo 	if (rc) {
126905027adcSTejun Heo 		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
127005027adcSTejun Heo 			       "data after HPA resizing\n");
127105027adcSTejun Heo 		return rc;
127205027adcSTejun Heo 	}
127305027adcSTejun Heo 
127405027adcSTejun Heo 	if (print_info) {
127505027adcSTejun Heo 		u64 new_sectors = ata_id_n_sectors(dev->id);
127605027adcSTejun Heo 		ata_dev_printk(dev, KERN_INFO,
127705027adcSTejun Heo 			"HPA unlocked: %llu -> %llu, native %llu\n",
127805027adcSTejun Heo 			(unsigned long long)sectors,
127905027adcSTejun Heo 			(unsigned long long)new_sectors,
128005027adcSTejun Heo 			(unsigned long long)native_sectors);
128105027adcSTejun Heo 	}
128205027adcSTejun Heo 
128305027adcSTejun Heo 	return 0;
12841e999736SAlan Cox }
12851e999736SAlan Cox 
1286c6fd2807SJeff Garzik /**
128710305f0fSAlan  *	ata_id_to_dma_mode	-	Identify DMA mode from id block
128810305f0fSAlan  *	@dev: device to identify
1289cc261267SRandy Dunlap  *	@unknown: mode to assume if we cannot tell
129010305f0fSAlan  *
129110305f0fSAlan  *	Set up the timing values for the device based upon the identify
129210305f0fSAlan  *	reported values for the DMA mode. This function is used by drivers
129310305f0fSAlan  *	which rely upon firmware configured modes, but wish to report the
129410305f0fSAlan  *	mode correctly when possible.
129510305f0fSAlan  *
129610305f0fSAlan  *	In addition we emit similarly formatted messages to the default
129710305f0fSAlan  *	ata_dev_set_mode handler, in order to provide consistency of
129810305f0fSAlan  *	presentation.
129910305f0fSAlan  */
130010305f0fSAlan 
130110305f0fSAlan void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
130210305f0fSAlan {
130310305f0fSAlan 	unsigned int mask;
130410305f0fSAlan 	u8 mode;
130510305f0fSAlan 
130610305f0fSAlan 	/* Pack the DMA modes */
130710305f0fSAlan 	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
130810305f0fSAlan 	if (dev->id[53] & 0x04)
130910305f0fSAlan 		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
131010305f0fSAlan 
131110305f0fSAlan 	/* Select the mode in use */
131210305f0fSAlan 	mode = ata_xfer_mask2mode(mask);
131310305f0fSAlan 
131410305f0fSAlan 	if (mode != 0) {
131510305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
131610305f0fSAlan 		       ata_mode_string(mask));
131710305f0fSAlan 	} else {
131810305f0fSAlan 		/* SWDMA perhaps ? */
131910305f0fSAlan 		mode = unknown;
132010305f0fSAlan 		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
132110305f0fSAlan 	}
132210305f0fSAlan 
132310305f0fSAlan 	/* Configure the device reporting */
132410305f0fSAlan 	dev->xfer_mode = mode;
132510305f0fSAlan 	dev->xfer_shift = ata_xfer_mode2shift(mode);
132610305f0fSAlan }
132710305f0fSAlan 
132810305f0fSAlan /**
1329c6fd2807SJeff Garzik  *	ata_noop_dev_select - Select device 0/1 on ATA bus
1330c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1331c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1332c6fd2807SJeff Garzik  *
1333c6fd2807SJeff Garzik  *	This function performs no actual function.
1334c6fd2807SJeff Garzik  *
1335c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1336c6fd2807SJeff Garzik  *
1337c6fd2807SJeff Garzik  *	LOCKING:
1338c6fd2807SJeff Garzik  *	caller.
1339c6fd2807SJeff Garzik  */
1340c6fd2807SJeff Garzik void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1341c6fd2807SJeff Garzik {
1342c6fd2807SJeff Garzik }
1343c6fd2807SJeff Garzik 
1344c6fd2807SJeff Garzik 
1345c6fd2807SJeff Garzik /**
1346c6fd2807SJeff Garzik  *	ata_std_dev_select - Select device 0/1 on ATA bus
1347c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1348c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1349c6fd2807SJeff Garzik  *
1350c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1351c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1352c6fd2807SJeff Garzik  *	ATA channel.  Works with both PIO and MMIO.
1353c6fd2807SJeff Garzik  *
1354c6fd2807SJeff Garzik  *	May be used as the dev_select() entry in ata_port_operations.
1355c6fd2807SJeff Garzik  *
1356c6fd2807SJeff Garzik  *	LOCKING:
1357c6fd2807SJeff Garzik  *	caller.
1358c6fd2807SJeff Garzik  */
1359c6fd2807SJeff Garzik 
1360c6fd2807SJeff Garzik void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1361c6fd2807SJeff Garzik {
1362c6fd2807SJeff Garzik 	u8 tmp;
1363c6fd2807SJeff Garzik 
1364c6fd2807SJeff Garzik 	if (device == 0)
1365c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS;
1366c6fd2807SJeff Garzik 	else
1367c6fd2807SJeff Garzik 		tmp = ATA_DEVICE_OBS | ATA_DEV1;
1368c6fd2807SJeff Garzik 
13690d5ff566STejun Heo 	iowrite8(tmp, ap->ioaddr.device_addr);
1370c6fd2807SJeff Garzik 	ata_pause(ap);		/* needed; also flushes, for mmio */
1371c6fd2807SJeff Garzik }
1372c6fd2807SJeff Garzik 
1373c6fd2807SJeff Garzik /**
1374c6fd2807SJeff Garzik  *	ata_dev_select - Select device 0/1 on ATA bus
1375c6fd2807SJeff Garzik  *	@ap: ATA channel to manipulate
1376c6fd2807SJeff Garzik  *	@device: ATA device (numbered from zero) to select
1377c6fd2807SJeff Garzik  *	@wait: non-zero to wait for Status register BSY bit to clear
1378c6fd2807SJeff Garzik  *	@can_sleep: non-zero if context allows sleeping
1379c6fd2807SJeff Garzik  *
1380c6fd2807SJeff Garzik  *	Use the method defined in the ATA specification to
1381c6fd2807SJeff Garzik  *	make either device 0, or device 1, active on the
1382c6fd2807SJeff Garzik  *	ATA channel.
1383c6fd2807SJeff Garzik  *
1384c6fd2807SJeff Garzik  *	This is a high-level version of ata_std_dev_select(),
1385c6fd2807SJeff Garzik  *	which additionally provides the services of inserting
1386c6fd2807SJeff Garzik  *	the proper pauses and status polling, where needed.
1387c6fd2807SJeff Garzik  *
1388c6fd2807SJeff Garzik  *	LOCKING:
1389c6fd2807SJeff Garzik  *	caller.
1390c6fd2807SJeff Garzik  */
1391c6fd2807SJeff Garzik 
1392c6fd2807SJeff Garzik void ata_dev_select(struct ata_port *ap, unsigned int device,
1393c6fd2807SJeff Garzik 			   unsigned int wait, unsigned int can_sleep)
1394c6fd2807SJeff Garzik {
1395c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
139644877b4eSTejun Heo 		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
139744877b4eSTejun Heo 				"device %u, wait %u\n", device, wait);
1398c6fd2807SJeff Garzik 
1399c6fd2807SJeff Garzik 	if (wait)
1400c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1401c6fd2807SJeff Garzik 
1402c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, device);
1403c6fd2807SJeff Garzik 
1404c6fd2807SJeff Garzik 	if (wait) {
14059af5c9c9STejun Heo 		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1406c6fd2807SJeff Garzik 			msleep(150);
1407c6fd2807SJeff Garzik 		ata_wait_idle(ap);
1408c6fd2807SJeff Garzik 	}
1409c6fd2807SJeff Garzik }
1410c6fd2807SJeff Garzik 
1411c6fd2807SJeff Garzik /**
1412c6fd2807SJeff Garzik  *	ata_dump_id - IDENTIFY DEVICE info debugging output
1413c6fd2807SJeff Garzik  *	@id: IDENTIFY DEVICE page to dump
1414c6fd2807SJeff Garzik  *
1415c6fd2807SJeff Garzik  *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1416c6fd2807SJeff Garzik  *	page.
1417c6fd2807SJeff Garzik  *
1418c6fd2807SJeff Garzik  *	LOCKING:
1419c6fd2807SJeff Garzik  *	caller.
1420c6fd2807SJeff Garzik  */
1421c6fd2807SJeff Garzik 
1422c6fd2807SJeff Garzik static inline void ata_dump_id(const u16 *id)
1423c6fd2807SJeff Garzik {
1424c6fd2807SJeff Garzik 	DPRINTK("49==0x%04x  "
1425c6fd2807SJeff Garzik 		"53==0x%04x  "
1426c6fd2807SJeff Garzik 		"63==0x%04x  "
1427c6fd2807SJeff Garzik 		"64==0x%04x  "
1428c6fd2807SJeff Garzik 		"75==0x%04x  \n",
1429c6fd2807SJeff Garzik 		id[49],
1430c6fd2807SJeff Garzik 		id[53],
1431c6fd2807SJeff Garzik 		id[63],
1432c6fd2807SJeff Garzik 		id[64],
1433c6fd2807SJeff Garzik 		id[75]);
1434c6fd2807SJeff Garzik 	DPRINTK("80==0x%04x  "
1435c6fd2807SJeff Garzik 		"81==0x%04x  "
1436c6fd2807SJeff Garzik 		"82==0x%04x  "
1437c6fd2807SJeff Garzik 		"83==0x%04x  "
1438c6fd2807SJeff Garzik 		"84==0x%04x  \n",
1439c6fd2807SJeff Garzik 		id[80],
1440c6fd2807SJeff Garzik 		id[81],
1441c6fd2807SJeff Garzik 		id[82],
1442c6fd2807SJeff Garzik 		id[83],
1443c6fd2807SJeff Garzik 		id[84]);
1444c6fd2807SJeff Garzik 	DPRINTK("88==0x%04x  "
1445c6fd2807SJeff Garzik 		"93==0x%04x\n",
1446c6fd2807SJeff Garzik 		id[88],
1447c6fd2807SJeff Garzik 		id[93]);
1448c6fd2807SJeff Garzik }
1449c6fd2807SJeff Garzik 
1450c6fd2807SJeff Garzik /**
1451c6fd2807SJeff Garzik  *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1452c6fd2807SJeff Garzik  *	@id: IDENTIFY data to compute xfer mask from
1453c6fd2807SJeff Garzik  *
1454c6fd2807SJeff Garzik  *	Compute the xfermask for this device. This is not as trivial
1455c6fd2807SJeff Garzik  *	as it seems if we must consider early devices correctly.
1456c6fd2807SJeff Garzik  *
1457c6fd2807SJeff Garzik  *	FIXME: pre IDE drive timing (do we care ?).
1458c6fd2807SJeff Garzik  *
1459c6fd2807SJeff Garzik  *	LOCKING:
1460c6fd2807SJeff Garzik  *	None.
1461c6fd2807SJeff Garzik  *
1462c6fd2807SJeff Garzik  *	RETURNS:
1463c6fd2807SJeff Garzik  *	Computed xfermask
1464c6fd2807SJeff Garzik  */
1465c6fd2807SJeff Garzik static unsigned int ata_id_xfermask(const u16 *id)
1466c6fd2807SJeff Garzik {
1467c6fd2807SJeff Garzik 	unsigned int pio_mask, mwdma_mask, udma_mask;
1468c6fd2807SJeff Garzik 
1469c6fd2807SJeff Garzik 	/* Usual case. Word 53 indicates word 64 is valid */
1470c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1471c6fd2807SJeff Garzik 		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1472c6fd2807SJeff Garzik 		pio_mask <<= 3;
1473c6fd2807SJeff Garzik 		pio_mask |= 0x7;
1474c6fd2807SJeff Garzik 	} else {
1475c6fd2807SJeff Garzik 		/* If word 64 isn't valid then Word 51 high byte holds
1476c6fd2807SJeff Garzik 		 * the PIO timing number for the maximum. Turn it into
1477c6fd2807SJeff Garzik 		 * a mask.
1478c6fd2807SJeff Garzik 		 */
14797a0f1c8aSLennert Buytenhek 		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
148046767aebSAlan Cox 		if (mode < 5)	/* Valid PIO range */
148146767aebSAlan Cox 			pio_mask = (2 << mode) - 1;
148246767aebSAlan Cox 		else
148346767aebSAlan Cox 			pio_mask = 1;
1484c6fd2807SJeff Garzik 
1485c6fd2807SJeff Garzik 		/* But wait.. there's more. Design your standards by
1486c6fd2807SJeff Garzik 		 * committee and you too can get a free iordy field to
1487c6fd2807SJeff Garzik 		 * process. However its the speeds not the modes that
1488c6fd2807SJeff Garzik 		 * are supported... Note drivers using the timing API
1489c6fd2807SJeff Garzik 		 * will get this right anyway
1490c6fd2807SJeff Garzik 		 */
1491c6fd2807SJeff Garzik 	}
1492c6fd2807SJeff Garzik 
1493c6fd2807SJeff Garzik 	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1494c6fd2807SJeff Garzik 
1495b352e57dSAlan Cox 	if (ata_id_is_cfa(id)) {
1496b352e57dSAlan Cox 		/*
1497b352e57dSAlan Cox 		 *	Process compact flash extended modes
1498b352e57dSAlan Cox 		 */
1499b352e57dSAlan Cox 		int pio = id[163] & 0x7;
1500b352e57dSAlan Cox 		int dma = (id[163] >> 3) & 7;
1501b352e57dSAlan Cox 
1502b352e57dSAlan Cox 		if (pio)
1503b352e57dSAlan Cox 			pio_mask |= (1 << 5);
1504b352e57dSAlan Cox 		if (pio > 1)
1505b352e57dSAlan Cox 			pio_mask |= (1 << 6);
1506b352e57dSAlan Cox 		if (dma)
1507b352e57dSAlan Cox 			mwdma_mask |= (1 << 3);
1508b352e57dSAlan Cox 		if (dma > 1)
1509b352e57dSAlan Cox 			mwdma_mask |= (1 << 4);
1510b352e57dSAlan Cox 	}
1511b352e57dSAlan Cox 
1512c6fd2807SJeff Garzik 	udma_mask = 0;
1513c6fd2807SJeff Garzik 	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1514c6fd2807SJeff Garzik 		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1515c6fd2807SJeff Garzik 
1516c6fd2807SJeff Garzik 	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1517c6fd2807SJeff Garzik }
1518c6fd2807SJeff Garzik 
1519c6fd2807SJeff Garzik /**
1520c6fd2807SJeff Garzik  *	ata_port_queue_task - Queue port_task
1521c6fd2807SJeff Garzik  *	@ap: The ata_port to queue port_task for
1522c6fd2807SJeff Garzik  *	@fn: workqueue function to be scheduled
152365f27f38SDavid Howells  *	@data: data for @fn to use
1524c6fd2807SJeff Garzik  *	@delay: delay time for workqueue function
1525c6fd2807SJeff Garzik  *
1526c6fd2807SJeff Garzik  *	Schedule @fn(@data) for execution after @delay jiffies using
1527c6fd2807SJeff Garzik  *	port_task.  There is one port_task per port and it's the
1528c6fd2807SJeff Garzik  *	user(low level driver)'s responsibility to make sure that only
1529c6fd2807SJeff Garzik  *	one task is active at any given time.
1530c6fd2807SJeff Garzik  *
1531c6fd2807SJeff Garzik  *	libata core layer takes care of synchronization between
1532c6fd2807SJeff Garzik  *	port_task and EH.  ata_port_queue_task() may be ignored for EH
1533c6fd2807SJeff Garzik  *	synchronization.
1534c6fd2807SJeff Garzik  *
1535c6fd2807SJeff Garzik  *	LOCKING:
1536c6fd2807SJeff Garzik  *	Inherited from caller.
1537c6fd2807SJeff Garzik  */
153865f27f38SDavid Howells void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1539c6fd2807SJeff Garzik 			 unsigned long delay)
1540c6fd2807SJeff Garzik {
154165f27f38SDavid Howells 	PREPARE_DELAYED_WORK(&ap->port_task, fn);
154265f27f38SDavid Howells 	ap->port_task_data = data;
1543c6fd2807SJeff Garzik 
154445a66c1cSOleg Nesterov 	/* may fail if ata_port_flush_task() in progress */
154545a66c1cSOleg Nesterov 	queue_delayed_work(ata_wq, &ap->port_task, delay);
1546c6fd2807SJeff Garzik }
1547c6fd2807SJeff Garzik 
1548c6fd2807SJeff Garzik /**
1549c6fd2807SJeff Garzik  *	ata_port_flush_task - Flush port_task
1550c6fd2807SJeff Garzik  *	@ap: The ata_port to flush port_task for
1551c6fd2807SJeff Garzik  *
1552c6fd2807SJeff Garzik  *	After this function completes, port_task is guranteed not to
1553c6fd2807SJeff Garzik  *	be running or scheduled.
1554c6fd2807SJeff Garzik  *
1555c6fd2807SJeff Garzik  *	LOCKING:
1556c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1557c6fd2807SJeff Garzik  */
1558c6fd2807SJeff Garzik void ata_port_flush_task(struct ata_port *ap)
1559c6fd2807SJeff Garzik {
1560c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
1561c6fd2807SJeff Garzik 
156245a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->port_task);
1563c6fd2807SJeff Garzik 
1564c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
1565c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1566c6fd2807SJeff Garzik }
1567c6fd2807SJeff Garzik 
15687102d230SAdrian Bunk static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1569c6fd2807SJeff Garzik {
1570c6fd2807SJeff Garzik 	struct completion *waiting = qc->private_data;
1571c6fd2807SJeff Garzik 
1572c6fd2807SJeff Garzik 	complete(waiting);
1573c6fd2807SJeff Garzik }
1574c6fd2807SJeff Garzik 
1575c6fd2807SJeff Garzik /**
15762432697bSTejun Heo  *	ata_exec_internal_sg - execute libata internal command
1577c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1578c6fd2807SJeff Garzik  *	@tf: Taskfile registers for the command and the result
1579c6fd2807SJeff Garzik  *	@cdb: CDB for packet command
1580c6fd2807SJeff Garzik  *	@dma_dir: Data tranfer direction of the command
15815c1ad8b3SRandy Dunlap  *	@sgl: sg list for the data buffer of the command
15822432697bSTejun Heo  *	@n_elem: Number of sg entries
15832b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
1584c6fd2807SJeff Garzik  *
1585c6fd2807SJeff Garzik  *	Executes libata internal command with timeout.  @tf contains
1586c6fd2807SJeff Garzik  *	command on entry and result on return.  Timeout and error
1587c6fd2807SJeff Garzik  *	conditions are reported via return value.  No recovery action
1588c6fd2807SJeff Garzik  *	is taken after a command times out.  It's caller's duty to
1589c6fd2807SJeff Garzik  *	clean up after timeout.
1590c6fd2807SJeff Garzik  *
1591c6fd2807SJeff Garzik  *	LOCKING:
1592c6fd2807SJeff Garzik  *	None.  Should be called with kernel context, might sleep.
1593c6fd2807SJeff Garzik  *
1594c6fd2807SJeff Garzik  *	RETURNS:
1595c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1596c6fd2807SJeff Garzik  */
15972432697bSTejun Heo unsigned ata_exec_internal_sg(struct ata_device *dev,
1598c6fd2807SJeff Garzik 			      struct ata_taskfile *tf, const u8 *cdb,
159987260216SJens Axboe 			      int dma_dir, struct scatterlist *sgl,
16002b789108STejun Heo 			      unsigned int n_elem, unsigned long timeout)
1601c6fd2807SJeff Garzik {
16029af5c9c9STejun Heo 	struct ata_link *link = dev->link;
16039af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
1604c6fd2807SJeff Garzik 	u8 command = tf->command;
1605c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
1606c6fd2807SJeff Garzik 	unsigned int tag, preempted_tag;
1607c6fd2807SJeff Garzik 	u32 preempted_sactive, preempted_qc_active;
1608da917d69STejun Heo 	int preempted_nr_active_links;
1609c6fd2807SJeff Garzik 	DECLARE_COMPLETION_ONSTACK(wait);
1610c6fd2807SJeff Garzik 	unsigned long flags;
1611c6fd2807SJeff Garzik 	unsigned int err_mask;
1612c6fd2807SJeff Garzik 	int rc;
1613c6fd2807SJeff Garzik 
1614c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1615c6fd2807SJeff Garzik 
1616c6fd2807SJeff Garzik 	/* no internal command while frozen */
1617c6fd2807SJeff Garzik 	if (ap->pflags & ATA_PFLAG_FROZEN) {
1618c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1619c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
1620c6fd2807SJeff Garzik 	}
1621c6fd2807SJeff Garzik 
1622c6fd2807SJeff Garzik 	/* initialize internal qc */
1623c6fd2807SJeff Garzik 
1624c6fd2807SJeff Garzik 	/* XXX: Tag 0 is used for drivers with legacy EH as some
1625c6fd2807SJeff Garzik 	 * drivers choke if any other tag is given.  This breaks
1626c6fd2807SJeff Garzik 	 * ata_tag_internal() test for those drivers.  Don't use new
1627c6fd2807SJeff Garzik 	 * EH stuff without converting to it.
1628c6fd2807SJeff Garzik 	 */
1629c6fd2807SJeff Garzik 	if (ap->ops->error_handler)
1630c6fd2807SJeff Garzik 		tag = ATA_TAG_INTERNAL;
1631c6fd2807SJeff Garzik 	else
1632c6fd2807SJeff Garzik 		tag = 0;
1633c6fd2807SJeff Garzik 
1634c6fd2807SJeff Garzik 	if (test_and_set_bit(tag, &ap->qc_allocated))
1635c6fd2807SJeff Garzik 		BUG();
1636c6fd2807SJeff Garzik 	qc = __ata_qc_from_tag(ap, tag);
1637c6fd2807SJeff Garzik 
1638c6fd2807SJeff Garzik 	qc->tag = tag;
1639c6fd2807SJeff Garzik 	qc->scsicmd = NULL;
1640c6fd2807SJeff Garzik 	qc->ap = ap;
1641c6fd2807SJeff Garzik 	qc->dev = dev;
1642c6fd2807SJeff Garzik 	ata_qc_reinit(qc);
1643c6fd2807SJeff Garzik 
16449af5c9c9STejun Heo 	preempted_tag = link->active_tag;
16459af5c9c9STejun Heo 	preempted_sactive = link->sactive;
1646c6fd2807SJeff Garzik 	preempted_qc_active = ap->qc_active;
1647da917d69STejun Heo 	preempted_nr_active_links = ap->nr_active_links;
16489af5c9c9STejun Heo 	link->active_tag = ATA_TAG_POISON;
16499af5c9c9STejun Heo 	link->sactive = 0;
1650c6fd2807SJeff Garzik 	ap->qc_active = 0;
1651da917d69STejun Heo 	ap->nr_active_links = 0;
1652c6fd2807SJeff Garzik 
1653c6fd2807SJeff Garzik 	/* prepare & issue qc */
1654c6fd2807SJeff Garzik 	qc->tf = *tf;
1655c6fd2807SJeff Garzik 	if (cdb)
1656c6fd2807SJeff Garzik 		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1657c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_RESULT_TF;
1658c6fd2807SJeff Garzik 	qc->dma_dir = dma_dir;
1659c6fd2807SJeff Garzik 	if (dma_dir != DMA_NONE) {
16602432697bSTejun Heo 		unsigned int i, buflen = 0;
166187260216SJens Axboe 		struct scatterlist *sg;
16622432697bSTejun Heo 
166387260216SJens Axboe 		for_each_sg(sgl, sg, n_elem, i)
166487260216SJens Axboe 			buflen += sg->length;
16652432697bSTejun Heo 
166687260216SJens Axboe 		ata_sg_init(qc, sgl, n_elem);
166749c80429SBrian King 		qc->nbytes = buflen;
1668c6fd2807SJeff Garzik 	}
1669c6fd2807SJeff Garzik 
1670c6fd2807SJeff Garzik 	qc->private_data = &wait;
1671c6fd2807SJeff Garzik 	qc->complete_fn = ata_qc_complete_internal;
1672c6fd2807SJeff Garzik 
1673c6fd2807SJeff Garzik 	ata_qc_issue(qc);
1674c6fd2807SJeff Garzik 
1675c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1676c6fd2807SJeff Garzik 
16772b789108STejun Heo 	if (!timeout)
16782b789108STejun Heo 		timeout = ata_probe_timeout * 1000 / HZ;
16792b789108STejun Heo 
16802b789108STejun Heo 	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1681c6fd2807SJeff Garzik 
1682c6fd2807SJeff Garzik 	ata_port_flush_task(ap);
1683c6fd2807SJeff Garzik 
1684c6fd2807SJeff Garzik 	if (!rc) {
1685c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
1686c6fd2807SJeff Garzik 
1687c6fd2807SJeff Garzik 		/* We're racing with irq here.  If we lose, the
1688c6fd2807SJeff Garzik 		 * following test prevents us from completing the qc
1689c6fd2807SJeff Garzik 		 * twice.  If we win, the port is frozen and will be
1690c6fd2807SJeff Garzik 		 * cleaned up by ->post_internal_cmd().
1691c6fd2807SJeff Garzik 		 */
1692c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1693c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_TIMEOUT;
1694c6fd2807SJeff Garzik 
1695c6fd2807SJeff Garzik 			if (ap->ops->error_handler)
1696c6fd2807SJeff Garzik 				ata_port_freeze(ap);
1697c6fd2807SJeff Garzik 			else
1698c6fd2807SJeff Garzik 				ata_qc_complete(qc);
1699c6fd2807SJeff Garzik 
1700c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
1701c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
1702c6fd2807SJeff Garzik 					"qc timeout (cmd 0x%x)\n", command);
1703c6fd2807SJeff Garzik 		}
1704c6fd2807SJeff Garzik 
1705c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
1706c6fd2807SJeff Garzik 	}
1707c6fd2807SJeff Garzik 
1708c6fd2807SJeff Garzik 	/* do post_internal_cmd */
1709c6fd2807SJeff Garzik 	if (ap->ops->post_internal_cmd)
1710c6fd2807SJeff Garzik 		ap->ops->post_internal_cmd(qc);
1711c6fd2807SJeff Garzik 
1712a51d644aSTejun Heo 	/* perform minimal error analysis */
1713a51d644aSTejun Heo 	if (qc->flags & ATA_QCFLAG_FAILED) {
1714a51d644aSTejun Heo 		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1715a51d644aSTejun Heo 			qc->err_mask |= AC_ERR_DEV;
1716a51d644aSTejun Heo 
1717a51d644aSTejun Heo 		if (!qc->err_mask)
1718c6fd2807SJeff Garzik 			qc->err_mask |= AC_ERR_OTHER;
1719a51d644aSTejun Heo 
1720a51d644aSTejun Heo 		if (qc->err_mask & ~AC_ERR_OTHER)
1721a51d644aSTejun Heo 			qc->err_mask &= ~AC_ERR_OTHER;
1722c6fd2807SJeff Garzik 	}
1723c6fd2807SJeff Garzik 
1724c6fd2807SJeff Garzik 	/* finish up */
1725c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
1726c6fd2807SJeff Garzik 
1727c6fd2807SJeff Garzik 	*tf = qc->result_tf;
1728c6fd2807SJeff Garzik 	err_mask = qc->err_mask;
1729c6fd2807SJeff Garzik 
1730c6fd2807SJeff Garzik 	ata_qc_free(qc);
17319af5c9c9STejun Heo 	link->active_tag = preempted_tag;
17329af5c9c9STejun Heo 	link->sactive = preempted_sactive;
1733c6fd2807SJeff Garzik 	ap->qc_active = preempted_qc_active;
1734da917d69STejun Heo 	ap->nr_active_links = preempted_nr_active_links;
1735c6fd2807SJeff Garzik 
1736c6fd2807SJeff Garzik 	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
1737c6fd2807SJeff Garzik 	 * Until those drivers are fixed, we detect the condition
1738c6fd2807SJeff Garzik 	 * here, fail the command with AC_ERR_SYSTEM and reenable the
1739c6fd2807SJeff Garzik 	 * port.
1740c6fd2807SJeff Garzik 	 *
1741c6fd2807SJeff Garzik 	 * Note that this doesn't change any behavior as internal
1742c6fd2807SJeff Garzik 	 * command failure results in disabling the device in the
1743c6fd2807SJeff Garzik 	 * higher layer for LLDDs without new reset/EH callbacks.
1744c6fd2807SJeff Garzik 	 *
1745c6fd2807SJeff Garzik 	 * Kill the following code as soon as those drivers are fixed.
1746c6fd2807SJeff Garzik 	 */
1747c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_DISABLED) {
1748c6fd2807SJeff Garzik 		err_mask |= AC_ERR_SYSTEM;
1749c6fd2807SJeff Garzik 		ata_port_probe(ap);
1750c6fd2807SJeff Garzik 	}
1751c6fd2807SJeff Garzik 
1752c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
1753c6fd2807SJeff Garzik 
1754c6fd2807SJeff Garzik 	return err_mask;
1755c6fd2807SJeff Garzik }
1756c6fd2807SJeff Garzik 
1757c6fd2807SJeff Garzik /**
175833480a0eSTejun Heo  *	ata_exec_internal - execute libata internal command
17592432697bSTejun Heo  *	@dev: Device to which the command is sent
17602432697bSTejun Heo  *	@tf: Taskfile registers for the command and the result
17612432697bSTejun Heo  *	@cdb: CDB for packet command
17622432697bSTejun Heo  *	@dma_dir: Data tranfer direction of the command
17632432697bSTejun Heo  *	@buf: Data buffer of the command
17642432697bSTejun Heo  *	@buflen: Length of data buffer
17652b789108STejun Heo  *	@timeout: Timeout in msecs (0 for default)
17662432697bSTejun Heo  *
17672432697bSTejun Heo  *	Wrapper around ata_exec_internal_sg() which takes simple
17682432697bSTejun Heo  *	buffer instead of sg list.
17692432697bSTejun Heo  *
17702432697bSTejun Heo  *	LOCKING:
17712432697bSTejun Heo  *	None.  Should be called with kernel context, might sleep.
17722432697bSTejun Heo  *
17732432697bSTejun Heo  *	RETURNS:
17742432697bSTejun Heo  *	Zero on success, AC_ERR_* mask on failure
17752432697bSTejun Heo  */
17762432697bSTejun Heo unsigned ata_exec_internal(struct ata_device *dev,
17772432697bSTejun Heo 			   struct ata_taskfile *tf, const u8 *cdb,
17782b789108STejun Heo 			   int dma_dir, void *buf, unsigned int buflen,
17792b789108STejun Heo 			   unsigned long timeout)
17802432697bSTejun Heo {
178133480a0eSTejun Heo 	struct scatterlist *psg = NULL, sg;
178233480a0eSTejun Heo 	unsigned int n_elem = 0;
17832432697bSTejun Heo 
178433480a0eSTejun Heo 	if (dma_dir != DMA_NONE) {
178533480a0eSTejun Heo 		WARN_ON(!buf);
17862432697bSTejun Heo 		sg_init_one(&sg, buf, buflen);
178733480a0eSTejun Heo 		psg = &sg;
178833480a0eSTejun Heo 		n_elem++;
178933480a0eSTejun Heo 	}
17902432697bSTejun Heo 
17912b789108STejun Heo 	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
17922b789108STejun Heo 				    timeout);
17932432697bSTejun Heo }
17942432697bSTejun Heo 
17952432697bSTejun Heo /**
1796c6fd2807SJeff Garzik  *	ata_do_simple_cmd - execute simple internal command
1797c6fd2807SJeff Garzik  *	@dev: Device to which the command is sent
1798c6fd2807SJeff Garzik  *	@cmd: Opcode to execute
1799c6fd2807SJeff Garzik  *
1800c6fd2807SJeff Garzik  *	Execute a 'simple' command, that only consists of the opcode
1801c6fd2807SJeff Garzik  *	'cmd' itself, without filling any other registers
1802c6fd2807SJeff Garzik  *
1803c6fd2807SJeff Garzik  *	LOCKING:
1804c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
1805c6fd2807SJeff Garzik  *
1806c6fd2807SJeff Garzik  *	RETURNS:
1807c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
1808c6fd2807SJeff Garzik  */
1809c6fd2807SJeff Garzik unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1810c6fd2807SJeff Garzik {
1811c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1812c6fd2807SJeff Garzik 
1813c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1814c6fd2807SJeff Garzik 
1815c6fd2807SJeff Garzik 	tf.command = cmd;
1816c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_DEVICE;
1817c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
1818c6fd2807SJeff Garzik 
18192b789108STejun Heo 	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1820c6fd2807SJeff Garzik }
1821c6fd2807SJeff Garzik 
1822c6fd2807SJeff Garzik /**
1823c6fd2807SJeff Garzik  *	ata_pio_need_iordy	-	check if iordy needed
1824c6fd2807SJeff Garzik  *	@adev: ATA device
1825c6fd2807SJeff Garzik  *
1826c6fd2807SJeff Garzik  *	Check if the current speed of the device requires IORDY. Used
1827c6fd2807SJeff Garzik  *	by various controllers for chip configuration.
1828c6fd2807SJeff Garzik  */
1829c6fd2807SJeff Garzik 
1830c6fd2807SJeff Garzik unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1831c6fd2807SJeff Garzik {
1832432729f0SAlan Cox 	/* Controller doesn't support  IORDY. Probably a pointless check
1833432729f0SAlan Cox 	   as the caller should know this */
18349af5c9c9STejun Heo 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1835c6fd2807SJeff Garzik 		return 0;
1836432729f0SAlan Cox 	/* PIO3 and higher it is mandatory */
1837432729f0SAlan Cox 	if (adev->pio_mode > XFER_PIO_2)
1838c6fd2807SJeff Garzik 		return 1;
1839432729f0SAlan Cox 	/* We turn it on when possible */
1840432729f0SAlan Cox 	if (ata_id_has_iordy(adev->id))
1841432729f0SAlan Cox 		return 1;
1842432729f0SAlan Cox 	return 0;
1843432729f0SAlan Cox }
1844c6fd2807SJeff Garzik 
1845432729f0SAlan Cox /**
1846432729f0SAlan Cox  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1847432729f0SAlan Cox  *	@adev: ATA device
1848432729f0SAlan Cox  *
1849432729f0SAlan Cox  *	Compute the highest mode possible if we are not using iordy. Return
1850432729f0SAlan Cox  *	-1 if no iordy mode is available.
1851432729f0SAlan Cox  */
1852432729f0SAlan Cox 
1853432729f0SAlan Cox static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1854432729f0SAlan Cox {
1855c6fd2807SJeff Garzik 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1856c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1857432729f0SAlan Cox 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1858c6fd2807SJeff Garzik 		/* Is the speed faster than the drive allows non IORDY ? */
1859c6fd2807SJeff Garzik 		if (pio) {
1860c6fd2807SJeff Garzik 			/* This is cycle times not frequency - watch the logic! */
1861c6fd2807SJeff Garzik 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1862432729f0SAlan Cox 				return 3 << ATA_SHIFT_PIO;
1863432729f0SAlan Cox 			return 7 << ATA_SHIFT_PIO;
1864c6fd2807SJeff Garzik 		}
1865c6fd2807SJeff Garzik 	}
1866432729f0SAlan Cox 	return 3 << ATA_SHIFT_PIO;
1867c6fd2807SJeff Garzik }
1868c6fd2807SJeff Garzik 
1869c6fd2807SJeff Garzik /**
1870c6fd2807SJeff Garzik  *	ata_dev_read_id - Read ID data from the specified device
1871c6fd2807SJeff Garzik  *	@dev: target device
1872c6fd2807SJeff Garzik  *	@p_class: pointer to class of the target device (may be changed)
1873bff04647STejun Heo  *	@flags: ATA_READID_* flags
1874c6fd2807SJeff Garzik  *	@id: buffer to read IDENTIFY data into
1875c6fd2807SJeff Garzik  *
1876c6fd2807SJeff Garzik  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1877c6fd2807SJeff Garzik  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1878c6fd2807SJeff Garzik  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1879c6fd2807SJeff Garzik  *	for pre-ATA4 drives.
1880c6fd2807SJeff Garzik  *
188150a99018SAlan Cox  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
188250a99018SAlan Cox  *	now we abort if we hit that case.
188350a99018SAlan Cox  *
1884c6fd2807SJeff Garzik  *	LOCKING:
1885c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
1886c6fd2807SJeff Garzik  *
1887c6fd2807SJeff Garzik  *	RETURNS:
1888c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
1889c6fd2807SJeff Garzik  */
1890c6fd2807SJeff Garzik int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1891bff04647STejun Heo 		    unsigned int flags, u16 *id)
1892c6fd2807SJeff Garzik {
18939af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
1894c6fd2807SJeff Garzik 	unsigned int class = *p_class;
1895c6fd2807SJeff Garzik 	struct ata_taskfile tf;
1896c6fd2807SJeff Garzik 	unsigned int err_mask = 0;
1897c6fd2807SJeff Garzik 	const char *reason;
189854936f8bSTejun Heo 	int may_fallback = 1, tried_spinup = 0;
1899c6fd2807SJeff Garzik 	int rc;
1900c6fd2807SJeff Garzik 
1901c6fd2807SJeff Garzik 	if (ata_msg_ctl(ap))
190244877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1903c6fd2807SJeff Garzik 
1904c6fd2807SJeff Garzik 	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1905c6fd2807SJeff Garzik  retry:
1906c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
1907c6fd2807SJeff Garzik 
1908c6fd2807SJeff Garzik 	switch (class) {
1909c6fd2807SJeff Garzik 	case ATA_DEV_ATA:
1910c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATA;
1911c6fd2807SJeff Garzik 		break;
1912c6fd2807SJeff Garzik 	case ATA_DEV_ATAPI:
1913c6fd2807SJeff Garzik 		tf.command = ATA_CMD_ID_ATAPI;
1914c6fd2807SJeff Garzik 		break;
1915c6fd2807SJeff Garzik 	default:
1916c6fd2807SJeff Garzik 		rc = -ENODEV;
1917c6fd2807SJeff Garzik 		reason = "unsupported class";
1918c6fd2807SJeff Garzik 		goto err_out;
1919c6fd2807SJeff Garzik 	}
1920c6fd2807SJeff Garzik 
1921c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_PIO;
192281afe893STejun Heo 
192381afe893STejun Heo 	/* Some devices choke if TF registers contain garbage.  Make
192481afe893STejun Heo 	 * sure those are properly initialized.
192581afe893STejun Heo 	 */
192681afe893STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
192781afe893STejun Heo 
192881afe893STejun Heo 	/* Device presence detection is unreliable on some
192981afe893STejun Heo 	 * controllers.  Always poll IDENTIFY if available.
193081afe893STejun Heo 	 */
193181afe893STejun Heo 	tf.flags |= ATA_TFLAG_POLLING;
1932c6fd2807SJeff Garzik 
1933c6fd2807SJeff Garzik 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
19342b789108STejun Heo 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1935c6fd2807SJeff Garzik 	if (err_mask) {
1936800b3996STejun Heo 		if (err_mask & AC_ERR_NODEV_HINT) {
193755a8e2c8STejun Heo 			DPRINTK("ata%u.%d: NODEV after polling detection\n",
193844877b4eSTejun Heo 				ap->print_id, dev->devno);
193955a8e2c8STejun Heo 			return -ENOENT;
194055a8e2c8STejun Heo 		}
194155a8e2c8STejun Heo 
194254936f8bSTejun Heo 		/* Device or controller might have reported the wrong
194354936f8bSTejun Heo 		 * device class.  Give a shot at the other IDENTIFY if
194454936f8bSTejun Heo 		 * the current one is aborted by the device.
194554936f8bSTejun Heo 		 */
194654936f8bSTejun Heo 		if (may_fallback &&
194754936f8bSTejun Heo 		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
194854936f8bSTejun Heo 			may_fallback = 0;
194954936f8bSTejun Heo 
195054936f8bSTejun Heo 			if (class == ATA_DEV_ATA)
195154936f8bSTejun Heo 				class = ATA_DEV_ATAPI;
195254936f8bSTejun Heo 			else
195354936f8bSTejun Heo 				class = ATA_DEV_ATA;
195454936f8bSTejun Heo 			goto retry;
195554936f8bSTejun Heo 		}
195654936f8bSTejun Heo 
1957c6fd2807SJeff Garzik 		rc = -EIO;
1958c6fd2807SJeff Garzik 		reason = "I/O error";
1959c6fd2807SJeff Garzik 		goto err_out;
1960c6fd2807SJeff Garzik 	}
1961c6fd2807SJeff Garzik 
196254936f8bSTejun Heo 	/* Falling back doesn't make sense if ID data was read
196354936f8bSTejun Heo 	 * successfully at least once.
196454936f8bSTejun Heo 	 */
196554936f8bSTejun Heo 	may_fallback = 0;
196654936f8bSTejun Heo 
1967c6fd2807SJeff Garzik 	swap_buf_le16(id, ATA_ID_WORDS);
1968c6fd2807SJeff Garzik 
1969c6fd2807SJeff Garzik 	/* sanity check */
1970c6fd2807SJeff Garzik 	rc = -EINVAL;
19716070068bSAlan Cox 	reason = "device reports invalid type";
19724a3381feSJeff Garzik 
19734a3381feSJeff Garzik 	if (class == ATA_DEV_ATA) {
19744a3381feSJeff Garzik 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
19754a3381feSJeff Garzik 			goto err_out;
19764a3381feSJeff Garzik 	} else {
19774a3381feSJeff Garzik 		if (ata_id_is_ata(id))
1978c6fd2807SJeff Garzik 			goto err_out;
1979c6fd2807SJeff Garzik 	}
1980c6fd2807SJeff Garzik 
1981169439c2SMark Lord 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1982169439c2SMark Lord 		tried_spinup = 1;
1983169439c2SMark Lord 		/*
1984169439c2SMark Lord 		 * Drive powered-up in standby mode, and requires a specific
1985169439c2SMark Lord 		 * SET_FEATURES spin-up subcommand before it will accept
1986169439c2SMark Lord 		 * anything other than the original IDENTIFY command.
1987169439c2SMark Lord 		 */
1988218f3d30SJeff Garzik 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1989fb0582f9SRyan Power 		if (err_mask && id[2] != 0x738c) {
1990169439c2SMark Lord 			rc = -EIO;
1991169439c2SMark Lord 			reason = "SPINUP failed";
1992169439c2SMark Lord 			goto err_out;
1993169439c2SMark Lord 		}
1994169439c2SMark Lord 		/*
1995169439c2SMark Lord 		 * If the drive initially returned incomplete IDENTIFY info,
1996169439c2SMark Lord 		 * we now must reissue the IDENTIFY command.
1997169439c2SMark Lord 		 */
1998169439c2SMark Lord 		if (id[2] == 0x37c8)
1999169439c2SMark Lord 			goto retry;
2000169439c2SMark Lord 	}
2001169439c2SMark Lord 
2002bff04647STejun Heo 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2003c6fd2807SJeff Garzik 		/*
2004c6fd2807SJeff Garzik 		 * The exact sequence expected by certain pre-ATA4 drives is:
2005c6fd2807SJeff Garzik 		 * SRST RESET
200650a99018SAlan Cox 		 * IDENTIFY (optional in early ATA)
200750a99018SAlan Cox 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2008c6fd2807SJeff Garzik 		 * anything else..
2009c6fd2807SJeff Garzik 		 * Some drives were very specific about that exact sequence.
201050a99018SAlan Cox 		 *
201150a99018SAlan Cox 		 * Note that ATA4 says lba is mandatory so the second check
201250a99018SAlan Cox 		 * shoud never trigger.
2013c6fd2807SJeff Garzik 		 */
2014c6fd2807SJeff Garzik 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2015c6fd2807SJeff Garzik 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2016c6fd2807SJeff Garzik 			if (err_mask) {
2017c6fd2807SJeff Garzik 				rc = -EIO;
2018c6fd2807SJeff Garzik 				reason = "INIT_DEV_PARAMS failed";
2019c6fd2807SJeff Garzik 				goto err_out;
2020c6fd2807SJeff Garzik 			}
2021c6fd2807SJeff Garzik 
2022c6fd2807SJeff Garzik 			/* current CHS translation info (id[53-58]) might be
2023c6fd2807SJeff Garzik 			 * changed. reread the identify device info.
2024c6fd2807SJeff Garzik 			 */
2025bff04647STejun Heo 			flags &= ~ATA_READID_POSTRESET;
2026c6fd2807SJeff Garzik 			goto retry;
2027c6fd2807SJeff Garzik 		}
2028c6fd2807SJeff Garzik 	}
2029c6fd2807SJeff Garzik 
2030c6fd2807SJeff Garzik 	*p_class = class;
2031c6fd2807SJeff Garzik 
2032c6fd2807SJeff Garzik 	return 0;
2033c6fd2807SJeff Garzik 
2034c6fd2807SJeff Garzik  err_out:
2035c6fd2807SJeff Garzik 	if (ata_msg_warn(ap))
2036c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2037c6fd2807SJeff Garzik 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2038c6fd2807SJeff Garzik 	return rc;
2039c6fd2807SJeff Garzik }
2040c6fd2807SJeff Garzik 
2041c6fd2807SJeff Garzik static inline u8 ata_dev_knobble(struct ata_device *dev)
2042c6fd2807SJeff Garzik {
20439af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20449af5c9c9STejun Heo 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2045c6fd2807SJeff Garzik }
2046c6fd2807SJeff Garzik 
2047c6fd2807SJeff Garzik static void ata_dev_config_ncq(struct ata_device *dev,
2048c6fd2807SJeff Garzik 			       char *desc, size_t desc_sz)
2049c6fd2807SJeff Garzik {
20509af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
2051c6fd2807SJeff Garzik 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2052c6fd2807SJeff Garzik 
2053c6fd2807SJeff Garzik 	if (!ata_id_has_ncq(dev->id)) {
2054c6fd2807SJeff Garzik 		desc[0] = '\0';
2055c6fd2807SJeff Garzik 		return;
2056c6fd2807SJeff Garzik 	}
205775683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
20586919a0a6SAlan Cox 		snprintf(desc, desc_sz, "NCQ (not used)");
20596919a0a6SAlan Cox 		return;
20606919a0a6SAlan Cox 	}
2061c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_NCQ) {
2062cca3974eSJeff Garzik 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2063c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_NCQ;
2064c6fd2807SJeff Garzik 	}
2065c6fd2807SJeff Garzik 
2066c6fd2807SJeff Garzik 	if (hdepth >= ddepth)
2067c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2068c6fd2807SJeff Garzik 	else
2069c6fd2807SJeff Garzik 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2070c6fd2807SJeff Garzik }
2071c6fd2807SJeff Garzik 
2072c6fd2807SJeff Garzik /**
2073c6fd2807SJeff Garzik  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2074c6fd2807SJeff Garzik  *	@dev: Target device to configure
2075c6fd2807SJeff Garzik  *
2076c6fd2807SJeff Garzik  *	Configure @dev according to @dev->id.  Generic and low-level
2077c6fd2807SJeff Garzik  *	driver specific fixups are also applied.
2078c6fd2807SJeff Garzik  *
2079c6fd2807SJeff Garzik  *	LOCKING:
2080c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
2081c6fd2807SJeff Garzik  *
2082c6fd2807SJeff Garzik  *	RETURNS:
2083c6fd2807SJeff Garzik  *	0 on success, -errno otherwise
2084c6fd2807SJeff Garzik  */
2085efdaedc4STejun Heo int ata_dev_configure(struct ata_device *dev)
2086c6fd2807SJeff Garzik {
20879af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
20889af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
20896746544cSTejun Heo 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2090c6fd2807SJeff Garzik 	const u16 *id = dev->id;
2091c6fd2807SJeff Garzik 	unsigned int xfer_mask;
2092b352e57dSAlan Cox 	char revbuf[7];		/* XYZ-99\0 */
20933f64f565SEric D. Mudama 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
20943f64f565SEric D. Mudama 	char modelbuf[ATA_ID_PROD_LEN+1];
2095c6fd2807SJeff Garzik 	int rc;
2096c6fd2807SJeff Garzik 
2097c6fd2807SJeff Garzik 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
209844877b4eSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
209944877b4eSTejun Heo 			       __FUNCTION__);
2100c6fd2807SJeff Garzik 		return 0;
2101c6fd2807SJeff Garzik 	}
2102c6fd2807SJeff Garzik 
2103c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
210444877b4eSTejun Heo 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
2105c6fd2807SJeff Garzik 
210675683fe7STejun Heo 	/* set horkage */
210775683fe7STejun Heo 	dev->horkage |= ata_dev_blacklisted(dev);
210875683fe7STejun Heo 
21096746544cSTejun Heo 	/* let ACPI work its magic */
21106746544cSTejun Heo 	rc = ata_acpi_on_devcfg(dev);
21116746544cSTejun Heo 	if (rc)
21126746544cSTejun Heo 		return rc;
211308573a86SKristen Carlson Accardi 
211405027adcSTejun Heo 	/* massage HPA, do it early as it might change IDENTIFY data */
211505027adcSTejun Heo 	rc = ata_hpa_resize(dev);
211605027adcSTejun Heo 	if (rc)
211705027adcSTejun Heo 		return rc;
211805027adcSTejun Heo 
2119c6fd2807SJeff Garzik 	/* print device capabilities */
2120c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2121c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2122c6fd2807SJeff Garzik 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2123c6fd2807SJeff Garzik 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2124c6fd2807SJeff Garzik 			       __FUNCTION__,
2125c6fd2807SJeff Garzik 			       id[49], id[82], id[83], id[84],
2126c6fd2807SJeff Garzik 			       id[85], id[86], id[87], id[88]);
2127c6fd2807SJeff Garzik 
2128c6fd2807SJeff Garzik 	/* initialize to-be-configured parameters */
2129c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2130c6fd2807SJeff Garzik 	dev->max_sectors = 0;
2131c6fd2807SJeff Garzik 	dev->cdb_len = 0;
2132c6fd2807SJeff Garzik 	dev->n_sectors = 0;
2133c6fd2807SJeff Garzik 	dev->cylinders = 0;
2134c6fd2807SJeff Garzik 	dev->heads = 0;
2135c6fd2807SJeff Garzik 	dev->sectors = 0;
2136c6fd2807SJeff Garzik 
2137c6fd2807SJeff Garzik 	/*
2138c6fd2807SJeff Garzik 	 * common ATA, ATAPI feature tests
2139c6fd2807SJeff Garzik 	 */
2140c6fd2807SJeff Garzik 
2141c6fd2807SJeff Garzik 	/* find max transfer mode; for printk only */
2142c6fd2807SJeff Garzik 	xfer_mask = ata_id_xfermask(id);
2143c6fd2807SJeff Garzik 
2144c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2145c6fd2807SJeff Garzik 		ata_dump_id(id);
2146c6fd2807SJeff Garzik 
2147ef143d57SAlbert Lee 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2148ef143d57SAlbert Lee 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2149ef143d57SAlbert Lee 			sizeof(fwrevbuf));
2150ef143d57SAlbert Lee 
2151ef143d57SAlbert Lee 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2152ef143d57SAlbert Lee 			sizeof(modelbuf));
2153ef143d57SAlbert Lee 
2154c6fd2807SJeff Garzik 	/* ATA-specific feature tests */
2155c6fd2807SJeff Garzik 	if (dev->class == ATA_DEV_ATA) {
2156b352e57dSAlan Cox 		if (ata_id_is_cfa(id)) {
2157b352e57dSAlan Cox 			if (id[162] & 1) /* CPRM may make this media unusable */
215844877b4eSTejun Heo 				ata_dev_printk(dev, KERN_WARNING,
215944877b4eSTejun Heo 					       "supports DRM functions and may "
216044877b4eSTejun Heo 					       "not be fully accessable.\n");
2161b352e57dSAlan Cox 			snprintf(revbuf, 7, "CFA");
21622dcb407eSJeff Garzik 		} else
2163b352e57dSAlan Cox 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2164b352e57dSAlan Cox 
2165c6fd2807SJeff Garzik 		dev->n_sectors = ata_id_n_sectors(id);
2166c6fd2807SJeff Garzik 
21673f64f565SEric D. Mudama 		if (dev->id[59] & 0x100)
21683f64f565SEric D. Mudama 			dev->multi_count = dev->id[59] & 0xff;
21693f64f565SEric D. Mudama 
2170c6fd2807SJeff Garzik 		if (ata_id_has_lba(id)) {
2171c6fd2807SJeff Garzik 			const char *lba_desc;
2172c6fd2807SJeff Garzik 			char ncq_desc[20];
2173c6fd2807SJeff Garzik 
2174c6fd2807SJeff Garzik 			lba_desc = "LBA";
2175c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_LBA;
2176c6fd2807SJeff Garzik 			if (ata_id_has_lba48(id)) {
2177c6fd2807SJeff Garzik 				dev->flags |= ATA_DFLAG_LBA48;
2178c6fd2807SJeff Garzik 				lba_desc = "LBA48";
21796fc49adbSTejun Heo 
21806fc49adbSTejun Heo 				if (dev->n_sectors >= (1UL << 28) &&
21816fc49adbSTejun Heo 				    ata_id_has_flush_ext(id))
21826fc49adbSTejun Heo 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2183c6fd2807SJeff Garzik 			}
2184c6fd2807SJeff Garzik 
2185c6fd2807SJeff Garzik 			/* config NCQ */
2186c6fd2807SJeff Garzik 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2187c6fd2807SJeff Garzik 
2188c6fd2807SJeff Garzik 			/* print device info to dmesg */
21893f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
21903f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21913f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
21923f64f565SEric D. Mudama 					revbuf, modelbuf, fwrevbuf,
21933f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
21943f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
21953f64f565SEric D. Mudama 					"%Lu sectors, multi %u: %s %s\n",
2196c6fd2807SJeff Garzik 					(unsigned long long)dev->n_sectors,
21973f64f565SEric D. Mudama 					dev->multi_count, lba_desc, ncq_desc);
21983f64f565SEric D. Mudama 			}
2199c6fd2807SJeff Garzik 		} else {
2200c6fd2807SJeff Garzik 			/* CHS */
2201c6fd2807SJeff Garzik 
2202c6fd2807SJeff Garzik 			/* Default translation */
2203c6fd2807SJeff Garzik 			dev->cylinders	= id[1];
2204c6fd2807SJeff Garzik 			dev->heads	= id[3];
2205c6fd2807SJeff Garzik 			dev->sectors	= id[6];
2206c6fd2807SJeff Garzik 
2207c6fd2807SJeff Garzik 			if (ata_id_current_chs_valid(id)) {
2208c6fd2807SJeff Garzik 				/* Current CHS translation is valid. */
2209c6fd2807SJeff Garzik 				dev->cylinders = id[54];
2210c6fd2807SJeff Garzik 				dev->heads     = id[55];
2211c6fd2807SJeff Garzik 				dev->sectors   = id[56];
2212c6fd2807SJeff Garzik 			}
2213c6fd2807SJeff Garzik 
2214c6fd2807SJeff Garzik 			/* print device info to dmesg */
22153f64f565SEric D. Mudama 			if (ata_msg_drv(ap) && print_info) {
2216c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_INFO,
22173f64f565SEric D. Mudama 					"%s: %s, %s, max %s\n",
22183f64f565SEric D. Mudama 					revbuf,	modelbuf, fwrevbuf,
22193f64f565SEric D. Mudama 					ata_mode_string(xfer_mask));
22203f64f565SEric D. Mudama 				ata_dev_printk(dev, KERN_INFO,
22213f64f565SEric D. Mudama 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
22223f64f565SEric D. Mudama 					(unsigned long long)dev->n_sectors,
22233f64f565SEric D. Mudama 					dev->multi_count, dev->cylinders,
22243f64f565SEric D. Mudama 					dev->heads, dev->sectors);
22253f64f565SEric D. Mudama 			}
2226c6fd2807SJeff Garzik 		}
2227c6fd2807SJeff Garzik 
2228c6fd2807SJeff Garzik 		dev->cdb_len = 16;
2229c6fd2807SJeff Garzik 	}
2230c6fd2807SJeff Garzik 
2231c6fd2807SJeff Garzik 	/* ATAPI-specific feature tests */
2232c6fd2807SJeff Garzik 	else if (dev->class == ATA_DEV_ATAPI) {
2233854c73a2STejun Heo 		const char *cdb_intr_string = "";
2234854c73a2STejun Heo 		const char *atapi_an_string = "";
22357d77b247STejun Heo 		u32 sntf;
2236c6fd2807SJeff Garzik 
2237c6fd2807SJeff Garzik 		rc = atapi_cdb_len(id);
2238c6fd2807SJeff Garzik 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2239c6fd2807SJeff Garzik 			if (ata_msg_warn(ap))
2240c6fd2807SJeff Garzik 				ata_dev_printk(dev, KERN_WARNING,
2241c6fd2807SJeff Garzik 					       "unsupported CDB len\n");
2242c6fd2807SJeff Garzik 			rc = -EINVAL;
2243c6fd2807SJeff Garzik 			goto err_out_nosup;
2244c6fd2807SJeff Garzik 		}
2245c6fd2807SJeff Garzik 		dev->cdb_len = (unsigned int) rc;
2246c6fd2807SJeff Garzik 
22477d77b247STejun Heo 		/* Enable ATAPI AN if both the host and device have
22487d77b247STejun Heo 		 * the support.  If PMP is attached, SNTF is required
22497d77b247STejun Heo 		 * to enable ATAPI AN to discern between PHY status
22507d77b247STejun Heo 		 * changed notifications and ATAPI ANs.
22519f45cbd3SKristen Carlson Accardi 		 */
22527d77b247STejun Heo 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
22537d77b247STejun Heo 		    (!ap->nr_pmp_links ||
22547d77b247STejun Heo 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2255854c73a2STejun Heo 			unsigned int err_mask;
2256854c73a2STejun Heo 
22579f45cbd3SKristen Carlson Accardi 			/* issue SET feature command to turn this on */
2258218f3d30SJeff Garzik 			err_mask = ata_dev_set_feature(dev,
2259218f3d30SJeff Garzik 					SETFEATURES_SATA_ENABLE, SATA_AN);
2260854c73a2STejun Heo 			if (err_mask)
22619f45cbd3SKristen Carlson Accardi 				ata_dev_printk(dev, KERN_ERR,
2262854c73a2STejun Heo 					"failed to enable ATAPI AN "
2263854c73a2STejun Heo 					"(err_mask=0x%x)\n", err_mask);
2264854c73a2STejun Heo 			else {
22659f45cbd3SKristen Carlson Accardi 				dev->flags |= ATA_DFLAG_AN;
2266854c73a2STejun Heo 				atapi_an_string = ", ATAPI AN";
2267854c73a2STejun Heo 			}
22689f45cbd3SKristen Carlson Accardi 		}
22699f45cbd3SKristen Carlson Accardi 
2270c6fd2807SJeff Garzik 		if (ata_id_cdb_intr(dev->id)) {
2271c6fd2807SJeff Garzik 			dev->flags |= ATA_DFLAG_CDB_INTR;
2272c6fd2807SJeff Garzik 			cdb_intr_string = ", CDB intr";
2273c6fd2807SJeff Garzik 		}
2274c6fd2807SJeff Garzik 
2275c6fd2807SJeff Garzik 		/* print device info to dmesg */
2276c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2277ef143d57SAlbert Lee 			ata_dev_printk(dev, KERN_INFO,
2278854c73a2STejun Heo 				       "ATAPI: %s, %s, max %s%s%s\n",
2279ef143d57SAlbert Lee 				       modelbuf, fwrevbuf,
2280c6fd2807SJeff Garzik 				       ata_mode_string(xfer_mask),
2281854c73a2STejun Heo 				       cdb_intr_string, atapi_an_string);
2282c6fd2807SJeff Garzik 	}
2283c6fd2807SJeff Garzik 
2284914ed354STejun Heo 	/* determine max_sectors */
2285914ed354STejun Heo 	dev->max_sectors = ATA_MAX_SECTORS;
2286914ed354STejun Heo 	if (dev->flags & ATA_DFLAG_LBA48)
2287914ed354STejun Heo 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2288914ed354STejun Heo 
2289ca77329fSKristen Carlson Accardi 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2290ca77329fSKristen Carlson Accardi 		if (ata_id_has_hipm(dev->id))
2291ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_HIPM;
2292ca77329fSKristen Carlson Accardi 		if (ata_id_has_dipm(dev->id))
2293ca77329fSKristen Carlson Accardi 			dev->flags |= ATA_DFLAG_DIPM;
2294ca77329fSKristen Carlson Accardi 	}
2295ca77329fSKristen Carlson Accardi 
229693590859SAlan Cox 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
229793590859SAlan Cox 		/* Let the user know. We don't want to disallow opens for
229893590859SAlan Cox 		   rescue purposes, or in case the vendor is just a blithering
229993590859SAlan Cox 		   idiot */
230093590859SAlan Cox 		if (print_info) {
230193590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
230293590859SAlan Cox "Drive reports diagnostics failure. This may indicate a drive\n");
230393590859SAlan Cox 			ata_dev_printk(dev, KERN_WARNING,
230493590859SAlan Cox "fault or invalid emulation. Contact drive vendor for information.\n");
230593590859SAlan Cox 		}
230693590859SAlan Cox 	}
230793590859SAlan Cox 
2308c6fd2807SJeff Garzik 	/* limit bridge transfers to udma5, 200 sectors */
2309c6fd2807SJeff Garzik 	if (ata_dev_knobble(dev)) {
2310c6fd2807SJeff Garzik 		if (ata_msg_drv(ap) && print_info)
2311c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_INFO,
2312c6fd2807SJeff Garzik 				       "applying bridge limits\n");
2313c6fd2807SJeff Garzik 		dev->udma_mask &= ATA_UDMA5;
2314c6fd2807SJeff Garzik 		dev->max_sectors = ATA_MAX_SECTORS;
2315c6fd2807SJeff Garzik 	}
2316c6fd2807SJeff Garzik 
2317f8d8e579STony Battersby 	if ((dev->class == ATA_DEV_ATAPI) &&
2318f442cd86SAlbert Lee 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2319f8d8e579STony Battersby 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2320f442cd86SAlbert Lee 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2321f442cd86SAlbert Lee 	}
2322f8d8e579STony Battersby 
232375683fe7STejun Heo 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
232403ec52deSTejun Heo 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
232503ec52deSTejun Heo 					 dev->max_sectors);
232618d6e9d5SAlbert Lee 
2327ca77329fSKristen Carlson Accardi 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2328ca77329fSKristen Carlson Accardi 		dev->horkage |= ATA_HORKAGE_IPM;
2329ca77329fSKristen Carlson Accardi 
2330ca77329fSKristen Carlson Accardi 		/* reset link pm_policy for this port to no pm */
2331ca77329fSKristen Carlson Accardi 		ap->pm_policy = MAX_PERFORMANCE;
2332ca77329fSKristen Carlson Accardi 	}
2333ca77329fSKristen Carlson Accardi 
2334c6fd2807SJeff Garzik 	if (ap->ops->dev_config)
2335cd0d3bbcSAlan 		ap->ops->dev_config(dev);
2336c6fd2807SJeff Garzik 
2337c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2338c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2339c6fd2807SJeff Garzik 			__FUNCTION__, ata_chk_status(ap));
2340c6fd2807SJeff Garzik 	return 0;
2341c6fd2807SJeff Garzik 
2342c6fd2807SJeff Garzik err_out_nosup:
2343c6fd2807SJeff Garzik 	if (ata_msg_probe(ap))
2344c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_DEBUG,
2345c6fd2807SJeff Garzik 			       "%s: EXIT, err\n", __FUNCTION__);
2346c6fd2807SJeff Garzik 	return rc;
2347c6fd2807SJeff Garzik }
2348c6fd2807SJeff Garzik 
2349c6fd2807SJeff Garzik /**
23502e41e8e6SAlan Cox  *	ata_cable_40wire	-	return 40 wire cable type
2351be0d18dfSAlan Cox  *	@ap: port
2352be0d18dfSAlan Cox  *
23532e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 40 wire cable
2354be0d18dfSAlan Cox  *	detection.
2355be0d18dfSAlan Cox  */
2356be0d18dfSAlan Cox 
2357be0d18dfSAlan Cox int ata_cable_40wire(struct ata_port *ap)
2358be0d18dfSAlan Cox {
2359be0d18dfSAlan Cox 	return ATA_CBL_PATA40;
2360be0d18dfSAlan Cox }
2361be0d18dfSAlan Cox 
2362be0d18dfSAlan Cox /**
23632e41e8e6SAlan Cox  *	ata_cable_80wire	-	return 80 wire cable type
2364be0d18dfSAlan Cox  *	@ap: port
2365be0d18dfSAlan Cox  *
23662e41e8e6SAlan Cox  *	Helper method for drivers which want to hardwire 80 wire cable
2367be0d18dfSAlan Cox  *	detection.
2368be0d18dfSAlan Cox  */
2369be0d18dfSAlan Cox 
2370be0d18dfSAlan Cox int ata_cable_80wire(struct ata_port *ap)
2371be0d18dfSAlan Cox {
2372be0d18dfSAlan Cox 	return ATA_CBL_PATA80;
2373be0d18dfSAlan Cox }
2374be0d18dfSAlan Cox 
2375be0d18dfSAlan Cox /**
2376be0d18dfSAlan Cox  *	ata_cable_unknown	-	return unknown PATA cable.
2377be0d18dfSAlan Cox  *	@ap: port
2378be0d18dfSAlan Cox  *
2379be0d18dfSAlan Cox  *	Helper method for drivers which have no PATA cable detection.
2380be0d18dfSAlan Cox  */
2381be0d18dfSAlan Cox 
2382be0d18dfSAlan Cox int ata_cable_unknown(struct ata_port *ap)
2383be0d18dfSAlan Cox {
2384be0d18dfSAlan Cox 	return ATA_CBL_PATA_UNK;
2385be0d18dfSAlan Cox }
2386be0d18dfSAlan Cox 
2387be0d18dfSAlan Cox /**
2388be0d18dfSAlan Cox  *	ata_cable_sata	-	return SATA cable type
2389be0d18dfSAlan Cox  *	@ap: port
2390be0d18dfSAlan Cox  *
2391be0d18dfSAlan Cox  *	Helper method for drivers which have SATA cables
2392be0d18dfSAlan Cox  */
2393be0d18dfSAlan Cox 
2394be0d18dfSAlan Cox int ata_cable_sata(struct ata_port *ap)
2395be0d18dfSAlan Cox {
2396be0d18dfSAlan Cox 	return ATA_CBL_SATA;
2397be0d18dfSAlan Cox }
2398be0d18dfSAlan Cox 
2399be0d18dfSAlan Cox /**
2400c6fd2807SJeff Garzik  *	ata_bus_probe - Reset and probe ATA bus
2401c6fd2807SJeff Garzik  *	@ap: Bus to probe
2402c6fd2807SJeff Garzik  *
2403c6fd2807SJeff Garzik  *	Master ATA bus probing function.  Initiates a hardware-dependent
2404c6fd2807SJeff Garzik  *	bus reset, then attempts to identify any devices found on
2405c6fd2807SJeff Garzik  *	the bus.
2406c6fd2807SJeff Garzik  *
2407c6fd2807SJeff Garzik  *	LOCKING:
2408c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
2409c6fd2807SJeff Garzik  *
2410c6fd2807SJeff Garzik  *	RETURNS:
2411c6fd2807SJeff Garzik  *	Zero on success, negative errno otherwise.
2412c6fd2807SJeff Garzik  */
2413c6fd2807SJeff Garzik 
2414c6fd2807SJeff Garzik int ata_bus_probe(struct ata_port *ap)
2415c6fd2807SJeff Garzik {
2416c6fd2807SJeff Garzik 	unsigned int classes[ATA_MAX_DEVICES];
2417c6fd2807SJeff Garzik 	int tries[ATA_MAX_DEVICES];
2418f58229f8STejun Heo 	int rc;
2419c6fd2807SJeff Garzik 	struct ata_device *dev;
2420c6fd2807SJeff Garzik 
2421c6fd2807SJeff Garzik 	ata_port_probe(ap);
2422c6fd2807SJeff Garzik 
2423f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2424f58229f8STejun Heo 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2425c6fd2807SJeff Garzik 
2426c6fd2807SJeff Garzik  retry:
2427cdeab114STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2428cdeab114STejun Heo 		/* If we issue an SRST then an ATA drive (not ATAPI)
2429cdeab114STejun Heo 		 * may change configuration and be in PIO0 timing. If
2430cdeab114STejun Heo 		 * we do a hard reset (or are coming from power on)
2431cdeab114STejun Heo 		 * this is true for ATA or ATAPI. Until we've set a
2432cdeab114STejun Heo 		 * suitable controller mode we should not touch the
2433cdeab114STejun Heo 		 * bus as we may be talking too fast.
2434cdeab114STejun Heo 		 */
2435cdeab114STejun Heo 		dev->pio_mode = XFER_PIO_0;
2436cdeab114STejun Heo 
2437cdeab114STejun Heo 		/* If the controller has a pio mode setup function
2438cdeab114STejun Heo 		 * then use it to set the chipset to rights. Don't
2439cdeab114STejun Heo 		 * touch the DMA setup as that will be dealt with when
2440cdeab114STejun Heo 		 * configuring devices.
2441cdeab114STejun Heo 		 */
2442cdeab114STejun Heo 		if (ap->ops->set_piomode)
2443cdeab114STejun Heo 			ap->ops->set_piomode(ap, dev);
2444cdeab114STejun Heo 	}
2445cdeab114STejun Heo 
2446c6fd2807SJeff Garzik 	/* reset and determine device classes */
2447c6fd2807SJeff Garzik 	ap->ops->phy_reset(ap);
2448c6fd2807SJeff Garzik 
2449f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2450c6fd2807SJeff Garzik 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2451c6fd2807SJeff Garzik 		    dev->class != ATA_DEV_UNKNOWN)
2452c6fd2807SJeff Garzik 			classes[dev->devno] = dev->class;
2453c6fd2807SJeff Garzik 		else
2454c6fd2807SJeff Garzik 			classes[dev->devno] = ATA_DEV_NONE;
2455c6fd2807SJeff Garzik 
2456c6fd2807SJeff Garzik 		dev->class = ATA_DEV_UNKNOWN;
2457c6fd2807SJeff Garzik 	}
2458c6fd2807SJeff Garzik 
2459c6fd2807SJeff Garzik 	ata_port_probe(ap);
2460c6fd2807SJeff Garzik 
2461f31f0cc2SJeff Garzik 	/* read IDENTIFY page and configure devices. We have to do the identify
2462f31f0cc2SJeff Garzik 	   specific sequence bass-ackwards so that PDIAG- is released by
2463f31f0cc2SJeff Garzik 	   the slave device */
2464f31f0cc2SJeff Garzik 
2465f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2466f58229f8STejun Heo 		if (tries[dev->devno])
2467f58229f8STejun Heo 			dev->class = classes[dev->devno];
2468c6fd2807SJeff Garzik 
2469c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
2470c6fd2807SJeff Garzik 			continue;
2471c6fd2807SJeff Garzik 
2472bff04647STejun Heo 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2473bff04647STejun Heo 				     dev->id);
2474c6fd2807SJeff Garzik 		if (rc)
2475c6fd2807SJeff Garzik 			goto fail;
2476f31f0cc2SJeff Garzik 	}
2477f31f0cc2SJeff Garzik 
2478be0d18dfSAlan Cox 	/* Now ask for the cable type as PDIAG- should have been released */
2479be0d18dfSAlan Cox 	if (ap->ops->cable_detect)
2480be0d18dfSAlan Cox 		ap->cbl = ap->ops->cable_detect(ap);
2481be0d18dfSAlan Cox 
2482614fe29bSAlan Cox 	/* We may have SATA bridge glue hiding here irrespective of the
2483614fe29bSAlan Cox 	   reported cable types and sensed types */
2484614fe29bSAlan Cox 	ata_link_for_each_dev(dev, &ap->link) {
2485614fe29bSAlan Cox 		if (!ata_dev_enabled(dev))
2486614fe29bSAlan Cox 			continue;
2487614fe29bSAlan Cox 		/* SATA drives indicate we have a bridge. We don't know which
2488614fe29bSAlan Cox 		   end of the link the bridge is which is a problem */
2489614fe29bSAlan Cox 		if (ata_id_is_sata(dev->id))
2490614fe29bSAlan Cox 			ap->cbl = ATA_CBL_SATA;
2491614fe29bSAlan Cox 	}
2492614fe29bSAlan Cox 
2493f31f0cc2SJeff Garzik 	/* After the identify sequence we can now set up the devices. We do
2494f31f0cc2SJeff Garzik 	   this in the normal order so that the user doesn't get confused */
2495f31f0cc2SJeff Garzik 
2496f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link) {
2497f31f0cc2SJeff Garzik 		if (!ata_dev_enabled(dev))
2498f31f0cc2SJeff Garzik 			continue;
2499c6fd2807SJeff Garzik 
25009af5c9c9STejun Heo 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2501efdaedc4STejun Heo 		rc = ata_dev_configure(dev);
25029af5c9c9STejun Heo 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2503c6fd2807SJeff Garzik 		if (rc)
2504c6fd2807SJeff Garzik 			goto fail;
2505c6fd2807SJeff Garzik 	}
2506c6fd2807SJeff Garzik 
2507c6fd2807SJeff Garzik 	/* configure transfer mode */
25080260731fSTejun Heo 	rc = ata_set_mode(&ap->link, &dev);
25094ae72a1eSTejun Heo 	if (rc)
2510c6fd2807SJeff Garzik 		goto fail;
2511c6fd2807SJeff Garzik 
2512f58229f8STejun Heo 	ata_link_for_each_dev(dev, &ap->link)
2513f58229f8STejun Heo 		if (ata_dev_enabled(dev))
2514c6fd2807SJeff Garzik 			return 0;
2515c6fd2807SJeff Garzik 
2516c6fd2807SJeff Garzik 	/* no device present, disable port */
2517c6fd2807SJeff Garzik 	ata_port_disable(ap);
2518c6fd2807SJeff Garzik 	return -ENODEV;
2519c6fd2807SJeff Garzik 
2520c6fd2807SJeff Garzik  fail:
25214ae72a1eSTejun Heo 	tries[dev->devno]--;
25224ae72a1eSTejun Heo 
2523c6fd2807SJeff Garzik 	switch (rc) {
2524c6fd2807SJeff Garzik 	case -EINVAL:
25254ae72a1eSTejun Heo 		/* eeek, something went very wrong, give up */
2526c6fd2807SJeff Garzik 		tries[dev->devno] = 0;
2527c6fd2807SJeff Garzik 		break;
25284ae72a1eSTejun Heo 
25294ae72a1eSTejun Heo 	case -ENODEV:
25304ae72a1eSTejun Heo 		/* give it just one more chance */
25314ae72a1eSTejun Heo 		tries[dev->devno] = min(tries[dev->devno], 1);
2532c6fd2807SJeff Garzik 	case -EIO:
25334ae72a1eSTejun Heo 		if (tries[dev->devno] == 1) {
25344ae72a1eSTejun Heo 			/* This is the last chance, better to slow
25354ae72a1eSTejun Heo 			 * down than lose it.
25364ae72a1eSTejun Heo 			 */
2537936fd732STejun Heo 			sata_down_spd_limit(&ap->link);
25384ae72a1eSTejun Heo 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
25394ae72a1eSTejun Heo 		}
2540c6fd2807SJeff Garzik 	}
2541c6fd2807SJeff Garzik 
25424ae72a1eSTejun Heo 	if (!tries[dev->devno])
2543c6fd2807SJeff Garzik 		ata_dev_disable(dev);
2544c6fd2807SJeff Garzik 
2545c6fd2807SJeff Garzik 	goto retry;
2546c6fd2807SJeff Garzik }
2547c6fd2807SJeff Garzik 
2548c6fd2807SJeff Garzik /**
2549c6fd2807SJeff Garzik  *	ata_port_probe - Mark port as enabled
2550c6fd2807SJeff Garzik  *	@ap: Port for which we indicate enablement
2551c6fd2807SJeff Garzik  *
2552c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2553c6fd2807SJeff Garzik  *	thinks that the entire port is enabled.
2554c6fd2807SJeff Garzik  *
2555cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2556c6fd2807SJeff Garzik  *	serialization.
2557c6fd2807SJeff Garzik  */
2558c6fd2807SJeff Garzik 
2559c6fd2807SJeff Garzik void ata_port_probe(struct ata_port *ap)
2560c6fd2807SJeff Garzik {
2561c6fd2807SJeff Garzik 	ap->flags &= ~ATA_FLAG_DISABLED;
2562c6fd2807SJeff Garzik }
2563c6fd2807SJeff Garzik 
2564c6fd2807SJeff Garzik /**
2565c6fd2807SJeff Garzik  *	sata_print_link_status - Print SATA link status
2566936fd732STejun Heo  *	@link: SATA link to printk link status about
2567c6fd2807SJeff Garzik  *
2568c6fd2807SJeff Garzik  *	This function prints link speed and status of a SATA link.
2569c6fd2807SJeff Garzik  *
2570c6fd2807SJeff Garzik  *	LOCKING:
2571c6fd2807SJeff Garzik  *	None.
2572c6fd2807SJeff Garzik  */
2573936fd732STejun Heo void sata_print_link_status(struct ata_link *link)
2574c6fd2807SJeff Garzik {
2575c6fd2807SJeff Garzik 	u32 sstatus, scontrol, tmp;
2576c6fd2807SJeff Garzik 
2577936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2578c6fd2807SJeff Garzik 		return;
2579936fd732STejun Heo 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2580c6fd2807SJeff Garzik 
2581936fd732STejun Heo 	if (ata_link_online(link)) {
2582c6fd2807SJeff Garzik 		tmp = (sstatus >> 4) & 0xf;
2583936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2584c6fd2807SJeff Garzik 				"SATA link up %s (SStatus %X SControl %X)\n",
2585c6fd2807SJeff Garzik 				sata_spd_string(tmp), sstatus, scontrol);
2586c6fd2807SJeff Garzik 	} else {
2587936fd732STejun Heo 		ata_link_printk(link, KERN_INFO,
2588c6fd2807SJeff Garzik 				"SATA link down (SStatus %X SControl %X)\n",
2589c6fd2807SJeff Garzik 				sstatus, scontrol);
2590c6fd2807SJeff Garzik 	}
2591c6fd2807SJeff Garzik }
2592c6fd2807SJeff Garzik 
2593c6fd2807SJeff Garzik /**
2594c6fd2807SJeff Garzik  *	ata_dev_pair		-	return other device on cable
2595c6fd2807SJeff Garzik  *	@adev: device
2596c6fd2807SJeff Garzik  *
2597c6fd2807SJeff Garzik  *	Obtain the other device on the same cable, or if none is
2598c6fd2807SJeff Garzik  *	present NULL is returned
2599c6fd2807SJeff Garzik  */
2600c6fd2807SJeff Garzik 
2601c6fd2807SJeff Garzik struct ata_device *ata_dev_pair(struct ata_device *adev)
2602c6fd2807SJeff Garzik {
26039af5c9c9STejun Heo 	struct ata_link *link = adev->link;
26049af5c9c9STejun Heo 	struct ata_device *pair = &link->device[1 - adev->devno];
2605c6fd2807SJeff Garzik 	if (!ata_dev_enabled(pair))
2606c6fd2807SJeff Garzik 		return NULL;
2607c6fd2807SJeff Garzik 	return pair;
2608c6fd2807SJeff Garzik }
2609c6fd2807SJeff Garzik 
2610c6fd2807SJeff Garzik /**
2611c6fd2807SJeff Garzik  *	ata_port_disable - Disable port.
2612c6fd2807SJeff Garzik  *	@ap: Port to be disabled.
2613c6fd2807SJeff Garzik  *
2614c6fd2807SJeff Garzik  *	Modify @ap data structure such that the system
2615c6fd2807SJeff Garzik  *	thinks that the entire port is disabled, and should
2616c6fd2807SJeff Garzik  *	never attempt to probe or communicate with devices
2617c6fd2807SJeff Garzik  *	on this port.
2618c6fd2807SJeff Garzik  *
2619cca3974eSJeff Garzik  *	LOCKING: host lock, or some other form of
2620c6fd2807SJeff Garzik  *	serialization.
2621c6fd2807SJeff Garzik  */
2622c6fd2807SJeff Garzik 
2623c6fd2807SJeff Garzik void ata_port_disable(struct ata_port *ap)
2624c6fd2807SJeff Garzik {
26259af5c9c9STejun Heo 	ap->link.device[0].class = ATA_DEV_NONE;
26269af5c9c9STejun Heo 	ap->link.device[1].class = ATA_DEV_NONE;
2627c6fd2807SJeff Garzik 	ap->flags |= ATA_FLAG_DISABLED;
2628c6fd2807SJeff Garzik }
2629c6fd2807SJeff Garzik 
2630c6fd2807SJeff Garzik /**
2631c6fd2807SJeff Garzik  *	sata_down_spd_limit - adjust SATA spd limit downward
2632936fd732STejun Heo  *	@link: Link to adjust SATA spd limit for
2633c6fd2807SJeff Garzik  *
2634936fd732STejun Heo  *	Adjust SATA spd limit of @link downward.  Note that this
2635c6fd2807SJeff Garzik  *	function only adjusts the limit.  The change must be applied
2636c6fd2807SJeff Garzik  *	using sata_set_spd().
2637c6fd2807SJeff Garzik  *
2638c6fd2807SJeff Garzik  *	LOCKING:
2639c6fd2807SJeff Garzik  *	Inherited from caller.
2640c6fd2807SJeff Garzik  *
2641c6fd2807SJeff Garzik  *	RETURNS:
2642c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2643c6fd2807SJeff Garzik  */
2644936fd732STejun Heo int sata_down_spd_limit(struct ata_link *link)
2645c6fd2807SJeff Garzik {
2646c6fd2807SJeff Garzik 	u32 sstatus, spd, mask;
2647c6fd2807SJeff Garzik 	int rc, highbit;
2648c6fd2807SJeff Garzik 
2649936fd732STejun Heo 	if (!sata_scr_valid(link))
2650008a7896STejun Heo 		return -EOPNOTSUPP;
2651008a7896STejun Heo 
2652008a7896STejun Heo 	/* If SCR can be read, use it to determine the current SPD.
2653936fd732STejun Heo 	 * If not, use cached value in link->sata_spd.
2654008a7896STejun Heo 	 */
2655936fd732STejun Heo 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2656008a7896STejun Heo 	if (rc == 0)
2657008a7896STejun Heo 		spd = (sstatus >> 4) & 0xf;
2658008a7896STejun Heo 	else
2659936fd732STejun Heo 		spd = link->sata_spd;
2660c6fd2807SJeff Garzik 
2661936fd732STejun Heo 	mask = link->sata_spd_limit;
2662c6fd2807SJeff Garzik 	if (mask <= 1)
2663c6fd2807SJeff Garzik 		return -EINVAL;
2664008a7896STejun Heo 
2665008a7896STejun Heo 	/* unconditionally mask off the highest bit */
2666c6fd2807SJeff Garzik 	highbit = fls(mask) - 1;
2667c6fd2807SJeff Garzik 	mask &= ~(1 << highbit);
2668c6fd2807SJeff Garzik 
2669008a7896STejun Heo 	/* Mask off all speeds higher than or equal to the current
2670008a7896STejun Heo 	 * one.  Force 1.5Gbps if current SPD is not available.
2671008a7896STejun Heo 	 */
2672008a7896STejun Heo 	if (spd > 1)
2673008a7896STejun Heo 		mask &= (1 << (spd - 1)) - 1;
2674008a7896STejun Heo 	else
2675008a7896STejun Heo 		mask &= 1;
2676008a7896STejun Heo 
2677008a7896STejun Heo 	/* were we already at the bottom? */
2678c6fd2807SJeff Garzik 	if (!mask)
2679c6fd2807SJeff Garzik 		return -EINVAL;
2680c6fd2807SJeff Garzik 
2681936fd732STejun Heo 	link->sata_spd_limit = mask;
2682c6fd2807SJeff Garzik 
2683936fd732STejun Heo 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2684c6fd2807SJeff Garzik 			sata_spd_string(fls(mask)));
2685c6fd2807SJeff Garzik 
2686c6fd2807SJeff Garzik 	return 0;
2687c6fd2807SJeff Garzik }
2688c6fd2807SJeff Garzik 
2689936fd732STejun Heo static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2690c6fd2807SJeff Garzik {
26915270222fSTejun Heo 	struct ata_link *host_link = &link->ap->link;
26925270222fSTejun Heo 	u32 limit, target, spd;
2693c6fd2807SJeff Garzik 
26945270222fSTejun Heo 	limit = link->sata_spd_limit;
26955270222fSTejun Heo 
26965270222fSTejun Heo 	/* Don't configure downstream link faster than upstream link.
26975270222fSTejun Heo 	 * It doesn't speed up anything and some PMPs choke on such
26985270222fSTejun Heo 	 * configuration.
26995270222fSTejun Heo 	 */
27005270222fSTejun Heo 	if (!ata_is_host_link(link) && host_link->sata_spd)
27015270222fSTejun Heo 		limit &= (1 << host_link->sata_spd) - 1;
27025270222fSTejun Heo 
27035270222fSTejun Heo 	if (limit == UINT_MAX)
27045270222fSTejun Heo 		target = 0;
2705c6fd2807SJeff Garzik 	else
27065270222fSTejun Heo 		target = fls(limit);
2707c6fd2807SJeff Garzik 
2708c6fd2807SJeff Garzik 	spd = (*scontrol >> 4) & 0xf;
27095270222fSTejun Heo 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2710c6fd2807SJeff Garzik 
27115270222fSTejun Heo 	return spd != target;
2712c6fd2807SJeff Garzik }
2713c6fd2807SJeff Garzik 
2714c6fd2807SJeff Garzik /**
2715c6fd2807SJeff Garzik  *	sata_set_spd_needed - is SATA spd configuration needed
2716936fd732STejun Heo  *	@link: Link in question
2717c6fd2807SJeff Garzik  *
2718c6fd2807SJeff Garzik  *	Test whether the spd limit in SControl matches
2719936fd732STejun Heo  *	@link->sata_spd_limit.  This function is used to determine
2720c6fd2807SJeff Garzik  *	whether hardreset is necessary to apply SATA spd
2721c6fd2807SJeff Garzik  *	configuration.
2722c6fd2807SJeff Garzik  *
2723c6fd2807SJeff Garzik  *	LOCKING:
2724c6fd2807SJeff Garzik  *	Inherited from caller.
2725c6fd2807SJeff Garzik  *
2726c6fd2807SJeff Garzik  *	RETURNS:
2727c6fd2807SJeff Garzik  *	1 if SATA spd configuration is needed, 0 otherwise.
2728c6fd2807SJeff Garzik  */
2729936fd732STejun Heo int sata_set_spd_needed(struct ata_link *link)
2730c6fd2807SJeff Garzik {
2731c6fd2807SJeff Garzik 	u32 scontrol;
2732c6fd2807SJeff Garzik 
2733936fd732STejun Heo 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2734db64bcf3STejun Heo 		return 1;
2735c6fd2807SJeff Garzik 
2736936fd732STejun Heo 	return __sata_set_spd_needed(link, &scontrol);
2737c6fd2807SJeff Garzik }
2738c6fd2807SJeff Garzik 
2739c6fd2807SJeff Garzik /**
2740c6fd2807SJeff Garzik  *	sata_set_spd - set SATA spd according to spd limit
2741936fd732STejun Heo  *	@link: Link to set SATA spd for
2742c6fd2807SJeff Garzik  *
2743936fd732STejun Heo  *	Set SATA spd of @link according to sata_spd_limit.
2744c6fd2807SJeff Garzik  *
2745c6fd2807SJeff Garzik  *	LOCKING:
2746c6fd2807SJeff Garzik  *	Inherited from caller.
2747c6fd2807SJeff Garzik  *
2748c6fd2807SJeff Garzik  *	RETURNS:
2749c6fd2807SJeff Garzik  *	0 if spd doesn't need to be changed, 1 if spd has been
2750c6fd2807SJeff Garzik  *	changed.  Negative errno if SCR registers are inaccessible.
2751c6fd2807SJeff Garzik  */
2752936fd732STejun Heo int sata_set_spd(struct ata_link *link)
2753c6fd2807SJeff Garzik {
2754c6fd2807SJeff Garzik 	u32 scontrol;
2755c6fd2807SJeff Garzik 	int rc;
2756c6fd2807SJeff Garzik 
2757936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2758c6fd2807SJeff Garzik 		return rc;
2759c6fd2807SJeff Garzik 
2760936fd732STejun Heo 	if (!__sata_set_spd_needed(link, &scontrol))
2761c6fd2807SJeff Garzik 		return 0;
2762c6fd2807SJeff Garzik 
2763936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2764c6fd2807SJeff Garzik 		return rc;
2765c6fd2807SJeff Garzik 
2766c6fd2807SJeff Garzik 	return 1;
2767c6fd2807SJeff Garzik }
2768c6fd2807SJeff Garzik 
2769c6fd2807SJeff Garzik /*
2770c6fd2807SJeff Garzik  * This mode timing computation functionality is ported over from
2771c6fd2807SJeff Garzik  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2772c6fd2807SJeff Garzik  */
2773c6fd2807SJeff Garzik /*
2774b352e57dSAlan Cox  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2775c6fd2807SJeff Garzik  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2776b352e57dSAlan Cox  * for UDMA6, which is currently supported only by Maxtor drives.
2777b352e57dSAlan Cox  *
2778b352e57dSAlan Cox  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2779c6fd2807SJeff Garzik  */
2780c6fd2807SJeff Garzik 
2781c6fd2807SJeff Garzik static const struct ata_timing ata_timing[] = {
2782c6fd2807SJeff Garzik 
2783c6fd2807SJeff Garzik 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2784c6fd2807SJeff Garzik 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2785c6fd2807SJeff Garzik 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2786c6fd2807SJeff Garzik 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2787c6fd2807SJeff Garzik 
2788b352e57dSAlan Cox 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2789b352e57dSAlan Cox 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2790c6fd2807SJeff Garzik 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2791c6fd2807SJeff Garzik 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2792c6fd2807SJeff Garzik 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2793c6fd2807SJeff Garzik 
2794c6fd2807SJeff Garzik /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2795c6fd2807SJeff Garzik 
2796c6fd2807SJeff Garzik 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2797c6fd2807SJeff Garzik 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2798c6fd2807SJeff Garzik 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2799c6fd2807SJeff Garzik 
2800c6fd2807SJeff Garzik 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2801c6fd2807SJeff Garzik 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2802c6fd2807SJeff Garzik 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2803c6fd2807SJeff Garzik 
2804b352e57dSAlan Cox 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2805b352e57dSAlan Cox 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2806c6fd2807SJeff Garzik 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2807c6fd2807SJeff Garzik 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2808c6fd2807SJeff Garzik 
2809c6fd2807SJeff Garzik 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2810c6fd2807SJeff Garzik 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2811c6fd2807SJeff Garzik 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2812c6fd2807SJeff Garzik 
2813c6fd2807SJeff Garzik /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2814c6fd2807SJeff Garzik 
2815c6fd2807SJeff Garzik 	{ 0xFF }
2816c6fd2807SJeff Garzik };
2817c6fd2807SJeff Garzik 
2818c6fd2807SJeff Garzik #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2819c6fd2807SJeff Garzik #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2820c6fd2807SJeff Garzik 
2821c6fd2807SJeff Garzik static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2822c6fd2807SJeff Garzik {
2823c6fd2807SJeff Garzik 	q->setup   = EZ(t->setup   * 1000,  T);
2824c6fd2807SJeff Garzik 	q->act8b   = EZ(t->act8b   * 1000,  T);
2825c6fd2807SJeff Garzik 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2826c6fd2807SJeff Garzik 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2827c6fd2807SJeff Garzik 	q->active  = EZ(t->active  * 1000,  T);
2828c6fd2807SJeff Garzik 	q->recover = EZ(t->recover * 1000,  T);
2829c6fd2807SJeff Garzik 	q->cycle   = EZ(t->cycle   * 1000,  T);
2830c6fd2807SJeff Garzik 	q->udma    = EZ(t->udma    * 1000, UT);
2831c6fd2807SJeff Garzik }
2832c6fd2807SJeff Garzik 
2833c6fd2807SJeff Garzik void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2834c6fd2807SJeff Garzik 		      struct ata_timing *m, unsigned int what)
2835c6fd2807SJeff Garzik {
2836c6fd2807SJeff Garzik 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2837c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2838c6fd2807SJeff Garzik 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2839c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2840c6fd2807SJeff Garzik 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2841c6fd2807SJeff Garzik 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2842c6fd2807SJeff Garzik 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2843c6fd2807SJeff Garzik 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2844c6fd2807SJeff Garzik }
2845c6fd2807SJeff Garzik 
2846c6fd2807SJeff Garzik static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
2847c6fd2807SJeff Garzik {
2848c6fd2807SJeff Garzik 	const struct ata_timing *t;
2849c6fd2807SJeff Garzik 
2850c6fd2807SJeff Garzik 	for (t = ata_timing; t->mode != speed; t++)
2851c6fd2807SJeff Garzik 		if (t->mode == 0xFF)
2852c6fd2807SJeff Garzik 			return NULL;
2853c6fd2807SJeff Garzik 	return t;
2854c6fd2807SJeff Garzik }
2855c6fd2807SJeff Garzik 
2856c6fd2807SJeff Garzik int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2857c6fd2807SJeff Garzik 		       struct ata_timing *t, int T, int UT)
2858c6fd2807SJeff Garzik {
2859c6fd2807SJeff Garzik 	const struct ata_timing *s;
2860c6fd2807SJeff Garzik 	struct ata_timing p;
2861c6fd2807SJeff Garzik 
2862c6fd2807SJeff Garzik 	/*
2863c6fd2807SJeff Garzik 	 * Find the mode.
2864c6fd2807SJeff Garzik 	 */
2865c6fd2807SJeff Garzik 
2866c6fd2807SJeff Garzik 	if (!(s = ata_timing_find_mode(speed)))
2867c6fd2807SJeff Garzik 		return -EINVAL;
2868c6fd2807SJeff Garzik 
2869c6fd2807SJeff Garzik 	memcpy(t, s, sizeof(*s));
2870c6fd2807SJeff Garzik 
2871c6fd2807SJeff Garzik 	/*
2872c6fd2807SJeff Garzik 	 * If the drive is an EIDE drive, it can tell us it needs extended
2873c6fd2807SJeff Garzik 	 * PIO/MW_DMA cycle timing.
2874c6fd2807SJeff Garzik 	 */
2875c6fd2807SJeff Garzik 
2876c6fd2807SJeff Garzik 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2877c6fd2807SJeff Garzik 		memset(&p, 0, sizeof(p));
2878c6fd2807SJeff Garzik 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2879c6fd2807SJeff Garzik 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2880c6fd2807SJeff Garzik 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2881c6fd2807SJeff Garzik 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2882c6fd2807SJeff Garzik 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2883c6fd2807SJeff Garzik 		}
2884c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2885c6fd2807SJeff Garzik 	}
2886c6fd2807SJeff Garzik 
2887c6fd2807SJeff Garzik 	/*
2888c6fd2807SJeff Garzik 	 * Convert the timing to bus clock counts.
2889c6fd2807SJeff Garzik 	 */
2890c6fd2807SJeff Garzik 
2891c6fd2807SJeff Garzik 	ata_timing_quantize(t, t, T, UT);
2892c6fd2807SJeff Garzik 
2893c6fd2807SJeff Garzik 	/*
2894c6fd2807SJeff Garzik 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2895c6fd2807SJeff Garzik 	 * S.M.A.R.T * and some other commands. We have to ensure that the
2896c6fd2807SJeff Garzik 	 * DMA cycle timing is slower/equal than the fastest PIO timing.
2897c6fd2807SJeff Garzik 	 */
2898c6fd2807SJeff Garzik 
2899fd3367afSAlan 	if (speed > XFER_PIO_6) {
2900c6fd2807SJeff Garzik 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2901c6fd2807SJeff Garzik 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2902c6fd2807SJeff Garzik 	}
2903c6fd2807SJeff Garzik 
2904c6fd2807SJeff Garzik 	/*
2905c6fd2807SJeff Garzik 	 * Lengthen active & recovery time so that cycle time is correct.
2906c6fd2807SJeff Garzik 	 */
2907c6fd2807SJeff Garzik 
2908c6fd2807SJeff Garzik 	if (t->act8b + t->rec8b < t->cyc8b) {
2909c6fd2807SJeff Garzik 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2910c6fd2807SJeff Garzik 		t->rec8b = t->cyc8b - t->act8b;
2911c6fd2807SJeff Garzik 	}
2912c6fd2807SJeff Garzik 
2913c6fd2807SJeff Garzik 	if (t->active + t->recover < t->cycle) {
2914c6fd2807SJeff Garzik 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2915c6fd2807SJeff Garzik 		t->recover = t->cycle - t->active;
2916c6fd2807SJeff Garzik 	}
29174f701d1eSAlan Cox 
29184f701d1eSAlan Cox 	/* In a few cases quantisation may produce enough errors to
29194f701d1eSAlan Cox 	   leave t->cycle too low for the sum of active and recovery
29204f701d1eSAlan Cox 	   if so we must correct this */
29214f701d1eSAlan Cox 	if (t->active + t->recover > t->cycle)
29224f701d1eSAlan Cox 		t->cycle = t->active + t->recover;
2923c6fd2807SJeff Garzik 
2924c6fd2807SJeff Garzik 	return 0;
2925c6fd2807SJeff Garzik }
2926c6fd2807SJeff Garzik 
2927c6fd2807SJeff Garzik /**
2928c6fd2807SJeff Garzik  *	ata_down_xfermask_limit - adjust dev xfer masks downward
2929c6fd2807SJeff Garzik  *	@dev: Device to adjust xfer masks
2930458337dbSTejun Heo  *	@sel: ATA_DNXFER_* selector
2931c6fd2807SJeff Garzik  *
2932c6fd2807SJeff Garzik  *	Adjust xfer masks of @dev downward.  Note that this function
2933c6fd2807SJeff Garzik  *	does not apply the change.  Invoking ata_set_mode() afterwards
2934c6fd2807SJeff Garzik  *	will apply the limit.
2935c6fd2807SJeff Garzik  *
2936c6fd2807SJeff Garzik  *	LOCKING:
2937c6fd2807SJeff Garzik  *	Inherited from caller.
2938c6fd2807SJeff Garzik  *
2939c6fd2807SJeff Garzik  *	RETURNS:
2940c6fd2807SJeff Garzik  *	0 on success, negative errno on failure
2941c6fd2807SJeff Garzik  */
2942458337dbSTejun Heo int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2943c6fd2807SJeff Garzik {
2944458337dbSTejun Heo 	char buf[32];
2945458337dbSTejun Heo 	unsigned int orig_mask, xfer_mask;
2946458337dbSTejun Heo 	unsigned int pio_mask, mwdma_mask, udma_mask;
2947458337dbSTejun Heo 	int quiet, highbit;
2948c6fd2807SJeff Garzik 
2949458337dbSTejun Heo 	quiet = !!(sel & ATA_DNXFER_QUIET);
2950458337dbSTejun Heo 	sel &= ~ATA_DNXFER_QUIET;
2951458337dbSTejun Heo 
2952458337dbSTejun Heo 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2953458337dbSTejun Heo 						  dev->mwdma_mask,
2954c6fd2807SJeff Garzik 						  dev->udma_mask);
2955458337dbSTejun Heo 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2956c6fd2807SJeff Garzik 
2957458337dbSTejun Heo 	switch (sel) {
2958458337dbSTejun Heo 	case ATA_DNXFER_PIO:
2959458337dbSTejun Heo 		highbit = fls(pio_mask) - 1;
2960458337dbSTejun Heo 		pio_mask &= ~(1 << highbit);
2961458337dbSTejun Heo 		break;
2962458337dbSTejun Heo 
2963458337dbSTejun Heo 	case ATA_DNXFER_DMA:
2964458337dbSTejun Heo 		if (udma_mask) {
2965458337dbSTejun Heo 			highbit = fls(udma_mask) - 1;
2966458337dbSTejun Heo 			udma_mask &= ~(1 << highbit);
2967458337dbSTejun Heo 			if (!udma_mask)
2968458337dbSTejun Heo 				return -ENOENT;
2969458337dbSTejun Heo 		} else if (mwdma_mask) {
2970458337dbSTejun Heo 			highbit = fls(mwdma_mask) - 1;
2971458337dbSTejun Heo 			mwdma_mask &= ~(1 << highbit);
2972458337dbSTejun Heo 			if (!mwdma_mask)
2973458337dbSTejun Heo 				return -ENOENT;
2974458337dbSTejun Heo 		}
2975458337dbSTejun Heo 		break;
2976458337dbSTejun Heo 
2977458337dbSTejun Heo 	case ATA_DNXFER_40C:
2978458337dbSTejun Heo 		udma_mask &= ATA_UDMA_MASK_40C;
2979458337dbSTejun Heo 		break;
2980458337dbSTejun Heo 
2981458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO0:
2982458337dbSTejun Heo 		pio_mask &= 1;
2983458337dbSTejun Heo 	case ATA_DNXFER_FORCE_PIO:
2984458337dbSTejun Heo 		mwdma_mask = 0;
2985458337dbSTejun Heo 		udma_mask = 0;
2986458337dbSTejun Heo 		break;
2987458337dbSTejun Heo 
2988458337dbSTejun Heo 	default:
2989458337dbSTejun Heo 		BUG();
2990458337dbSTejun Heo 	}
2991458337dbSTejun Heo 
2992458337dbSTejun Heo 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2993458337dbSTejun Heo 
2994458337dbSTejun Heo 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2995458337dbSTejun Heo 		return -ENOENT;
2996458337dbSTejun Heo 
2997458337dbSTejun Heo 	if (!quiet) {
2998458337dbSTejun Heo 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2999458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s:%s",
3000458337dbSTejun Heo 				 ata_mode_string(xfer_mask),
3001458337dbSTejun Heo 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3002458337dbSTejun Heo 		else
3003458337dbSTejun Heo 			snprintf(buf, sizeof(buf), "%s",
3004458337dbSTejun Heo 				 ata_mode_string(xfer_mask));
3005458337dbSTejun Heo 
3006458337dbSTejun Heo 		ata_dev_printk(dev, KERN_WARNING,
3007458337dbSTejun Heo 			       "limiting speed to %s\n", buf);
3008458337dbSTejun Heo 	}
3009c6fd2807SJeff Garzik 
3010c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3011c6fd2807SJeff Garzik 			    &dev->udma_mask);
3012c6fd2807SJeff Garzik 
3013c6fd2807SJeff Garzik 	return 0;
3014c6fd2807SJeff Garzik }
3015c6fd2807SJeff Garzik 
3016c6fd2807SJeff Garzik static int ata_dev_set_mode(struct ata_device *dev)
3017c6fd2807SJeff Garzik {
30189af5c9c9STejun Heo 	struct ata_eh_context *ehc = &dev->link->eh_context;
3019c6fd2807SJeff Garzik 	unsigned int err_mask;
3020c6fd2807SJeff Garzik 	int rc;
3021c6fd2807SJeff Garzik 
3022c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_PIO;
3023c6fd2807SJeff Garzik 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3024c6fd2807SJeff Garzik 		dev->flags |= ATA_DFLAG_PIO;
3025c6fd2807SJeff Garzik 
3026c6fd2807SJeff Garzik 	err_mask = ata_dev_set_xfermode(dev);
30272dcb407eSJeff Garzik 
302811750a40SAlan 	/* Old CFA may refuse this command, which is just fine */
302911750a40SAlan 	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
303011750a40SAlan 		err_mask &= ~AC_ERR_DEV;
30312dcb407eSJeff Garzik 
30320bc2a79aSAlan Cox 	/* Some very old devices and some bad newer ones fail any kind of
30330bc2a79aSAlan Cox 	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
30340bc2a79aSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
30350bc2a79aSAlan Cox 			dev->pio_mode <= XFER_PIO_2)
30360bc2a79aSAlan Cox 		err_mask &= ~AC_ERR_DEV;
30372dcb407eSJeff Garzik 
30383acaf94bSAlan Cox 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
30393acaf94bSAlan Cox 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
30403acaf94bSAlan Cox 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
30413acaf94bSAlan Cox 	    dev->dma_mode == XFER_MW_DMA_0 &&
30423acaf94bSAlan Cox 	    (dev->id[63] >> 8) & 1)
30433acaf94bSAlan Cox 		err_mask &= ~AC_ERR_DEV;
30443acaf94bSAlan Cox 
3045c6fd2807SJeff Garzik 	if (err_mask) {
3046c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3047c6fd2807SJeff Garzik 			       "(err_mask=0x%x)\n", err_mask);
3048c6fd2807SJeff Garzik 		return -EIO;
3049c6fd2807SJeff Garzik 	}
3050c6fd2807SJeff Garzik 
3051baa1e78aSTejun Heo 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3052422c9daaSTejun Heo 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3053baa1e78aSTejun Heo 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3054c6fd2807SJeff Garzik 	if (rc)
3055c6fd2807SJeff Garzik 		return rc;
3056c6fd2807SJeff Garzik 
3057c6fd2807SJeff Garzik 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3058c6fd2807SJeff Garzik 		dev->xfer_shift, (int)dev->xfer_mode);
3059c6fd2807SJeff Garzik 
3060c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3061c6fd2807SJeff Garzik 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
3062c6fd2807SJeff Garzik 	return 0;
3063c6fd2807SJeff Garzik }
3064c6fd2807SJeff Garzik 
3065c6fd2807SJeff Garzik /**
306604351821SAlan  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
30670260731fSTejun Heo  *	@link: link on which timings will be programmed
3068c6fd2807SJeff Garzik  *	@r_failed_dev: out paramter for failed device
3069c6fd2807SJeff Garzik  *
307004351821SAlan  *	Standard implementation of the function used to tune and set
307104351821SAlan  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
307204351821SAlan  *	ata_dev_set_mode() fails, pointer to the failing device is
3073c6fd2807SJeff Garzik  *	returned in @r_failed_dev.
3074c6fd2807SJeff Garzik  *
3075c6fd2807SJeff Garzik  *	LOCKING:
3076c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3077c6fd2807SJeff Garzik  *
3078c6fd2807SJeff Garzik  *	RETURNS:
3079c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
3080c6fd2807SJeff Garzik  */
308104351821SAlan 
30820260731fSTejun Heo int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3083c6fd2807SJeff Garzik {
30840260731fSTejun Heo 	struct ata_port *ap = link->ap;
3085c6fd2807SJeff Garzik 	struct ata_device *dev;
3086f58229f8STejun Heo 	int rc = 0, used_dma = 0, found = 0;
3087c6fd2807SJeff Garzik 
3088c6fd2807SJeff Garzik 	/* step 1: calculate xfer_mask */
3089f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3090c6fd2807SJeff Garzik 		unsigned int pio_mask, dma_mask;
3091b3a70601SAlan Cox 		unsigned int mode_mask;
3092c6fd2807SJeff Garzik 
3093c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3094c6fd2807SJeff Garzik 			continue;
3095c6fd2807SJeff Garzik 
3096b3a70601SAlan Cox 		mode_mask = ATA_DMA_MASK_ATA;
3097b3a70601SAlan Cox 		if (dev->class == ATA_DEV_ATAPI)
3098b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_ATAPI;
3099b3a70601SAlan Cox 		else if (ata_id_is_cfa(dev->id))
3100b3a70601SAlan Cox 			mode_mask = ATA_DMA_MASK_CFA;
3101b3a70601SAlan Cox 
3102c6fd2807SJeff Garzik 		ata_dev_xfermask(dev);
3103c6fd2807SJeff Garzik 
3104c6fd2807SJeff Garzik 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3105c6fd2807SJeff Garzik 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3106b3a70601SAlan Cox 
3107b3a70601SAlan Cox 		if (libata_dma_mask & mode_mask)
3108b3a70601SAlan Cox 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3109b3a70601SAlan Cox 		else
3110b3a70601SAlan Cox 			dma_mask = 0;
3111b3a70601SAlan Cox 
3112c6fd2807SJeff Garzik 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3113c6fd2807SJeff Garzik 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3114c6fd2807SJeff Garzik 
3115c6fd2807SJeff Garzik 		found = 1;
3116c6fd2807SJeff Garzik 		if (dev->dma_mode)
3117c6fd2807SJeff Garzik 			used_dma = 1;
3118c6fd2807SJeff Garzik 	}
3119c6fd2807SJeff Garzik 	if (!found)
3120c6fd2807SJeff Garzik 		goto out;
3121c6fd2807SJeff Garzik 
3122c6fd2807SJeff Garzik 	/* step 2: always set host PIO timings */
3123f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3124c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev))
3125c6fd2807SJeff Garzik 			continue;
3126c6fd2807SJeff Garzik 
3127c6fd2807SJeff Garzik 		if (!dev->pio_mode) {
3128c6fd2807SJeff Garzik 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3129c6fd2807SJeff Garzik 			rc = -EINVAL;
3130c6fd2807SJeff Garzik 			goto out;
3131c6fd2807SJeff Garzik 		}
3132c6fd2807SJeff Garzik 
3133c6fd2807SJeff Garzik 		dev->xfer_mode = dev->pio_mode;
3134c6fd2807SJeff Garzik 		dev->xfer_shift = ATA_SHIFT_PIO;
3135c6fd2807SJeff Garzik 		if (ap->ops->set_piomode)
3136c6fd2807SJeff Garzik 			ap->ops->set_piomode(ap, dev);
3137c6fd2807SJeff Garzik 	}
3138c6fd2807SJeff Garzik 
3139c6fd2807SJeff Garzik 	/* step 3: set host DMA timings */
3140f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
3141c6fd2807SJeff Garzik 		if (!ata_dev_enabled(dev) || !dev->dma_mode)
3142c6fd2807SJeff Garzik 			continue;
3143c6fd2807SJeff Garzik 
3144c6fd2807SJeff Garzik 		dev->xfer_mode = dev->dma_mode;
3145c6fd2807SJeff Garzik 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3146c6fd2807SJeff Garzik 		if (ap->ops->set_dmamode)
3147c6fd2807SJeff Garzik 			ap->ops->set_dmamode(ap, dev);
3148c6fd2807SJeff Garzik 	}
3149c6fd2807SJeff Garzik 
3150c6fd2807SJeff Garzik 	/* step 4: update devices' xfer mode */
3151f58229f8STejun Heo 	ata_link_for_each_dev(dev, link) {
315218d90debSAlan 		/* don't update suspended devices' xfer mode */
31539666f400STejun Heo 		if (!ata_dev_enabled(dev))
3154c6fd2807SJeff Garzik 			continue;
3155c6fd2807SJeff Garzik 
3156c6fd2807SJeff Garzik 		rc = ata_dev_set_mode(dev);
3157c6fd2807SJeff Garzik 		if (rc)
3158c6fd2807SJeff Garzik 			goto out;
3159c6fd2807SJeff Garzik 	}
3160c6fd2807SJeff Garzik 
3161c6fd2807SJeff Garzik 	/* Record simplex status. If we selected DMA then the other
3162c6fd2807SJeff Garzik 	 * host channels are not permitted to do so.
3163c6fd2807SJeff Garzik 	 */
3164cca3974eSJeff Garzik 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3165032af1ceSAlan 		ap->host->simplex_claimed = ap;
3166c6fd2807SJeff Garzik 
3167c6fd2807SJeff Garzik  out:
3168c6fd2807SJeff Garzik 	if (rc)
3169c6fd2807SJeff Garzik 		*r_failed_dev = dev;
3170c6fd2807SJeff Garzik 	return rc;
3171c6fd2807SJeff Garzik }
3172c6fd2807SJeff Garzik 
3173c6fd2807SJeff Garzik /**
317404351821SAlan  *	ata_set_mode - Program timings and issue SET FEATURES - XFER
31750260731fSTejun Heo  *	@link: link on which timings will be programmed
317604351821SAlan  *	@r_failed_dev: out paramter for failed device
317704351821SAlan  *
317804351821SAlan  *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
317904351821SAlan  *	ata_set_mode() fails, pointer to the failing device is
318004351821SAlan  *	returned in @r_failed_dev.
318104351821SAlan  *
318204351821SAlan  *	LOCKING:
318304351821SAlan  *	PCI/etc. bus probe sem.
318404351821SAlan  *
318504351821SAlan  *	RETURNS:
318604351821SAlan  *	0 on success, negative errno otherwise
318704351821SAlan  */
31880260731fSTejun Heo int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
318904351821SAlan {
31900260731fSTejun Heo 	struct ata_port *ap = link->ap;
31910260731fSTejun Heo 
319204351821SAlan 	/* has private set_mode? */
319304351821SAlan 	if (ap->ops->set_mode)
31940260731fSTejun Heo 		return ap->ops->set_mode(link, r_failed_dev);
31950260731fSTejun Heo 	return ata_do_set_mode(link, r_failed_dev);
319604351821SAlan }
319704351821SAlan 
319804351821SAlan /**
3199c6fd2807SJeff Garzik  *	ata_tf_to_host - issue ATA taskfile to host controller
3200c6fd2807SJeff Garzik  *	@ap: port to which command is being issued
3201c6fd2807SJeff Garzik  *	@tf: ATA taskfile register set
3202c6fd2807SJeff Garzik  *
3203c6fd2807SJeff Garzik  *	Issues ATA taskfile register set to ATA host controller,
3204c6fd2807SJeff Garzik  *	with proper synchronization with interrupt handler and
3205c6fd2807SJeff Garzik  *	other threads.
3206c6fd2807SJeff Garzik  *
3207c6fd2807SJeff Garzik  *	LOCKING:
3208cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
3209c6fd2807SJeff Garzik  */
3210c6fd2807SJeff Garzik 
3211c6fd2807SJeff Garzik static inline void ata_tf_to_host(struct ata_port *ap,
3212c6fd2807SJeff Garzik 				  const struct ata_taskfile *tf)
3213c6fd2807SJeff Garzik {
3214c6fd2807SJeff Garzik 	ap->ops->tf_load(ap, tf);
3215c6fd2807SJeff Garzik 	ap->ops->exec_command(ap, tf);
3216c6fd2807SJeff Garzik }
3217c6fd2807SJeff Garzik 
3218c6fd2807SJeff Garzik /**
3219c6fd2807SJeff Garzik  *	ata_busy_sleep - sleep until BSY clears, or timeout
3220c6fd2807SJeff Garzik  *	@ap: port containing status register to be polled
3221c6fd2807SJeff Garzik  *	@tmout_pat: impatience timeout
3222c6fd2807SJeff Garzik  *	@tmout: overall timeout
3223c6fd2807SJeff Garzik  *
3224c6fd2807SJeff Garzik  *	Sleep until ATA Status register bit BSY clears,
3225c6fd2807SJeff Garzik  *	or a timeout occurs.
3226c6fd2807SJeff Garzik  *
3227d1adc1bbSTejun Heo  *	LOCKING:
3228d1adc1bbSTejun Heo  *	Kernel thread context (may sleep).
3229d1adc1bbSTejun Heo  *
3230d1adc1bbSTejun Heo  *	RETURNS:
3231d1adc1bbSTejun Heo  *	0 on success, -errno otherwise.
3232c6fd2807SJeff Garzik  */
3233d1adc1bbSTejun Heo int ata_busy_sleep(struct ata_port *ap,
3234c6fd2807SJeff Garzik 		   unsigned long tmout_pat, unsigned long tmout)
3235c6fd2807SJeff Garzik {
3236c6fd2807SJeff Garzik 	unsigned long timer_start, timeout;
3237c6fd2807SJeff Garzik 	u8 status;
3238c6fd2807SJeff Garzik 
3239c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 300);
3240c6fd2807SJeff Garzik 	timer_start = jiffies;
3241c6fd2807SJeff Garzik 	timeout = timer_start + tmout_pat;
3242d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3243d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3244c6fd2807SJeff Garzik 		msleep(50);
3245c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 3);
3246c6fd2807SJeff Garzik 	}
3247c6fd2807SJeff Garzik 
3248d1adc1bbSTejun Heo 	if (status != 0xff && (status & ATA_BUSY))
3249c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING,
325035aa7a43SJeff Garzik 				"port is slow to respond, please be patient "
325135aa7a43SJeff Garzik 				"(Status 0x%x)\n", status);
3252c6fd2807SJeff Garzik 
3253c6fd2807SJeff Garzik 	timeout = timer_start + tmout;
3254d1adc1bbSTejun Heo 	while (status != 0xff && (status & ATA_BUSY) &&
3255d1adc1bbSTejun Heo 	       time_before(jiffies, timeout)) {
3256c6fd2807SJeff Garzik 		msleep(50);
3257c6fd2807SJeff Garzik 		status = ata_chk_status(ap);
3258c6fd2807SJeff Garzik 	}
3259c6fd2807SJeff Garzik 
3260d1adc1bbSTejun Heo 	if (status == 0xff)
3261d1adc1bbSTejun Heo 		return -ENODEV;
3262d1adc1bbSTejun Heo 
3263c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
3264c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "port failed to respond "
326535aa7a43SJeff Garzik 				"(%lu secs, Status 0x%x)\n",
326635aa7a43SJeff Garzik 				tmout / HZ, status);
3267d1adc1bbSTejun Heo 		return -EBUSY;
3268c6fd2807SJeff Garzik 	}
3269c6fd2807SJeff Garzik 
3270c6fd2807SJeff Garzik 	return 0;
3271c6fd2807SJeff Garzik }
3272c6fd2807SJeff Garzik 
3273d4b2bab4STejun Heo /**
327488ff6eafSTejun Heo  *	ata_wait_after_reset - wait before checking status after reset
327588ff6eafSTejun Heo  *	@ap: port containing status register to be polled
327688ff6eafSTejun Heo  *	@deadline: deadline jiffies for the operation
327788ff6eafSTejun Heo  *
327888ff6eafSTejun Heo  *	After reset, we need to pause a while before reading status.
327988ff6eafSTejun Heo  *	Also, certain combination of controller and device report 0xff
328088ff6eafSTejun Heo  *	for some duration (e.g. until SATA PHY is up and running)
328188ff6eafSTejun Heo  *	which is interpreted as empty port in ATA world.  This
328288ff6eafSTejun Heo  *	function also waits for such devices to get out of 0xff
328388ff6eafSTejun Heo  *	status.
328488ff6eafSTejun Heo  *
328588ff6eafSTejun Heo  *	LOCKING:
328688ff6eafSTejun Heo  *	Kernel thread context (may sleep).
328788ff6eafSTejun Heo  */
328888ff6eafSTejun Heo void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
328988ff6eafSTejun Heo {
329088ff6eafSTejun Heo 	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
329188ff6eafSTejun Heo 
329288ff6eafSTejun Heo 	if (time_before(until, deadline))
329388ff6eafSTejun Heo 		deadline = until;
329488ff6eafSTejun Heo 
329588ff6eafSTejun Heo 	/* Spec mandates ">= 2ms" before checking status.  We wait
329688ff6eafSTejun Heo 	 * 150ms, because that was the magic delay used for ATAPI
329788ff6eafSTejun Heo 	 * devices in Hale Landis's ATADRVR, for the period of time
329888ff6eafSTejun Heo 	 * between when the ATA command register is written, and then
329988ff6eafSTejun Heo 	 * status is checked.  Because waiting for "a while" before
330088ff6eafSTejun Heo 	 * checking status is fine, post SRST, we perform this magic
330188ff6eafSTejun Heo 	 * delay here as well.
330288ff6eafSTejun Heo 	 *
330388ff6eafSTejun Heo 	 * Old drivers/ide uses the 2mS rule and then waits for ready.
330488ff6eafSTejun Heo 	 */
330588ff6eafSTejun Heo 	msleep(150);
330688ff6eafSTejun Heo 
330788ff6eafSTejun Heo 	/* Wait for 0xff to clear.  Some SATA devices take a long time
330888ff6eafSTejun Heo 	 * to clear 0xff after reset.  For example, HHD424020F7SV00
330988ff6eafSTejun Heo 	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
331088ff6eafSTejun Heo 	 * than that.
33111974e201STejun Heo 	 *
33121974e201STejun Heo 	 * Note that some PATA controllers (pata_ali) explode if
33131974e201STejun Heo 	 * status register is read more than once when there's no
33141974e201STejun Heo 	 * device attached.
331588ff6eafSTejun Heo 	 */
33161974e201STejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
331788ff6eafSTejun Heo 		while (1) {
331888ff6eafSTejun Heo 			u8 status = ata_chk_status(ap);
331988ff6eafSTejun Heo 
332088ff6eafSTejun Heo 			if (status != 0xff || time_after(jiffies, deadline))
332188ff6eafSTejun Heo 				return;
332288ff6eafSTejun Heo 
332388ff6eafSTejun Heo 			msleep(50);
332488ff6eafSTejun Heo 		}
332588ff6eafSTejun Heo 	}
33261974e201STejun Heo }
332788ff6eafSTejun Heo 
332888ff6eafSTejun Heo /**
3329d4b2bab4STejun Heo  *	ata_wait_ready - sleep until BSY clears, or timeout
3330d4b2bab4STejun Heo  *	@ap: port containing status register to be polled
3331d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3332d4b2bab4STejun Heo  *
3333d4b2bab4STejun Heo  *	Sleep until ATA Status register bit BSY clears, or timeout
3334d4b2bab4STejun Heo  *	occurs.
3335d4b2bab4STejun Heo  *
3336d4b2bab4STejun Heo  *	LOCKING:
3337d4b2bab4STejun Heo  *	Kernel thread context (may sleep).
3338d4b2bab4STejun Heo  *
3339d4b2bab4STejun Heo  *	RETURNS:
3340d4b2bab4STejun Heo  *	0 on success, -errno otherwise.
3341d4b2bab4STejun Heo  */
3342d4b2bab4STejun Heo int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3343d4b2bab4STejun Heo {
3344d4b2bab4STejun Heo 	unsigned long start = jiffies;
3345d4b2bab4STejun Heo 	int warned = 0;
3346d4b2bab4STejun Heo 
3347d4b2bab4STejun Heo 	while (1) {
3348d4b2bab4STejun Heo 		u8 status = ata_chk_status(ap);
3349d4b2bab4STejun Heo 		unsigned long now = jiffies;
3350d4b2bab4STejun Heo 
3351d4b2bab4STejun Heo 		if (!(status & ATA_BUSY))
3352d4b2bab4STejun Heo 			return 0;
3353936fd732STejun Heo 		if (!ata_link_online(&ap->link) && status == 0xff)
3354d4b2bab4STejun Heo 			return -ENODEV;
3355d4b2bab4STejun Heo 		if (time_after(now, deadline))
3356d4b2bab4STejun Heo 			return -EBUSY;
3357d4b2bab4STejun Heo 
3358d4b2bab4STejun Heo 		if (!warned && time_after(now, start + 5 * HZ) &&
3359d4b2bab4STejun Heo 		    (deadline - now > 3 * HZ)) {
3360d4b2bab4STejun Heo 			ata_port_printk(ap, KERN_WARNING,
3361d4b2bab4STejun Heo 				"port is slow to respond, please be patient "
3362d4b2bab4STejun Heo 				"(Status 0x%x)\n", status);
3363d4b2bab4STejun Heo 			warned = 1;
3364d4b2bab4STejun Heo 		}
3365d4b2bab4STejun Heo 
3366d4b2bab4STejun Heo 		msleep(50);
3367d4b2bab4STejun Heo 	}
3368d4b2bab4STejun Heo }
3369d4b2bab4STejun Heo 
3370d4b2bab4STejun Heo static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3371d4b2bab4STejun Heo 			      unsigned long deadline)
3372c6fd2807SJeff Garzik {
3373c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3374c6fd2807SJeff Garzik 	unsigned int dev0 = devmask & (1 << 0);
3375c6fd2807SJeff Garzik 	unsigned int dev1 = devmask & (1 << 1);
33769b89391cSTejun Heo 	int rc, ret = 0;
3377c6fd2807SJeff Garzik 
3378c6fd2807SJeff Garzik 	/* if device 0 was found in ata_devchk, wait for its
3379c6fd2807SJeff Garzik 	 * BSY bit to clear
3380c6fd2807SJeff Garzik 	 */
3381d4b2bab4STejun Heo 	if (dev0) {
3382d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
33839b89391cSTejun Heo 		if (rc) {
33849b89391cSTejun Heo 			if (rc != -ENODEV)
3385d4b2bab4STejun Heo 				return rc;
33869b89391cSTejun Heo 			ret = rc;
33879b89391cSTejun Heo 		}
3388d4b2bab4STejun Heo 	}
3389c6fd2807SJeff Garzik 
3390e141d999STejun Heo 	/* if device 1 was found in ata_devchk, wait for register
3391e141d999STejun Heo 	 * access briefly, then wait for BSY to clear.
3392c6fd2807SJeff Garzik 	 */
3393e141d999STejun Heo 	if (dev1) {
3394e141d999STejun Heo 		int i;
3395c6fd2807SJeff Garzik 
3396c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3397e141d999STejun Heo 
3398e141d999STejun Heo 		/* Wait for register access.  Some ATAPI devices fail
3399e141d999STejun Heo 		 * to set nsect/lbal after reset, so don't waste too
3400e141d999STejun Heo 		 * much time on it.  We're gonna wait for !BSY anyway.
3401e141d999STejun Heo 		 */
3402e141d999STejun Heo 		for (i = 0; i < 2; i++) {
3403e141d999STejun Heo 			u8 nsect, lbal;
3404e141d999STejun Heo 
34050d5ff566STejun Heo 			nsect = ioread8(ioaddr->nsect_addr);
34060d5ff566STejun Heo 			lbal = ioread8(ioaddr->lbal_addr);
3407c6fd2807SJeff Garzik 			if ((nsect == 1) && (lbal == 1))
3408c6fd2807SJeff Garzik 				break;
3409c6fd2807SJeff Garzik 			msleep(50);	/* give drive a breather */
3410c6fd2807SJeff Garzik 		}
3411e141d999STejun Heo 
3412d4b2bab4STejun Heo 		rc = ata_wait_ready(ap, deadline);
34139b89391cSTejun Heo 		if (rc) {
34149b89391cSTejun Heo 			if (rc != -ENODEV)
3415d4b2bab4STejun Heo 				return rc;
34169b89391cSTejun Heo 			ret = rc;
34179b89391cSTejun Heo 		}
3418d4b2bab4STejun Heo 	}
3419c6fd2807SJeff Garzik 
3420c6fd2807SJeff Garzik 	/* is all this really necessary? */
3421c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3422c6fd2807SJeff Garzik 	if (dev1)
3423c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3424c6fd2807SJeff Garzik 	if (dev0)
3425c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3426d4b2bab4STejun Heo 
34279b89391cSTejun Heo 	return ret;
3428c6fd2807SJeff Garzik }
3429c6fd2807SJeff Garzik 
3430d4b2bab4STejun Heo static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3431d4b2bab4STejun Heo 			     unsigned long deadline)
3432c6fd2807SJeff Garzik {
3433c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3434c6fd2807SJeff Garzik 
343544877b4eSTejun Heo 	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3436c6fd2807SJeff Garzik 
3437c6fd2807SJeff Garzik 	/* software reset.  causes dev0 to be selected */
34380d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3439c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
34400d5ff566STejun Heo 	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3441c6fd2807SJeff Garzik 	udelay(20);	/* FIXME: flush */
34420d5ff566STejun Heo 	iowrite8(ap->ctl, ioaddr->ctl_addr);
3443c6fd2807SJeff Garzik 
344488ff6eafSTejun Heo 	/* wait a while before checking status */
344588ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
3446c6fd2807SJeff Garzik 
3447c6fd2807SJeff Garzik 	/* Before we perform post reset processing we want to see if
3448c6fd2807SJeff Garzik 	 * the bus shows 0xFF because the odd clown forgets the D7
3449c6fd2807SJeff Garzik 	 * pulldown resistor.
3450c6fd2807SJeff Garzik 	 */
3451150981b0SAlan Cox 	if (ata_chk_status(ap) == 0xFF)
34529b89391cSTejun Heo 		return -ENODEV;
3453c6fd2807SJeff Garzik 
3454d4b2bab4STejun Heo 	return ata_bus_post_reset(ap, devmask, deadline);
3455c6fd2807SJeff Garzik }
3456c6fd2807SJeff Garzik 
3457c6fd2807SJeff Garzik /**
3458c6fd2807SJeff Garzik  *	ata_bus_reset - reset host port and associated ATA channel
3459c6fd2807SJeff Garzik  *	@ap: port to reset
3460c6fd2807SJeff Garzik  *
3461c6fd2807SJeff Garzik  *	This is typically the first time we actually start issuing
3462c6fd2807SJeff Garzik  *	commands to the ATA channel.  We wait for BSY to clear, then
3463c6fd2807SJeff Garzik  *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3464c6fd2807SJeff Garzik  *	result.  Determine what devices, if any, are on the channel
3465c6fd2807SJeff Garzik  *	by looking at the device 0/1 error register.  Look at the signature
3466c6fd2807SJeff Garzik  *	stored in each device's taskfile registers, to determine if
3467c6fd2807SJeff Garzik  *	the device is ATA or ATAPI.
3468c6fd2807SJeff Garzik  *
3469c6fd2807SJeff Garzik  *	LOCKING:
3470c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
3471cca3974eSJeff Garzik  *	Obtains host lock.
3472c6fd2807SJeff Garzik  *
3473c6fd2807SJeff Garzik  *	SIDE EFFECTS:
3474c6fd2807SJeff Garzik  *	Sets ATA_FLAG_DISABLED if bus reset fails.
3475c6fd2807SJeff Garzik  */
3476c6fd2807SJeff Garzik 
3477c6fd2807SJeff Garzik void ata_bus_reset(struct ata_port *ap)
3478c6fd2807SJeff Garzik {
34799af5c9c9STejun Heo 	struct ata_device *device = ap->link.device;
3480c6fd2807SJeff Garzik 	struct ata_ioports *ioaddr = &ap->ioaddr;
3481c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3482c6fd2807SJeff Garzik 	u8 err;
3483c6fd2807SJeff Garzik 	unsigned int dev0, dev1 = 0, devmask = 0;
34849b89391cSTejun Heo 	int rc;
3485c6fd2807SJeff Garzik 
348644877b4eSTejun Heo 	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3487c6fd2807SJeff Garzik 
3488c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3489c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_SATA_RESET)
3490c6fd2807SJeff Garzik 		dev0 = 1;
3491c6fd2807SJeff Garzik 	else {
3492c6fd2807SJeff Garzik 		dev0 = ata_devchk(ap, 0);
3493c6fd2807SJeff Garzik 		if (slave_possible)
3494c6fd2807SJeff Garzik 			dev1 = ata_devchk(ap, 1);
3495c6fd2807SJeff Garzik 	}
3496c6fd2807SJeff Garzik 
3497c6fd2807SJeff Garzik 	if (dev0)
3498c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3499c6fd2807SJeff Garzik 	if (dev1)
3500c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3501c6fd2807SJeff Garzik 
3502c6fd2807SJeff Garzik 	/* select device 0 again */
3503c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3504c6fd2807SJeff Garzik 
3505c6fd2807SJeff Garzik 	/* issue bus reset */
35069b89391cSTejun Heo 	if (ap->flags & ATA_FLAG_SRST) {
35079b89391cSTejun Heo 		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
35089b89391cSTejun Heo 		if (rc && rc != -ENODEV)
3509c6fd2807SJeff Garzik 			goto err_out;
35109b89391cSTejun Heo 	}
3511c6fd2807SJeff Garzik 
3512c6fd2807SJeff Garzik 	/*
3513c6fd2807SJeff Garzik 	 * determine by signature whether we have ATA or ATAPI devices
3514c6fd2807SJeff Garzik 	 */
35153f19859eSTejun Heo 	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3516c6fd2807SJeff Garzik 	if ((slave_possible) && (err != 0x81))
35173f19859eSTejun Heo 		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3518c6fd2807SJeff Garzik 
3519c6fd2807SJeff Garzik 	/* is double-select really necessary? */
35209af5c9c9STejun Heo 	if (device[1].class != ATA_DEV_NONE)
3521c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
35229af5c9c9STejun Heo 	if (device[0].class != ATA_DEV_NONE)
3523c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3524c6fd2807SJeff Garzik 
3525c6fd2807SJeff Garzik 	/* if no devices were detected, disable this port */
35269af5c9c9STejun Heo 	if ((device[0].class == ATA_DEV_NONE) &&
35279af5c9c9STejun Heo 	    (device[1].class == ATA_DEV_NONE))
3528c6fd2807SJeff Garzik 		goto err_out;
3529c6fd2807SJeff Garzik 
3530c6fd2807SJeff Garzik 	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3531c6fd2807SJeff Garzik 		/* set up device control for ATA_FLAG_SATA_RESET */
35320d5ff566STejun Heo 		iowrite8(ap->ctl, ioaddr->ctl_addr);
3533c6fd2807SJeff Garzik 	}
3534c6fd2807SJeff Garzik 
3535c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3536c6fd2807SJeff Garzik 	return;
3537c6fd2807SJeff Garzik 
3538c6fd2807SJeff Garzik err_out:
3539c6fd2807SJeff Garzik 	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3540ac8869d5SJeff Garzik 	ata_port_disable(ap);
3541c6fd2807SJeff Garzik 
3542c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3543c6fd2807SJeff Garzik }
3544c6fd2807SJeff Garzik 
3545c6fd2807SJeff Garzik /**
3546936fd732STejun Heo  *	sata_link_debounce - debounce SATA phy status
3547936fd732STejun Heo  *	@link: ATA link to debounce SATA phy status for
3548c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duratinon, timeout } in msec
3549d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3550c6fd2807SJeff Garzik  *
3551936fd732STejun Heo *	Make sure SStatus of @link reaches stable state, determined by
3552c6fd2807SJeff Garzik  *	holding the same value where DET is not 1 for @duration polled
3553c6fd2807SJeff Garzik  *	every @interval, before @timeout.  Timeout constraints the
3554d4b2bab4STejun Heo  *	beginning of the stable state.  Because DET gets stuck at 1 on
3555d4b2bab4STejun Heo  *	some controllers after hot unplugging, this functions waits
3556c6fd2807SJeff Garzik  *	until timeout then returns 0 if DET is stable at 1.
3557c6fd2807SJeff Garzik  *
3558d4b2bab4STejun Heo  *	@timeout is further limited by @deadline.  The sooner of the
3559d4b2bab4STejun Heo  *	two is used.
3560d4b2bab4STejun Heo  *
3561c6fd2807SJeff Garzik  *	LOCKING:
3562c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3563c6fd2807SJeff Garzik  *
3564c6fd2807SJeff Garzik  *	RETURNS:
3565c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3566c6fd2807SJeff Garzik  */
3567936fd732STejun Heo int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3568d4b2bab4STejun Heo 		       unsigned long deadline)
3569c6fd2807SJeff Garzik {
3570c6fd2807SJeff Garzik 	unsigned long interval_msec = params[0];
3571d4b2bab4STejun Heo 	unsigned long duration = msecs_to_jiffies(params[1]);
3572d4b2bab4STejun Heo 	unsigned long last_jiffies, t;
3573c6fd2807SJeff Garzik 	u32 last, cur;
3574c6fd2807SJeff Garzik 	int rc;
3575c6fd2807SJeff Garzik 
3576d4b2bab4STejun Heo 	t = jiffies + msecs_to_jiffies(params[2]);
3577d4b2bab4STejun Heo 	if (time_before(t, deadline))
3578d4b2bab4STejun Heo 		deadline = t;
3579d4b2bab4STejun Heo 
3580936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3581c6fd2807SJeff Garzik 		return rc;
3582c6fd2807SJeff Garzik 	cur &= 0xf;
3583c6fd2807SJeff Garzik 
3584c6fd2807SJeff Garzik 	last = cur;
3585c6fd2807SJeff Garzik 	last_jiffies = jiffies;
3586c6fd2807SJeff Garzik 
3587c6fd2807SJeff Garzik 	while (1) {
3588c6fd2807SJeff Garzik 		msleep(interval_msec);
3589936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3590c6fd2807SJeff Garzik 			return rc;
3591c6fd2807SJeff Garzik 		cur &= 0xf;
3592c6fd2807SJeff Garzik 
3593c6fd2807SJeff Garzik 		/* DET stable? */
3594c6fd2807SJeff Garzik 		if (cur == last) {
3595d4b2bab4STejun Heo 			if (cur == 1 && time_before(jiffies, deadline))
3596c6fd2807SJeff Garzik 				continue;
3597c6fd2807SJeff Garzik 			if (time_after(jiffies, last_jiffies + duration))
3598c6fd2807SJeff Garzik 				return 0;
3599c6fd2807SJeff Garzik 			continue;
3600c6fd2807SJeff Garzik 		}
3601c6fd2807SJeff Garzik 
3602c6fd2807SJeff Garzik 		/* unstable, start over */
3603c6fd2807SJeff Garzik 		last = cur;
3604c6fd2807SJeff Garzik 		last_jiffies = jiffies;
3605c6fd2807SJeff Garzik 
3606f1545154STejun Heo 		/* Check deadline.  If debouncing failed, return
3607f1545154STejun Heo 		 * -EPIPE to tell upper layer to lower link speed.
3608f1545154STejun Heo 		 */
3609d4b2bab4STejun Heo 		if (time_after(jiffies, deadline))
3610f1545154STejun Heo 			return -EPIPE;
3611c6fd2807SJeff Garzik 	}
3612c6fd2807SJeff Garzik }
3613c6fd2807SJeff Garzik 
3614c6fd2807SJeff Garzik /**
3615936fd732STejun Heo  *	sata_link_resume - resume SATA link
3616936fd732STejun Heo  *	@link: ATA link to resume SATA
3617c6fd2807SJeff Garzik  *	@params: timing parameters { interval, duratinon, timeout } in msec
3618d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3619c6fd2807SJeff Garzik  *
3620936fd732STejun Heo  *	Resume SATA phy @link and debounce it.
3621c6fd2807SJeff Garzik  *
3622c6fd2807SJeff Garzik  *	LOCKING:
3623c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3624c6fd2807SJeff Garzik  *
3625c6fd2807SJeff Garzik  *	RETURNS:
3626c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
3627c6fd2807SJeff Garzik  */
3628936fd732STejun Heo int sata_link_resume(struct ata_link *link, const unsigned long *params,
3629d4b2bab4STejun Heo 		     unsigned long deadline)
3630c6fd2807SJeff Garzik {
3631c6fd2807SJeff Garzik 	u32 scontrol;
3632c6fd2807SJeff Garzik 	int rc;
3633c6fd2807SJeff Garzik 
3634936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3635c6fd2807SJeff Garzik 		return rc;
3636c6fd2807SJeff Garzik 
3637c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x300;
3638c6fd2807SJeff Garzik 
3639936fd732STejun Heo 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3640c6fd2807SJeff Garzik 		return rc;
3641c6fd2807SJeff Garzik 
3642c6fd2807SJeff Garzik 	/* Some PHYs react badly if SStatus is pounded immediately
3643c6fd2807SJeff Garzik 	 * after resuming.  Delay 200ms before debouncing.
3644c6fd2807SJeff Garzik 	 */
3645c6fd2807SJeff Garzik 	msleep(200);
3646c6fd2807SJeff Garzik 
3647936fd732STejun Heo 	return sata_link_debounce(link, params, deadline);
3648c6fd2807SJeff Garzik }
3649c6fd2807SJeff Garzik 
3650c6fd2807SJeff Garzik /**
3651c6fd2807SJeff Garzik  *	ata_std_prereset - prepare for reset
3652cc0680a5STejun Heo  *	@link: ATA link to be reset
3653d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3654c6fd2807SJeff Garzik  *
3655cc0680a5STejun Heo  *	@link is about to be reset.  Initialize it.  Failure from
3656b8cffc6aSTejun Heo  *	prereset makes libata abort whole reset sequence and give up
3657b8cffc6aSTejun Heo  *	that port, so prereset should be best-effort.  It does its
3658b8cffc6aSTejun Heo  *	best to prepare for reset sequence but if things go wrong, it
3659b8cffc6aSTejun Heo  *	should just whine, not fail.
3660c6fd2807SJeff Garzik  *
3661c6fd2807SJeff Garzik  *	LOCKING:
3662c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3663c6fd2807SJeff Garzik  *
3664c6fd2807SJeff Garzik  *	RETURNS:
3665c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3666c6fd2807SJeff Garzik  */
3667cc0680a5STejun Heo int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3668c6fd2807SJeff Garzik {
3669cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3670936fd732STejun Heo 	struct ata_eh_context *ehc = &link->eh_context;
3671c6fd2807SJeff Garzik 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3672c6fd2807SJeff Garzik 	int rc;
3673c6fd2807SJeff Garzik 
367431daabdaSTejun Heo 	/* handle link resume */
3675c6fd2807SJeff Garzik 	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
36760c88758bSTejun Heo 	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3677c6fd2807SJeff Garzik 		ehc->i.action |= ATA_EH_HARDRESET;
3678c6fd2807SJeff Garzik 
3679633273a3STejun Heo 	/* Some PMPs don't work with only SRST, force hardreset if PMP
3680633273a3STejun Heo 	 * is supported.
3681633273a3STejun Heo 	 */
3682633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP)
3683633273a3STejun Heo 		ehc->i.action |= ATA_EH_HARDRESET;
3684633273a3STejun Heo 
3685c6fd2807SJeff Garzik 	/* if we're about to do hardreset, nothing more to do */
3686c6fd2807SJeff Garzik 	if (ehc->i.action & ATA_EH_HARDRESET)
3687c6fd2807SJeff Garzik 		return 0;
3688c6fd2807SJeff Garzik 
3689936fd732STejun Heo 	/* if SATA, resume link */
3690a16abc0bSTejun Heo 	if (ap->flags & ATA_FLAG_SATA) {
3691936fd732STejun Heo 		rc = sata_link_resume(link, timing, deadline);
3692b8cffc6aSTejun Heo 		/* whine about phy resume failure but proceed */
3693b8cffc6aSTejun Heo 		if (rc && rc != -EOPNOTSUPP)
3694cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3695c6fd2807SJeff Garzik 					"link for reset (errno=%d)\n", rc);
3696c6fd2807SJeff Garzik 	}
3697c6fd2807SJeff Garzik 
3698c6fd2807SJeff Garzik 	/* Wait for !BSY if the controller can wait for the first D2H
3699c6fd2807SJeff Garzik 	 * Reg FIS and we don't know that no device is attached.
3700c6fd2807SJeff Garzik 	 */
37010c88758bSTejun Heo 	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3702b8cffc6aSTejun Heo 		rc = ata_wait_ready(ap, deadline);
37036dffaf61STejun Heo 		if (rc && rc != -ENODEV) {
3704cc0680a5STejun Heo 			ata_link_printk(link, KERN_WARNING, "device not ready "
3705b8cffc6aSTejun Heo 					"(errno=%d), forcing hardreset\n", rc);
3706b8cffc6aSTejun Heo 			ehc->i.action |= ATA_EH_HARDRESET;
3707b8cffc6aSTejun Heo 		}
3708b8cffc6aSTejun Heo 	}
3709c6fd2807SJeff Garzik 
3710c6fd2807SJeff Garzik 	return 0;
3711c6fd2807SJeff Garzik }
3712c6fd2807SJeff Garzik 
3713c6fd2807SJeff Garzik /**
3714c6fd2807SJeff Garzik  *	ata_std_softreset - reset host port via ATA SRST
3715cc0680a5STejun Heo  *	@link: ATA link to reset
3716c6fd2807SJeff Garzik  *	@classes: resulting classes of attached devices
3717d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3718c6fd2807SJeff Garzik  *
3719c6fd2807SJeff Garzik  *	Reset host port using ATA SRST.
3720c6fd2807SJeff Garzik  *
3721c6fd2807SJeff Garzik  *	LOCKING:
3722c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3723c6fd2807SJeff Garzik  *
3724c6fd2807SJeff Garzik  *	RETURNS:
3725c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3726c6fd2807SJeff Garzik  */
3727cc0680a5STejun Heo int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3728d4b2bab4STejun Heo 		      unsigned long deadline)
3729c6fd2807SJeff Garzik {
3730cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3731c6fd2807SJeff Garzik 	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3732d4b2bab4STejun Heo 	unsigned int devmask = 0;
3733d4b2bab4STejun Heo 	int rc;
3734c6fd2807SJeff Garzik 	u8 err;
3735c6fd2807SJeff Garzik 
3736c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3737c6fd2807SJeff Garzik 
3738936fd732STejun Heo 	if (ata_link_offline(link)) {
3739c6fd2807SJeff Garzik 		classes[0] = ATA_DEV_NONE;
3740c6fd2807SJeff Garzik 		goto out;
3741c6fd2807SJeff Garzik 	}
3742c6fd2807SJeff Garzik 
3743c6fd2807SJeff Garzik 	/* determine if device 0/1 are present */
3744c6fd2807SJeff Garzik 	if (ata_devchk(ap, 0))
3745c6fd2807SJeff Garzik 		devmask |= (1 << 0);
3746c6fd2807SJeff Garzik 	if (slave_possible && ata_devchk(ap, 1))
3747c6fd2807SJeff Garzik 		devmask |= (1 << 1);
3748c6fd2807SJeff Garzik 
3749c6fd2807SJeff Garzik 	/* select device 0 again */
3750c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);
3751c6fd2807SJeff Garzik 
3752c6fd2807SJeff Garzik 	/* issue bus reset */
3753c6fd2807SJeff Garzik 	DPRINTK("about to softreset, devmask=%x\n", devmask);
3754d4b2bab4STejun Heo 	rc = ata_bus_softreset(ap, devmask, deadline);
37559b89391cSTejun Heo 	/* if link is occupied, -ENODEV too is an error */
3756936fd732STejun Heo 	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3757cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3758d4b2bab4STejun Heo 		return rc;
3759c6fd2807SJeff Garzik 	}
3760c6fd2807SJeff Garzik 
3761c6fd2807SJeff Garzik 	/* determine by signature whether we have ATA or ATAPI devices */
37623f19859eSTejun Heo 	classes[0] = ata_dev_try_classify(&link->device[0],
37633f19859eSTejun Heo 					  devmask & (1 << 0), &err);
3764c6fd2807SJeff Garzik 	if (slave_possible && err != 0x81)
37653f19859eSTejun Heo 		classes[1] = ata_dev_try_classify(&link->device[1],
37663f19859eSTejun Heo 						  devmask & (1 << 1), &err);
3767c6fd2807SJeff Garzik 
3768c6fd2807SJeff Garzik  out:
3769c6fd2807SJeff Garzik 	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3770c6fd2807SJeff Garzik 	return 0;
3771c6fd2807SJeff Garzik }
3772c6fd2807SJeff Garzik 
3773c6fd2807SJeff Garzik /**
3774cc0680a5STejun Heo  *	sata_link_hardreset - reset link via SATA phy reset
3775cc0680a5STejun Heo  *	@link: link to reset
3776b6103f6dSTejun Heo  *	@timing: timing parameters { interval, duratinon, timeout } in msec
3777d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3778c6fd2807SJeff Garzik  *
3779cc0680a5STejun Heo  *	SATA phy-reset @link using DET bits of SControl register.
3780c6fd2807SJeff Garzik  *
3781c6fd2807SJeff Garzik  *	LOCKING:
3782c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3783c6fd2807SJeff Garzik  *
3784c6fd2807SJeff Garzik  *	RETURNS:
3785c6fd2807SJeff Garzik  *	0 on success, -errno otherwise.
3786c6fd2807SJeff Garzik  */
3787cc0680a5STejun Heo int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3788d4b2bab4STejun Heo 			unsigned long deadline)
3789c6fd2807SJeff Garzik {
3790c6fd2807SJeff Garzik 	u32 scontrol;
3791c6fd2807SJeff Garzik 	int rc;
3792c6fd2807SJeff Garzik 
3793c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3794c6fd2807SJeff Garzik 
3795936fd732STejun Heo 	if (sata_set_spd_needed(link)) {
3796c6fd2807SJeff Garzik 		/* SATA spec says nothing about how to reconfigure
3797c6fd2807SJeff Garzik 		 * spd.  To be on the safe side, turn off phy during
3798c6fd2807SJeff Garzik 		 * reconfiguration.  This works for at least ICH7 AHCI
3799c6fd2807SJeff Garzik 		 * and Sil3124.
3800c6fd2807SJeff Garzik 		 */
3801936fd732STejun Heo 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3802b6103f6dSTejun Heo 			goto out;
3803c6fd2807SJeff Garzik 
3804cea0d336SJeff Garzik 		scontrol = (scontrol & 0x0f0) | 0x304;
3805c6fd2807SJeff Garzik 
3806936fd732STejun Heo 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3807b6103f6dSTejun Heo 			goto out;
3808c6fd2807SJeff Garzik 
3809936fd732STejun Heo 		sata_set_spd(link);
3810c6fd2807SJeff Garzik 	}
3811c6fd2807SJeff Garzik 
3812c6fd2807SJeff Garzik 	/* issue phy wake/reset */
3813936fd732STejun Heo 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3814b6103f6dSTejun Heo 		goto out;
3815c6fd2807SJeff Garzik 
3816c6fd2807SJeff Garzik 	scontrol = (scontrol & 0x0f0) | 0x301;
3817c6fd2807SJeff Garzik 
3818936fd732STejun Heo 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3819b6103f6dSTejun Heo 		goto out;
3820c6fd2807SJeff Garzik 
3821c6fd2807SJeff Garzik 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3822c6fd2807SJeff Garzik 	 * 10.4.2 says at least 1 ms.
3823c6fd2807SJeff Garzik 	 */
3824c6fd2807SJeff Garzik 	msleep(1);
3825c6fd2807SJeff Garzik 
3826936fd732STejun Heo 	/* bring link back */
3827936fd732STejun Heo 	rc = sata_link_resume(link, timing, deadline);
3828b6103f6dSTejun Heo  out:
3829b6103f6dSTejun Heo 	DPRINTK("EXIT, rc=%d\n", rc);
3830b6103f6dSTejun Heo 	return rc;
3831b6103f6dSTejun Heo }
3832b6103f6dSTejun Heo 
3833b6103f6dSTejun Heo /**
3834b6103f6dSTejun Heo  *	sata_std_hardreset - reset host port via SATA phy reset
3835cc0680a5STejun Heo  *	@link: link to reset
3836b6103f6dSTejun Heo  *	@class: resulting class of attached device
3837d4b2bab4STejun Heo  *	@deadline: deadline jiffies for the operation
3838b6103f6dSTejun Heo  *
3839b6103f6dSTejun Heo  *	SATA phy-reset host port using DET bits of SControl register,
3840b6103f6dSTejun Heo  *	wait for !BSY and classify the attached device.
3841b6103f6dSTejun Heo  *
3842b6103f6dSTejun Heo  *	LOCKING:
3843b6103f6dSTejun Heo  *	Kernel thread context (may sleep)
3844b6103f6dSTejun Heo  *
3845b6103f6dSTejun Heo  *	RETURNS:
3846b6103f6dSTejun Heo  *	0 on success, -errno otherwise.
3847b6103f6dSTejun Heo  */
3848cc0680a5STejun Heo int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3849d4b2bab4STejun Heo 		       unsigned long deadline)
3850b6103f6dSTejun Heo {
3851cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3852936fd732STejun Heo 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3853b6103f6dSTejun Heo 	int rc;
3854b6103f6dSTejun Heo 
3855b6103f6dSTejun Heo 	DPRINTK("ENTER\n");
3856b6103f6dSTejun Heo 
3857b6103f6dSTejun Heo 	/* do hardreset */
3858cc0680a5STejun Heo 	rc = sata_link_hardreset(link, timing, deadline);
3859b6103f6dSTejun Heo 	if (rc) {
3860cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3861b6103f6dSTejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3862b6103f6dSTejun Heo 		return rc;
3863b6103f6dSTejun Heo 	}
3864c6fd2807SJeff Garzik 
3865c6fd2807SJeff Garzik 	/* TODO: phy layer with polling, timeouts, etc. */
3866936fd732STejun Heo 	if (ata_link_offline(link)) {
3867c6fd2807SJeff Garzik 		*class = ATA_DEV_NONE;
3868c6fd2807SJeff Garzik 		DPRINTK("EXIT, link offline\n");
3869c6fd2807SJeff Garzik 		return 0;
3870c6fd2807SJeff Garzik 	}
3871c6fd2807SJeff Garzik 
387288ff6eafSTejun Heo 	/* wait a while before checking status */
387388ff6eafSTejun Heo 	ata_wait_after_reset(ap, deadline);
387434fee227STejun Heo 
3875633273a3STejun Heo 	/* If PMP is supported, we have to do follow-up SRST.  Note
3876633273a3STejun Heo 	 * that some PMPs don't send D2H Reg FIS after hardreset at
3877633273a3STejun Heo 	 * all if the first port is empty.  Wait for it just for a
3878633273a3STejun Heo 	 * second and request follow-up SRST.
3879633273a3STejun Heo 	 */
3880633273a3STejun Heo 	if (ap->flags & ATA_FLAG_PMP) {
3881633273a3STejun Heo 		ata_wait_ready(ap, jiffies + HZ);
3882633273a3STejun Heo 		return -EAGAIN;
3883633273a3STejun Heo 	}
3884633273a3STejun Heo 
3885d4b2bab4STejun Heo 	rc = ata_wait_ready(ap, deadline);
38869b89391cSTejun Heo 	/* link occupied, -ENODEV too is an error */
38879b89391cSTejun Heo 	if (rc) {
3888cc0680a5STejun Heo 		ata_link_printk(link, KERN_ERR,
3889d4b2bab4STejun Heo 				"COMRESET failed (errno=%d)\n", rc);
3890d4b2bab4STejun Heo 		return rc;
3891c6fd2807SJeff Garzik 	}
3892c6fd2807SJeff Garzik 
3893c6fd2807SJeff Garzik 	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3894c6fd2807SJeff Garzik 
38953f19859eSTejun Heo 	*class = ata_dev_try_classify(link->device, 1, NULL);
3896c6fd2807SJeff Garzik 
3897c6fd2807SJeff Garzik 	DPRINTK("EXIT, class=%u\n", *class);
3898c6fd2807SJeff Garzik 	return 0;
3899c6fd2807SJeff Garzik }
3900c6fd2807SJeff Garzik 
3901c6fd2807SJeff Garzik /**
3902c6fd2807SJeff Garzik  *	ata_std_postreset - standard postreset callback
3903cc0680a5STejun Heo  *	@link: the target ata_link
3904c6fd2807SJeff Garzik  *	@classes: classes of attached devices
3905c6fd2807SJeff Garzik  *
3906c6fd2807SJeff Garzik  *	This function is invoked after a successful reset.  Note that
3907c6fd2807SJeff Garzik  *	the device might have been reset more than once using
3908c6fd2807SJeff Garzik  *	different reset methods before postreset is invoked.
3909c6fd2807SJeff Garzik  *
3910c6fd2807SJeff Garzik  *	LOCKING:
3911c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
3912c6fd2807SJeff Garzik  */
3913cc0680a5STejun Heo void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3914c6fd2807SJeff Garzik {
3915cc0680a5STejun Heo 	struct ata_port *ap = link->ap;
3916c6fd2807SJeff Garzik 	u32 serror;
3917c6fd2807SJeff Garzik 
3918c6fd2807SJeff Garzik 	DPRINTK("ENTER\n");
3919c6fd2807SJeff Garzik 
3920c6fd2807SJeff Garzik 	/* print link status */
3921936fd732STejun Heo 	sata_print_link_status(link);
3922c6fd2807SJeff Garzik 
3923c6fd2807SJeff Garzik 	/* clear SError */
3924936fd732STejun Heo 	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3925936fd732STejun Heo 		sata_scr_write(link, SCR_ERROR, serror);
3926f7fe7ad4STejun Heo 	link->eh_info.serror = 0;
3927c6fd2807SJeff Garzik 
3928c6fd2807SJeff Garzik 	/* is double-select really necessary? */
3929c6fd2807SJeff Garzik 	if (classes[0] != ATA_DEV_NONE)
3930c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 1);
3931c6fd2807SJeff Garzik 	if (classes[1] != ATA_DEV_NONE)
3932c6fd2807SJeff Garzik 		ap->ops->dev_select(ap, 0);
3933c6fd2807SJeff Garzik 
3934c6fd2807SJeff Garzik 	/* bail out if no device is present */
3935c6fd2807SJeff Garzik 	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3936c6fd2807SJeff Garzik 		DPRINTK("EXIT, no device\n");
3937c6fd2807SJeff Garzik 		return;
3938c6fd2807SJeff Garzik 	}
3939c6fd2807SJeff Garzik 
3940c6fd2807SJeff Garzik 	/* set up device control */
39410d5ff566STejun Heo 	if (ap->ioaddr.ctl_addr)
39420d5ff566STejun Heo 		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3943c6fd2807SJeff Garzik 
3944c6fd2807SJeff Garzik 	DPRINTK("EXIT\n");
3945c6fd2807SJeff Garzik }
3946c6fd2807SJeff Garzik 
3947c6fd2807SJeff Garzik /**
3948c6fd2807SJeff Garzik  *	ata_dev_same_device - Determine whether new ID matches configured device
3949c6fd2807SJeff Garzik  *	@dev: device to compare against
3950c6fd2807SJeff Garzik  *	@new_class: class of the new device
3951c6fd2807SJeff Garzik  *	@new_id: IDENTIFY page of the new device
3952c6fd2807SJeff Garzik  *
3953c6fd2807SJeff Garzik  *	Compare @new_class and @new_id against @dev and determine
3954c6fd2807SJeff Garzik  *	whether @dev is the device indicated by @new_class and
3955c6fd2807SJeff Garzik  *	@new_id.
3956c6fd2807SJeff Garzik  *
3957c6fd2807SJeff Garzik  *	LOCKING:
3958c6fd2807SJeff Garzik  *	None.
3959c6fd2807SJeff Garzik  *
3960c6fd2807SJeff Garzik  *	RETURNS:
3961c6fd2807SJeff Garzik  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3962c6fd2807SJeff Garzik  */
3963c6fd2807SJeff Garzik static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3964c6fd2807SJeff Garzik 			       const u16 *new_id)
3965c6fd2807SJeff Garzik {
3966c6fd2807SJeff Garzik 	const u16 *old_id = dev->id;
3967a0cf733bSTejun Heo 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3968a0cf733bSTejun Heo 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3969c6fd2807SJeff Garzik 
3970c6fd2807SJeff Garzik 	if (dev->class != new_class) {
3971c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3972c6fd2807SJeff Garzik 			       dev->class, new_class);
3973c6fd2807SJeff Garzik 		return 0;
3974c6fd2807SJeff Garzik 	}
3975c6fd2807SJeff Garzik 
3976a0cf733bSTejun Heo 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3977a0cf733bSTejun Heo 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3978a0cf733bSTejun Heo 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3979a0cf733bSTejun Heo 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3980c6fd2807SJeff Garzik 
3981c6fd2807SJeff Garzik 	if (strcmp(model[0], model[1])) {
3982c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3983c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", model[0], model[1]);
3984c6fd2807SJeff Garzik 		return 0;
3985c6fd2807SJeff Garzik 	}
3986c6fd2807SJeff Garzik 
3987c6fd2807SJeff Garzik 	if (strcmp(serial[0], serial[1])) {
3988c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3989c6fd2807SJeff Garzik 			       "'%s' != '%s'\n", serial[0], serial[1]);
3990c6fd2807SJeff Garzik 		return 0;
3991c6fd2807SJeff Garzik 	}
3992c6fd2807SJeff Garzik 
3993c6fd2807SJeff Garzik 	return 1;
3994c6fd2807SJeff Garzik }
3995c6fd2807SJeff Garzik 
3996c6fd2807SJeff Garzik /**
3997fe30911bSTejun Heo  *	ata_dev_reread_id - Re-read IDENTIFY data
39983fae450cSHenrik Kretzschmar  *	@dev: target ATA device
3999bff04647STejun Heo  *	@readid_flags: read ID flags
4000c6fd2807SJeff Garzik  *
4001c6fd2807SJeff Garzik  *	Re-read IDENTIFY page and make sure @dev is still attached to
4002c6fd2807SJeff Garzik  *	the port.
4003c6fd2807SJeff Garzik  *
4004c6fd2807SJeff Garzik  *	LOCKING:
4005c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4006c6fd2807SJeff Garzik  *
4007c6fd2807SJeff Garzik  *	RETURNS:
4008c6fd2807SJeff Garzik  *	0 on success, negative errno otherwise
4009c6fd2807SJeff Garzik  */
4010fe30911bSTejun Heo int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4011c6fd2807SJeff Garzik {
4012c6fd2807SJeff Garzik 	unsigned int class = dev->class;
40139af5c9c9STejun Heo 	u16 *id = (void *)dev->link->ap->sector_buf;
4014c6fd2807SJeff Garzik 	int rc;
4015c6fd2807SJeff Garzik 
4016c6fd2807SJeff Garzik 	/* read ID data */
4017bff04647STejun Heo 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
4018c6fd2807SJeff Garzik 	if (rc)
4019fe30911bSTejun Heo 		return rc;
4020c6fd2807SJeff Garzik 
4021c6fd2807SJeff Garzik 	/* is the device still there? */
4022fe30911bSTejun Heo 	if (!ata_dev_same_device(dev, class, id))
4023fe30911bSTejun Heo 		return -ENODEV;
4024c6fd2807SJeff Garzik 
4025c6fd2807SJeff Garzik 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4026fe30911bSTejun Heo 	return 0;
4027fe30911bSTejun Heo }
4028fe30911bSTejun Heo 
4029fe30911bSTejun Heo /**
4030fe30911bSTejun Heo  *	ata_dev_revalidate - Revalidate ATA device
4031fe30911bSTejun Heo  *	@dev: device to revalidate
4032422c9daaSTejun Heo  *	@new_class: new class code
4033fe30911bSTejun Heo  *	@readid_flags: read ID flags
4034fe30911bSTejun Heo  *
4035fe30911bSTejun Heo  *	Re-read IDENTIFY page, make sure @dev is still attached to the
4036fe30911bSTejun Heo  *	port and reconfigure it according to the new IDENTIFY page.
4037fe30911bSTejun Heo  *
4038fe30911bSTejun Heo  *	LOCKING:
4039fe30911bSTejun Heo  *	Kernel thread context (may sleep)
4040fe30911bSTejun Heo  *
4041fe30911bSTejun Heo  *	RETURNS:
4042fe30911bSTejun Heo  *	0 on success, negative errno otherwise
4043fe30911bSTejun Heo  */
4044422c9daaSTejun Heo int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4045422c9daaSTejun Heo 		       unsigned int readid_flags)
4046fe30911bSTejun Heo {
40476ddcd3b0STejun Heo 	u64 n_sectors = dev->n_sectors;
4048fe30911bSTejun Heo 	int rc;
4049fe30911bSTejun Heo 
4050fe30911bSTejun Heo 	if (!ata_dev_enabled(dev))
4051fe30911bSTejun Heo 		return -ENODEV;
4052fe30911bSTejun Heo 
4053422c9daaSTejun Heo 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4054422c9daaSTejun Heo 	if (ata_class_enabled(new_class) &&
4055422c9daaSTejun Heo 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4056422c9daaSTejun Heo 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4057422c9daaSTejun Heo 			       dev->class, new_class);
4058422c9daaSTejun Heo 		rc = -ENODEV;
4059422c9daaSTejun Heo 		goto fail;
4060422c9daaSTejun Heo 	}
4061422c9daaSTejun Heo 
4062fe30911bSTejun Heo 	/* re-read ID */
4063fe30911bSTejun Heo 	rc = ata_dev_reread_id(dev, readid_flags);
4064fe30911bSTejun Heo 	if (rc)
4065fe30911bSTejun Heo 		goto fail;
4066c6fd2807SJeff Garzik 
4067c6fd2807SJeff Garzik 	/* configure device according to the new ID */
4068efdaedc4STejun Heo 	rc = ata_dev_configure(dev);
40696ddcd3b0STejun Heo 	if (rc)
40706ddcd3b0STejun Heo 		goto fail;
40716ddcd3b0STejun Heo 
40726ddcd3b0STejun Heo 	/* verify n_sectors hasn't changed */
4073b54eebd6STejun Heo 	if (dev->class == ATA_DEV_ATA && n_sectors &&
4074b54eebd6STejun Heo 	    dev->n_sectors != n_sectors) {
40756ddcd3b0STejun Heo 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
40766ddcd3b0STejun Heo 			       "%llu != %llu\n",
40776ddcd3b0STejun Heo 			       (unsigned long long)n_sectors,
40786ddcd3b0STejun Heo 			       (unsigned long long)dev->n_sectors);
40798270bec4STejun Heo 
40808270bec4STejun Heo 		/* restore original n_sectors */
40818270bec4STejun Heo 		dev->n_sectors = n_sectors;
40828270bec4STejun Heo 
40836ddcd3b0STejun Heo 		rc = -ENODEV;
40846ddcd3b0STejun Heo 		goto fail;
40856ddcd3b0STejun Heo 	}
40866ddcd3b0STejun Heo 
4087c6fd2807SJeff Garzik 	return 0;
4088c6fd2807SJeff Garzik 
4089c6fd2807SJeff Garzik  fail:
4090c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4091c6fd2807SJeff Garzik 	return rc;
4092c6fd2807SJeff Garzik }
4093c6fd2807SJeff Garzik 
40946919a0a6SAlan Cox struct ata_blacklist_entry {
40956919a0a6SAlan Cox 	const char *model_num;
40966919a0a6SAlan Cox 	const char *model_rev;
40976919a0a6SAlan Cox 	unsigned long horkage;
40986919a0a6SAlan Cox };
40996919a0a6SAlan Cox 
41006919a0a6SAlan Cox static const struct ata_blacklist_entry ata_device_blacklist [] = {
41016919a0a6SAlan Cox 	/* Devices with DMA related problems under Linux */
41026919a0a6SAlan Cox 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
41036919a0a6SAlan Cox 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
41046919a0a6SAlan Cox 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
41056919a0a6SAlan Cox 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
41066919a0a6SAlan Cox 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
41076919a0a6SAlan Cox 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
41086919a0a6SAlan Cox 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
41096919a0a6SAlan Cox 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
41106919a0a6SAlan Cox 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
41116919a0a6SAlan Cox 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
41126919a0a6SAlan Cox 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
41136919a0a6SAlan Cox 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
41146919a0a6SAlan Cox 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
41156919a0a6SAlan Cox 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
41166919a0a6SAlan Cox 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
41176919a0a6SAlan Cox 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
41186919a0a6SAlan Cox 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
41196919a0a6SAlan Cox 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
41206919a0a6SAlan Cox 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
41216919a0a6SAlan Cox 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
41226919a0a6SAlan Cox 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
41236919a0a6SAlan Cox 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
41246919a0a6SAlan Cox 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
41256919a0a6SAlan Cox 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
41266919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
41276919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
41286919a0a6SAlan Cox 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
41296919a0a6SAlan Cox 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
41306919a0a6SAlan Cox 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
413139f19886SDave Jones 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
41323af9a77aSTejun Heo 	/* Odd clown on sil3726/4726 PMPs */
41333af9a77aSTejun Heo 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
41343af9a77aSTejun Heo 						ATA_HORKAGE_SKIP_PM },
41356919a0a6SAlan Cox 
413618d6e9d5SAlbert Lee 	/* Weird ATAPI devices */
413740a1d531STejun Heo 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
413818d6e9d5SAlbert Lee 
41396919a0a6SAlan Cox 	/* Devices we expect to fail diagnostics */
41406919a0a6SAlan Cox 
41416919a0a6SAlan Cox 	/* Devices where NCQ should be avoided */
41426919a0a6SAlan Cox 	/* NCQ is slow */
41436919a0a6SAlan Cox 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4144459ad688STejun Heo 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
414509125ea6STejun Heo 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
414609125ea6STejun Heo 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
41477acfaf30SPaul Rolland 	/* NCQ is broken */
4148539cc7c7SJeff Garzik 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
41490e3dbc01SAlan Cox 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
41500b0a43e0SDavid Milburn 	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
41510b0a43e0SDavid Milburn 	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
4152da6f0ec2SPaolo Ornati 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4153539cc7c7SJeff Garzik 
415436e337d0SRobert Hancock 	/* Blacklist entries taken from Silicon Image 3124/3132
415536e337d0SRobert Hancock 	   Windows driver .inf file - also several Linux problem reports */
415636e337d0SRobert Hancock 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
415736e337d0SRobert Hancock 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
415836e337d0SRobert Hancock 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
41596919a0a6SAlan Cox 
416016c55b03STejun Heo 	/* devices which puke on READ_NATIVE_MAX */
416116c55b03STejun Heo 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
416216c55b03STejun Heo 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
416316c55b03STejun Heo 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
416416c55b03STejun Heo 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
41656919a0a6SAlan Cox 
416693328e11SAlan Cox 	/* Devices which report 1 sector over size HPA */
416793328e11SAlan Cox 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
416893328e11SAlan Cox 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
416993328e11SAlan Cox 
41706bbfd53dSAlan Cox 	/* Devices which get the IVB wrong */
41716bbfd53dSAlan Cox 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
41726bbfd53dSAlan Cox 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
4173e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
4174e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
4175e9f33406SPeter Missel 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
41766bbfd53dSAlan Cox 
41776919a0a6SAlan Cox 	/* End Marker */
41786919a0a6SAlan Cox 	{ }
4179c6fd2807SJeff Garzik };
4180c6fd2807SJeff Garzik 
4181741b7763SAdrian Bunk static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4182539cc7c7SJeff Garzik {
4183539cc7c7SJeff Garzik 	const char *p;
4184539cc7c7SJeff Garzik 	int len;
4185539cc7c7SJeff Garzik 
4186539cc7c7SJeff Garzik 	/*
4187539cc7c7SJeff Garzik 	 * check for trailing wildcard: *\0
4188539cc7c7SJeff Garzik 	 */
4189539cc7c7SJeff Garzik 	p = strchr(patt, wildchar);
4190539cc7c7SJeff Garzik 	if (p && ((*(p + 1)) == 0))
4191539cc7c7SJeff Garzik 		len = p - patt;
4192317b50b8SAndrew Paprocki 	else {
4193539cc7c7SJeff Garzik 		len = strlen(name);
4194317b50b8SAndrew Paprocki 		if (!len) {
4195317b50b8SAndrew Paprocki 			if (!*patt)
4196317b50b8SAndrew Paprocki 				return 0;
4197317b50b8SAndrew Paprocki 			return -1;
4198317b50b8SAndrew Paprocki 		}
4199317b50b8SAndrew Paprocki 	}
4200539cc7c7SJeff Garzik 
4201539cc7c7SJeff Garzik 	return strncmp(patt, name, len);
4202539cc7c7SJeff Garzik }
4203539cc7c7SJeff Garzik 
420475683fe7STejun Heo static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4205c6fd2807SJeff Garzik {
42068bfa79fcSTejun Heo 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
42078bfa79fcSTejun Heo 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
42086919a0a6SAlan Cox 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4209c6fd2807SJeff Garzik 
42108bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
42118bfa79fcSTejun Heo 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4212c6fd2807SJeff Garzik 
42136919a0a6SAlan Cox 	while (ad->model_num) {
4214539cc7c7SJeff Garzik 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
42156919a0a6SAlan Cox 			if (ad->model_rev == NULL)
42166919a0a6SAlan Cox 				return ad->horkage;
4217539cc7c7SJeff Garzik 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
42186919a0a6SAlan Cox 				return ad->horkage;
4219c6fd2807SJeff Garzik 		}
42206919a0a6SAlan Cox 		ad++;
4221c6fd2807SJeff Garzik 	}
4222c6fd2807SJeff Garzik 	return 0;
4223c6fd2807SJeff Garzik }
4224c6fd2807SJeff Garzik 
42256919a0a6SAlan Cox static int ata_dma_blacklisted(const struct ata_device *dev)
42266919a0a6SAlan Cox {
42276919a0a6SAlan Cox 	/* We don't support polling DMA.
42286919a0a6SAlan Cox 	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
42296919a0a6SAlan Cox 	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
42306919a0a6SAlan Cox 	 */
42319af5c9c9STejun Heo 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
42326919a0a6SAlan Cox 	    (dev->flags & ATA_DFLAG_CDB_INTR))
42336919a0a6SAlan Cox 		return 1;
423475683fe7STejun Heo 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
42356919a0a6SAlan Cox }
42366919a0a6SAlan Cox 
4237c6fd2807SJeff Garzik /**
42386bbfd53dSAlan Cox  *	ata_is_40wire		-	check drive side detection
42396bbfd53dSAlan Cox  *	@dev: device
42406bbfd53dSAlan Cox  *
42416bbfd53dSAlan Cox  *	Perform drive side detection decoding, allowing for device vendors
42426bbfd53dSAlan Cox  *	who can't follow the documentation.
42436bbfd53dSAlan Cox  */
42446bbfd53dSAlan Cox 
42456bbfd53dSAlan Cox static int ata_is_40wire(struct ata_device *dev)
42466bbfd53dSAlan Cox {
42476bbfd53dSAlan Cox 	if (dev->horkage & ATA_HORKAGE_IVB)
42486bbfd53dSAlan Cox 		return ata_drive_40wire_relaxed(dev->id);
42496bbfd53dSAlan Cox 	return ata_drive_40wire(dev->id);
42506bbfd53dSAlan Cox }
42516bbfd53dSAlan Cox 
42526bbfd53dSAlan Cox /**
4253c6fd2807SJeff Garzik  *	ata_dev_xfermask - Compute supported xfermask of the given device
4254c6fd2807SJeff Garzik  *	@dev: Device to compute xfermask for
4255c6fd2807SJeff Garzik  *
4256c6fd2807SJeff Garzik  *	Compute supported xfermask of @dev and store it in
4257c6fd2807SJeff Garzik  *	dev->*_mask.  This function is responsible for applying all
4258c6fd2807SJeff Garzik  *	known limits including host controller limits, device
4259c6fd2807SJeff Garzik  *	blacklist, etc...
4260c6fd2807SJeff Garzik  *
4261c6fd2807SJeff Garzik  *	LOCKING:
4262c6fd2807SJeff Garzik  *	None.
4263c6fd2807SJeff Garzik  */
4264c6fd2807SJeff Garzik static void ata_dev_xfermask(struct ata_device *dev)
4265c6fd2807SJeff Garzik {
42669af5c9c9STejun Heo 	struct ata_link *link = dev->link;
42679af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
4268cca3974eSJeff Garzik 	struct ata_host *host = ap->host;
4269c6fd2807SJeff Garzik 	unsigned long xfer_mask;
4270c6fd2807SJeff Garzik 
4271c6fd2807SJeff Garzik 	/* controller modes available */
4272c6fd2807SJeff Garzik 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4273c6fd2807SJeff Garzik 				      ap->mwdma_mask, ap->udma_mask);
4274c6fd2807SJeff Garzik 
42758343f889SRobert Hancock 	/* drive modes available */
4276c6fd2807SJeff Garzik 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4277c6fd2807SJeff Garzik 				       dev->mwdma_mask, dev->udma_mask);
4278c6fd2807SJeff Garzik 	xfer_mask &= ata_id_xfermask(dev->id);
4279c6fd2807SJeff Garzik 
4280b352e57dSAlan Cox 	/*
4281b352e57dSAlan Cox 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4282b352e57dSAlan Cox 	 *	cable
4283b352e57dSAlan Cox 	 */
4284b352e57dSAlan Cox 	if (ata_dev_pair(dev)) {
4285b352e57dSAlan Cox 		/* No PIO5 or PIO6 */
4286b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4287b352e57dSAlan Cox 		/* No MWDMA3 or MWDMA 4 */
4288b352e57dSAlan Cox 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4289b352e57dSAlan Cox 	}
4290b352e57dSAlan Cox 
4291c6fd2807SJeff Garzik 	if (ata_dma_blacklisted(dev)) {
4292c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4293c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING,
4294c6fd2807SJeff Garzik 			       "device is on DMA blacklist, disabling DMA\n");
4295c6fd2807SJeff Garzik 	}
4296c6fd2807SJeff Garzik 
429714d66ab7SPetr Vandrovec 	if ((host->flags & ATA_HOST_SIMPLEX) &&
429814d66ab7SPetr Vandrovec 	    host->simplex_claimed && host->simplex_claimed != ap) {
4299c6fd2807SJeff Garzik 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4300c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4301c6fd2807SJeff Garzik 			       "other device, disabling DMA\n");
4302c6fd2807SJeff Garzik 	}
4303c6fd2807SJeff Garzik 
4304e424675fSJeff Garzik 	if (ap->flags & ATA_FLAG_NO_IORDY)
4305e424675fSJeff Garzik 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4306e424675fSJeff Garzik 
4307c6fd2807SJeff Garzik 	if (ap->ops->mode_filter)
4308a76b62caSAlan Cox 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4309c6fd2807SJeff Garzik 
43108343f889SRobert Hancock 	/* Apply cable rule here.  Don't apply it early because when
43118343f889SRobert Hancock 	 * we handle hot plug the cable type can itself change.
43128343f889SRobert Hancock 	 * Check this last so that we know if the transfer rate was
43138343f889SRobert Hancock 	 * solely limited by the cable.
43148343f889SRobert Hancock 	 * Unknown or 80 wire cables reported host side are checked
43158343f889SRobert Hancock 	 * drive side as well. Cases where we know a 40wire cable
43168343f889SRobert Hancock 	 * is used safely for 80 are not checked here.
43178343f889SRobert Hancock 	 */
43188343f889SRobert Hancock 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
43198343f889SRobert Hancock 		/* UDMA/44 or higher would be available */
43208343f889SRobert Hancock 		if ((ap->cbl == ATA_CBL_PATA40) ||
43216bbfd53dSAlan Cox 		    (ata_is_40wire(dev) &&
43228343f889SRobert Hancock 		    (ap->cbl == ATA_CBL_PATA_UNK ||
43238343f889SRobert Hancock 		     ap->cbl == ATA_CBL_PATA80))) {
43248343f889SRobert Hancock 			ata_dev_printk(dev, KERN_WARNING,
43258343f889SRobert Hancock 				 "limited to UDMA/33 due to 40-wire cable\n");
43268343f889SRobert Hancock 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
43278343f889SRobert Hancock 		}
43288343f889SRobert Hancock 
4329c6fd2807SJeff Garzik 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4330c6fd2807SJeff Garzik 			    &dev->mwdma_mask, &dev->udma_mask);
4331c6fd2807SJeff Garzik }
4332c6fd2807SJeff Garzik 
4333c6fd2807SJeff Garzik /**
4334c6fd2807SJeff Garzik  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4335c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4336c6fd2807SJeff Garzik  *
4337c6fd2807SJeff Garzik  *	Issue SET FEATURES - XFER MODE command to device @dev
4338c6fd2807SJeff Garzik  *	on port @ap.
4339c6fd2807SJeff Garzik  *
4340c6fd2807SJeff Garzik  *	LOCKING:
4341c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
4342c6fd2807SJeff Garzik  *
4343c6fd2807SJeff Garzik  *	RETURNS:
4344c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4345c6fd2807SJeff Garzik  */
4346c6fd2807SJeff Garzik 
4347c6fd2807SJeff Garzik static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4348c6fd2807SJeff Garzik {
4349c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4350c6fd2807SJeff Garzik 	unsigned int err_mask;
4351c6fd2807SJeff Garzik 
4352c6fd2807SJeff Garzik 	/* set up set-features taskfile */
4353c6fd2807SJeff Garzik 	DPRINTK("set features - xfer mode\n");
4354c6fd2807SJeff Garzik 
4355464cf177STejun Heo 	/* Some controllers and ATAPI devices show flaky interrupt
4356464cf177STejun Heo 	 * behavior after setting xfer mode.  Use polling instead.
4357464cf177STejun Heo 	 */
4358c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4359c6fd2807SJeff Garzik 	tf.command = ATA_CMD_SET_FEATURES;
4360c6fd2807SJeff Garzik 	tf.feature = SETFEATURES_XFER;
4361464cf177STejun Heo 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4362c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4363c6fd2807SJeff Garzik 	tf.nsect = dev->xfer_mode;
4364c6fd2807SJeff Garzik 
43652b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4366c6fd2807SJeff Garzik 
4367c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4368c6fd2807SJeff Garzik 	return err_mask;
4369c6fd2807SJeff Garzik }
4370c6fd2807SJeff Garzik /**
4371218f3d30SJeff Garzik  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
43729f45cbd3SKristen Carlson Accardi  *	@dev: Device to which command will be sent
43739f45cbd3SKristen Carlson Accardi  *	@enable: Whether to enable or disable the feature
4374218f3d30SJeff Garzik  *	@feature: The sector count represents the feature to set
43759f45cbd3SKristen Carlson Accardi  *
43769f45cbd3SKristen Carlson Accardi  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4377218f3d30SJeff Garzik  *	on port @ap with sector count
43789f45cbd3SKristen Carlson Accardi  *
43799f45cbd3SKristen Carlson Accardi  *	LOCKING:
43809f45cbd3SKristen Carlson Accardi  *	PCI/etc. bus probe sem.
43819f45cbd3SKristen Carlson Accardi  *
43829f45cbd3SKristen Carlson Accardi  *	RETURNS:
43839f45cbd3SKristen Carlson Accardi  *	0 on success, AC_ERR_* mask otherwise.
43849f45cbd3SKristen Carlson Accardi  */
4385218f3d30SJeff Garzik static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4386218f3d30SJeff Garzik 					u8 feature)
43879f45cbd3SKristen Carlson Accardi {
43889f45cbd3SKristen Carlson Accardi 	struct ata_taskfile tf;
43899f45cbd3SKristen Carlson Accardi 	unsigned int err_mask;
43909f45cbd3SKristen Carlson Accardi 
43919f45cbd3SKristen Carlson Accardi 	/* set up set-features taskfile */
43929f45cbd3SKristen Carlson Accardi 	DPRINTK("set features - SATA features\n");
43939f45cbd3SKristen Carlson Accardi 
43949f45cbd3SKristen Carlson Accardi 	ata_tf_init(dev, &tf);
43959f45cbd3SKristen Carlson Accardi 	tf.command = ATA_CMD_SET_FEATURES;
43969f45cbd3SKristen Carlson Accardi 	tf.feature = enable;
43979f45cbd3SKristen Carlson Accardi 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
43989f45cbd3SKristen Carlson Accardi 	tf.protocol = ATA_PROT_NODATA;
4399218f3d30SJeff Garzik 	tf.nsect = feature;
44009f45cbd3SKristen Carlson Accardi 
44012b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
44029f45cbd3SKristen Carlson Accardi 
44039f45cbd3SKristen Carlson Accardi 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
44049f45cbd3SKristen Carlson Accardi 	return err_mask;
44059f45cbd3SKristen Carlson Accardi }
44069f45cbd3SKristen Carlson Accardi 
44079f45cbd3SKristen Carlson Accardi /**
4408c6fd2807SJeff Garzik  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4409c6fd2807SJeff Garzik  *	@dev: Device to which command will be sent
4410c6fd2807SJeff Garzik  *	@heads: Number of heads (taskfile parameter)
4411c6fd2807SJeff Garzik  *	@sectors: Number of sectors (taskfile parameter)
4412c6fd2807SJeff Garzik  *
4413c6fd2807SJeff Garzik  *	LOCKING:
4414c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
4415c6fd2807SJeff Garzik  *
4416c6fd2807SJeff Garzik  *	RETURNS:
4417c6fd2807SJeff Garzik  *	0 on success, AC_ERR_* mask otherwise.
4418c6fd2807SJeff Garzik  */
4419c6fd2807SJeff Garzik static unsigned int ata_dev_init_params(struct ata_device *dev,
4420c6fd2807SJeff Garzik 					u16 heads, u16 sectors)
4421c6fd2807SJeff Garzik {
4422c6fd2807SJeff Garzik 	struct ata_taskfile tf;
4423c6fd2807SJeff Garzik 	unsigned int err_mask;
4424c6fd2807SJeff Garzik 
4425c6fd2807SJeff Garzik 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4426c6fd2807SJeff Garzik 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4427c6fd2807SJeff Garzik 		return AC_ERR_INVALID;
4428c6fd2807SJeff Garzik 
4429c6fd2807SJeff Garzik 	/* set up init dev params taskfile */
4430c6fd2807SJeff Garzik 	DPRINTK("init dev params \n");
4431c6fd2807SJeff Garzik 
4432c6fd2807SJeff Garzik 	ata_tf_init(dev, &tf);
4433c6fd2807SJeff Garzik 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4434c6fd2807SJeff Garzik 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4435c6fd2807SJeff Garzik 	tf.protocol = ATA_PROT_NODATA;
4436c6fd2807SJeff Garzik 	tf.nsect = sectors;
4437c6fd2807SJeff Garzik 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4438c6fd2807SJeff Garzik 
44392b789108STejun Heo 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
444018b2466cSAlan Cox 	/* A clean abort indicates an original or just out of spec drive
444118b2466cSAlan Cox 	   and we should continue as we issue the setup based on the
444218b2466cSAlan Cox 	   drive reported working geometry */
444318b2466cSAlan Cox 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
444418b2466cSAlan Cox 		err_mask = 0;
4445c6fd2807SJeff Garzik 
4446c6fd2807SJeff Garzik 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4447c6fd2807SJeff Garzik 	return err_mask;
4448c6fd2807SJeff Garzik }
4449c6fd2807SJeff Garzik 
4450c6fd2807SJeff Garzik /**
4451c6fd2807SJeff Garzik  *	ata_sg_clean - Unmap DMA memory associated with command
4452c6fd2807SJeff Garzik  *	@qc: Command containing DMA memory to be released
4453c6fd2807SJeff Garzik  *
4454c6fd2807SJeff Garzik  *	Unmap all mapped DMA memory associated with this command.
4455c6fd2807SJeff Garzik  *
4456c6fd2807SJeff Garzik  *	LOCKING:
4457cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4458c6fd2807SJeff Garzik  */
445970e6ad0cSTejun Heo void ata_sg_clean(struct ata_queued_cmd *qc)
4460c6fd2807SJeff Garzik {
4461c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4462c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4463c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4464c6fd2807SJeff Garzik 	void *pad_buf = NULL;
4465c6fd2807SJeff Garzik 
4466c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4467c6fd2807SJeff Garzik 	WARN_ON(sg == NULL);
4468c6fd2807SJeff Garzik 
4469c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SINGLE)
4470c6fd2807SJeff Garzik 		WARN_ON(qc->n_elem > 1);
4471c6fd2807SJeff Garzik 
4472c6fd2807SJeff Garzik 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4473c6fd2807SJeff Garzik 
4474c6fd2807SJeff Garzik 	/* if we padded the buffer out to 32-bit bound, and data
4475c6fd2807SJeff Garzik 	 * xfer direction is from-device, we must copy from the
4476c6fd2807SJeff Garzik 	 * pad buffer back into the supplied buffer
4477c6fd2807SJeff Garzik 	 */
4478c6fd2807SJeff Garzik 	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4479c6fd2807SJeff Garzik 		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4480c6fd2807SJeff Garzik 
4481c6fd2807SJeff Garzik 	if (qc->flags & ATA_QCFLAG_SG) {
4482c6fd2807SJeff Garzik 		if (qc->n_elem)
4483c6fd2807SJeff Garzik 			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4484c6fd2807SJeff Garzik 		/* restore last sg */
448587260216SJens Axboe 		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
4486c6fd2807SJeff Garzik 		if (pad_buf) {
4487c6fd2807SJeff Garzik 			struct scatterlist *psg = &qc->pad_sgent;
448845711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4489c6fd2807SJeff Garzik 			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4490c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4491c6fd2807SJeff Garzik 		}
4492c6fd2807SJeff Garzik 	} else {
4493c6fd2807SJeff Garzik 		if (qc->n_elem)
4494c6fd2807SJeff Garzik 			dma_unmap_single(ap->dev,
4495c6fd2807SJeff Garzik 				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4496c6fd2807SJeff Garzik 				dir);
4497c6fd2807SJeff Garzik 		/* restore sg */
4498c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4499c6fd2807SJeff Garzik 		if (pad_buf)
4500c6fd2807SJeff Garzik 			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4501c6fd2807SJeff Garzik 			       pad_buf, qc->pad_len);
4502c6fd2807SJeff Garzik 	}
4503c6fd2807SJeff Garzik 
4504c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4505c6fd2807SJeff Garzik 	qc->__sg = NULL;
4506c6fd2807SJeff Garzik }
4507c6fd2807SJeff Garzik 
4508c6fd2807SJeff Garzik /**
4509c6fd2807SJeff Garzik  *	ata_fill_sg - Fill PCI IDE PRD table
4510c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be transferred
4511c6fd2807SJeff Garzik  *
4512c6fd2807SJeff Garzik  *	Fill PCI IDE PRD (scatter-gather) table with segments
4513c6fd2807SJeff Garzik  *	associated with the current disk command.
4514c6fd2807SJeff Garzik  *
4515c6fd2807SJeff Garzik  *	LOCKING:
4516cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4517c6fd2807SJeff Garzik  *
4518c6fd2807SJeff Garzik  */
4519c6fd2807SJeff Garzik static void ata_fill_sg(struct ata_queued_cmd *qc)
4520c6fd2807SJeff Garzik {
4521c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4522c6fd2807SJeff Garzik 	struct scatterlist *sg;
4523c6fd2807SJeff Garzik 	unsigned int idx;
4524c6fd2807SJeff Garzik 
4525c6fd2807SJeff Garzik 	WARN_ON(qc->__sg == NULL);
4526c6fd2807SJeff Garzik 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4527c6fd2807SJeff Garzik 
4528c6fd2807SJeff Garzik 	idx = 0;
4529c6fd2807SJeff Garzik 	ata_for_each_sg(sg, qc) {
4530c6fd2807SJeff Garzik 		u32 addr, offset;
4531c6fd2807SJeff Garzik 		u32 sg_len, len;
4532c6fd2807SJeff Garzik 
4533c6fd2807SJeff Garzik 		/* determine if physical DMA addr spans 64K boundary.
4534c6fd2807SJeff Garzik 		 * Note h/w doesn't support 64-bit, so we unconditionally
4535c6fd2807SJeff Garzik 		 * truncate dma_addr_t to u32.
4536c6fd2807SJeff Garzik 		 */
4537c6fd2807SJeff Garzik 		addr = (u32) sg_dma_address(sg);
4538c6fd2807SJeff Garzik 		sg_len = sg_dma_len(sg);
4539c6fd2807SJeff Garzik 
4540c6fd2807SJeff Garzik 		while (sg_len) {
4541c6fd2807SJeff Garzik 			offset = addr & 0xffff;
4542c6fd2807SJeff Garzik 			len = sg_len;
4543c6fd2807SJeff Garzik 			if ((offset + sg_len) > 0x10000)
4544c6fd2807SJeff Garzik 				len = 0x10000 - offset;
4545c6fd2807SJeff Garzik 
4546c6fd2807SJeff Garzik 			ap->prd[idx].addr = cpu_to_le32(addr);
4547c6fd2807SJeff Garzik 			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4548c6fd2807SJeff Garzik 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4549c6fd2807SJeff Garzik 
4550c6fd2807SJeff Garzik 			idx++;
4551c6fd2807SJeff Garzik 			sg_len -= len;
4552c6fd2807SJeff Garzik 			addr += len;
4553c6fd2807SJeff Garzik 		}
4554c6fd2807SJeff Garzik 	}
4555c6fd2807SJeff Garzik 
4556c6fd2807SJeff Garzik 	if (idx)
4557c6fd2807SJeff Garzik 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4558c6fd2807SJeff Garzik }
4559b9a4197eSTejun Heo 
4560c6fd2807SJeff Garzik /**
4561d26fc955SAlan Cox  *	ata_fill_sg_dumb - Fill PCI IDE PRD table
4562d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be transferred
4563d26fc955SAlan Cox  *
4564d26fc955SAlan Cox  *	Fill PCI IDE PRD (scatter-gather) table with segments
4565d26fc955SAlan Cox  *	associated with the current disk command. Perform the fill
4566d26fc955SAlan Cox  *	so that we avoid writing any length 64K records for
4567d26fc955SAlan Cox  *	controllers that don't follow the spec.
4568d26fc955SAlan Cox  *
4569d26fc955SAlan Cox  *	LOCKING:
4570d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4571d26fc955SAlan Cox  *
4572d26fc955SAlan Cox  */
4573d26fc955SAlan Cox static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4574d26fc955SAlan Cox {
4575d26fc955SAlan Cox 	struct ata_port *ap = qc->ap;
4576d26fc955SAlan Cox 	struct scatterlist *sg;
4577d26fc955SAlan Cox 	unsigned int idx;
4578d26fc955SAlan Cox 
4579d26fc955SAlan Cox 	WARN_ON(qc->__sg == NULL);
4580d26fc955SAlan Cox 	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4581d26fc955SAlan Cox 
4582d26fc955SAlan Cox 	idx = 0;
4583d26fc955SAlan Cox 	ata_for_each_sg(sg, qc) {
4584d26fc955SAlan Cox 		u32 addr, offset;
4585d26fc955SAlan Cox 		u32 sg_len, len, blen;
4586d26fc955SAlan Cox 
4587d26fc955SAlan Cox 		/* determine if physical DMA addr spans 64K boundary.
4588d26fc955SAlan Cox 		 * Note h/w doesn't support 64-bit, so we unconditionally
4589d26fc955SAlan Cox 		 * truncate dma_addr_t to u32.
4590d26fc955SAlan Cox 		 */
4591d26fc955SAlan Cox 		addr = (u32) sg_dma_address(sg);
4592d26fc955SAlan Cox 		sg_len = sg_dma_len(sg);
4593d26fc955SAlan Cox 
4594d26fc955SAlan Cox 		while (sg_len) {
4595d26fc955SAlan Cox 			offset = addr & 0xffff;
4596d26fc955SAlan Cox 			len = sg_len;
4597d26fc955SAlan Cox 			if ((offset + sg_len) > 0x10000)
4598d26fc955SAlan Cox 				len = 0x10000 - offset;
4599d26fc955SAlan Cox 
4600d26fc955SAlan Cox 			blen = len & 0xffff;
4601d26fc955SAlan Cox 			ap->prd[idx].addr = cpu_to_le32(addr);
4602d26fc955SAlan Cox 			if (blen == 0) {
4603d26fc955SAlan Cox 			   /* Some PATA chipsets like the CS5530 can't
4604d26fc955SAlan Cox 			      cope with 0x0000 meaning 64K as the spec says */
4605d26fc955SAlan Cox 				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4606d26fc955SAlan Cox 				blen = 0x8000;
4607d26fc955SAlan Cox 				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4608d26fc955SAlan Cox 			}
4609d26fc955SAlan Cox 			ap->prd[idx].flags_len = cpu_to_le32(blen);
4610d26fc955SAlan Cox 			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4611d26fc955SAlan Cox 
4612d26fc955SAlan Cox 			idx++;
4613d26fc955SAlan Cox 			sg_len -= len;
4614d26fc955SAlan Cox 			addr += len;
4615d26fc955SAlan Cox 		}
4616d26fc955SAlan Cox 	}
4617d26fc955SAlan Cox 
4618d26fc955SAlan Cox 	if (idx)
4619d26fc955SAlan Cox 		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4620d26fc955SAlan Cox }
4621d26fc955SAlan Cox 
4622d26fc955SAlan Cox /**
4623c6fd2807SJeff Garzik  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4624c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to check
4625c6fd2807SJeff Garzik  *
4626c6fd2807SJeff Garzik  *	Allow low-level driver to filter ATA PACKET commands, returning
4627c6fd2807SJeff Garzik  *	a status indicating whether or not it is OK to use DMA for the
4628c6fd2807SJeff Garzik  *	supplied PACKET command.
4629c6fd2807SJeff Garzik  *
4630c6fd2807SJeff Garzik  *	LOCKING:
4631cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4632c6fd2807SJeff Garzik  *
4633c6fd2807SJeff Garzik  *	RETURNS: 0 when ATAPI DMA can be used
4634c6fd2807SJeff Garzik  *               nonzero otherwise
4635c6fd2807SJeff Garzik  */
4636c6fd2807SJeff Garzik int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4637c6fd2807SJeff Garzik {
4638c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4639c6fd2807SJeff Garzik 
4640b9a4197eSTejun Heo 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4641b9a4197eSTejun Heo 	 * few ATAPI devices choke on such DMA requests.
4642b9a4197eSTejun Heo 	 */
4643b9a4197eSTejun Heo 	if (unlikely(qc->nbytes & 15))
46446f23a31dSAlbert Lee 		return 1;
46456f23a31dSAlbert Lee 
4646c6fd2807SJeff Garzik 	if (ap->ops->check_atapi_dma)
4647b9a4197eSTejun Heo 		return ap->ops->check_atapi_dma(qc);
4648c6fd2807SJeff Garzik 
4649b9a4197eSTejun Heo 	return 0;
4650c6fd2807SJeff Garzik }
4651b9a4197eSTejun Heo 
4652c6fd2807SJeff Garzik /**
465331cc23b3STejun Heo  *	ata_std_qc_defer - Check whether a qc needs to be deferred
465431cc23b3STejun Heo  *	@qc: ATA command in question
465531cc23b3STejun Heo  *
465631cc23b3STejun Heo  *	Non-NCQ commands cannot run with any other command, NCQ or
465731cc23b3STejun Heo  *	not.  As upper layer only knows the queue depth, we are
465831cc23b3STejun Heo  *	responsible for maintaining exclusion.  This function checks
465931cc23b3STejun Heo  *	whether a new command @qc can be issued.
466031cc23b3STejun Heo  *
466131cc23b3STejun Heo  *	LOCKING:
466231cc23b3STejun Heo  *	spin_lock_irqsave(host lock)
466331cc23b3STejun Heo  *
466431cc23b3STejun Heo  *	RETURNS:
466531cc23b3STejun Heo  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
466631cc23b3STejun Heo  */
466731cc23b3STejun Heo int ata_std_qc_defer(struct ata_queued_cmd *qc)
466831cc23b3STejun Heo {
466931cc23b3STejun Heo 	struct ata_link *link = qc->dev->link;
467031cc23b3STejun Heo 
467131cc23b3STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
467231cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag))
467331cc23b3STejun Heo 			return 0;
467431cc23b3STejun Heo 	} else {
467531cc23b3STejun Heo 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
467631cc23b3STejun Heo 			return 0;
467731cc23b3STejun Heo 	}
467831cc23b3STejun Heo 
467931cc23b3STejun Heo 	return ATA_DEFER_LINK;
468031cc23b3STejun Heo }
468131cc23b3STejun Heo 
468231cc23b3STejun Heo /**
4683c6fd2807SJeff Garzik  *	ata_qc_prep - Prepare taskfile for submission
4684c6fd2807SJeff Garzik  *	@qc: Metadata associated with taskfile to be prepared
4685c6fd2807SJeff Garzik  *
4686c6fd2807SJeff Garzik  *	Prepare ATA taskfile for submission.
4687c6fd2807SJeff Garzik  *
4688c6fd2807SJeff Garzik  *	LOCKING:
4689cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4690c6fd2807SJeff Garzik  */
4691c6fd2807SJeff Garzik void ata_qc_prep(struct ata_queued_cmd *qc)
4692c6fd2807SJeff Garzik {
4693c6fd2807SJeff Garzik 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4694c6fd2807SJeff Garzik 		return;
4695c6fd2807SJeff Garzik 
4696c6fd2807SJeff Garzik 	ata_fill_sg(qc);
4697c6fd2807SJeff Garzik }
4698c6fd2807SJeff Garzik 
4699d26fc955SAlan Cox /**
4700d26fc955SAlan Cox  *	ata_dumb_qc_prep - Prepare taskfile for submission
4701d26fc955SAlan Cox  *	@qc: Metadata associated with taskfile to be prepared
4702d26fc955SAlan Cox  *
4703d26fc955SAlan Cox  *	Prepare ATA taskfile for submission.
4704d26fc955SAlan Cox  *
4705d26fc955SAlan Cox  *	LOCKING:
4706d26fc955SAlan Cox  *	spin_lock_irqsave(host lock)
4707d26fc955SAlan Cox  */
4708d26fc955SAlan Cox void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4709d26fc955SAlan Cox {
4710d26fc955SAlan Cox 	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4711d26fc955SAlan Cox 		return;
4712d26fc955SAlan Cox 
4713d26fc955SAlan Cox 	ata_fill_sg_dumb(qc);
4714d26fc955SAlan Cox }
4715d26fc955SAlan Cox 
4716c6fd2807SJeff Garzik void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4717c6fd2807SJeff Garzik 
4718c6fd2807SJeff Garzik /**
4719c6fd2807SJeff Garzik  *	ata_sg_init_one - Associate command with memory buffer
4720c6fd2807SJeff Garzik  *	@qc: Command to be associated
4721c6fd2807SJeff Garzik  *	@buf: Memory buffer
4722c6fd2807SJeff Garzik  *	@buflen: Length of memory buffer, in bytes.
4723c6fd2807SJeff Garzik  *
4724c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4725c6fd2807SJeff Garzik  *	to point to a single memory buffer, @buf of byte length @buflen.
4726c6fd2807SJeff Garzik  *
4727c6fd2807SJeff Garzik  *	LOCKING:
4728cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4729c6fd2807SJeff Garzik  */
4730c6fd2807SJeff Garzik 
4731c6fd2807SJeff Garzik void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4732c6fd2807SJeff Garzik {
4733c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SINGLE;
4734c6fd2807SJeff Garzik 
4735c6fd2807SJeff Garzik 	qc->__sg = &qc->sgent;
4736c6fd2807SJeff Garzik 	qc->n_elem = 1;
4737c6fd2807SJeff Garzik 	qc->orig_n_elem = 1;
4738c6fd2807SJeff Garzik 	qc->buf_virt = buf;
4739c6fd2807SJeff Garzik 	qc->nbytes = buflen;
474087260216SJens Axboe 	qc->cursg = qc->__sg;
4741c6fd2807SJeff Garzik 
474261c0596cSTejun Heo 	sg_init_one(&qc->sgent, buf, buflen);
4743c6fd2807SJeff Garzik }
4744c6fd2807SJeff Garzik 
4745c6fd2807SJeff Garzik /**
4746c6fd2807SJeff Garzik  *	ata_sg_init - Associate command with scatter-gather table.
4747c6fd2807SJeff Garzik  *	@qc: Command to be associated
4748c6fd2807SJeff Garzik  *	@sg: Scatter-gather table.
4749c6fd2807SJeff Garzik  *	@n_elem: Number of elements in s/g table.
4750c6fd2807SJeff Garzik  *
4751c6fd2807SJeff Garzik  *	Initialize the data-related elements of queued_cmd @qc
4752c6fd2807SJeff Garzik  *	to point to a scatter-gather table @sg, containing @n_elem
4753c6fd2807SJeff Garzik  *	elements.
4754c6fd2807SJeff Garzik  *
4755c6fd2807SJeff Garzik  *	LOCKING:
4756cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4757c6fd2807SJeff Garzik  */
4758c6fd2807SJeff Garzik 
4759c6fd2807SJeff Garzik void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4760c6fd2807SJeff Garzik 		 unsigned int n_elem)
4761c6fd2807SJeff Garzik {
4762c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_SG;
4763c6fd2807SJeff Garzik 	qc->__sg = sg;
4764c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4765c6fd2807SJeff Garzik 	qc->orig_n_elem = n_elem;
476687260216SJens Axboe 	qc->cursg = qc->__sg;
4767c6fd2807SJeff Garzik }
4768c6fd2807SJeff Garzik 
4769c6fd2807SJeff Garzik /**
4770c6fd2807SJeff Garzik  *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4771c6fd2807SJeff Garzik  *	@qc: Command with memory buffer to be mapped.
4772c6fd2807SJeff Garzik  *
4773c6fd2807SJeff Garzik  *	DMA-map the memory buffer associated with queued_cmd @qc.
4774c6fd2807SJeff Garzik  *
4775c6fd2807SJeff Garzik  *	LOCKING:
4776cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4777c6fd2807SJeff Garzik  *
4778c6fd2807SJeff Garzik  *	RETURNS:
4779c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4780c6fd2807SJeff Garzik  */
4781c6fd2807SJeff Garzik 
4782c6fd2807SJeff Garzik static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4783c6fd2807SJeff Garzik {
4784c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4785c6fd2807SJeff Garzik 	int dir = qc->dma_dir;
4786c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
4787c6fd2807SJeff Garzik 	dma_addr_t dma_address;
4788c6fd2807SJeff Garzik 	int trim_sg = 0;
4789c6fd2807SJeff Garzik 
4790c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4791c6fd2807SJeff Garzik 	qc->pad_len = sg->length & 3;
4792c6fd2807SJeff Garzik 	if (qc->pad_len) {
4793c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4794c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4795c6fd2807SJeff Garzik 
4796c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4797c6fd2807SJeff Garzik 
4798c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4799c6fd2807SJeff Garzik 
4800c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE)
4801c6fd2807SJeff Garzik 			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4802c6fd2807SJeff Garzik 			       qc->pad_len);
4803c6fd2807SJeff Garzik 
4804c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4805c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4806c6fd2807SJeff Garzik 		/* trim sg */
4807c6fd2807SJeff Garzik 		sg->length -= qc->pad_len;
4808c6fd2807SJeff Garzik 		if (sg->length == 0)
4809c6fd2807SJeff Garzik 			trim_sg = 1;
4810c6fd2807SJeff Garzik 
4811c6fd2807SJeff Garzik 		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4812c6fd2807SJeff Garzik 			sg->length, qc->pad_len);
4813c6fd2807SJeff Garzik 	}
4814c6fd2807SJeff Garzik 
4815c6fd2807SJeff Garzik 	if (trim_sg) {
4816c6fd2807SJeff Garzik 		qc->n_elem--;
4817c6fd2807SJeff Garzik 		goto skip_map;
4818c6fd2807SJeff Garzik 	}
4819c6fd2807SJeff Garzik 
4820c6fd2807SJeff Garzik 	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4821c6fd2807SJeff Garzik 				     sg->length, dir);
4822c6fd2807SJeff Garzik 	if (dma_mapping_error(dma_address)) {
4823c6fd2807SJeff Garzik 		/* restore sg */
4824c6fd2807SJeff Garzik 		sg->length += qc->pad_len;
4825c6fd2807SJeff Garzik 		return -1;
4826c6fd2807SJeff Garzik 	}
4827c6fd2807SJeff Garzik 
4828c6fd2807SJeff Garzik 	sg_dma_address(sg) = dma_address;
4829c6fd2807SJeff Garzik 	sg_dma_len(sg) = sg->length;
4830c6fd2807SJeff Garzik 
4831c6fd2807SJeff Garzik skip_map:
4832c6fd2807SJeff Garzik 	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4833c6fd2807SJeff Garzik 		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4834c6fd2807SJeff Garzik 
4835c6fd2807SJeff Garzik 	return 0;
4836c6fd2807SJeff Garzik }
4837c6fd2807SJeff Garzik 
4838c6fd2807SJeff Garzik /**
4839c6fd2807SJeff Garzik  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4840c6fd2807SJeff Garzik  *	@qc: Command with scatter-gather table to be mapped.
4841c6fd2807SJeff Garzik  *
4842c6fd2807SJeff Garzik  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4843c6fd2807SJeff Garzik  *
4844c6fd2807SJeff Garzik  *	LOCKING:
4845cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
4846c6fd2807SJeff Garzik  *
4847c6fd2807SJeff Garzik  *	RETURNS:
4848c6fd2807SJeff Garzik  *	Zero on success, negative on error.
4849c6fd2807SJeff Garzik  *
4850c6fd2807SJeff Garzik  */
4851c6fd2807SJeff Garzik 
4852c6fd2807SJeff Garzik static int ata_sg_setup(struct ata_queued_cmd *qc)
4853c6fd2807SJeff Garzik {
4854c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
4855c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
485687260216SJens Axboe 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
4857c6fd2807SJeff Garzik 	int n_elem, pre_n_elem, dir, trim_sg = 0;
4858c6fd2807SJeff Garzik 
485944877b4eSTejun Heo 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4860c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4861c6fd2807SJeff Garzik 
4862c6fd2807SJeff Garzik 	/* we must lengthen transfers to end on a 32-bit boundary */
4863c6fd2807SJeff Garzik 	qc->pad_len = lsg->length & 3;
4864c6fd2807SJeff Garzik 	if (qc->pad_len) {
4865c6fd2807SJeff Garzik 		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4866c6fd2807SJeff Garzik 		struct scatterlist *psg = &qc->pad_sgent;
4867c6fd2807SJeff Garzik 		unsigned int offset;
4868c6fd2807SJeff Garzik 
4869c6fd2807SJeff Garzik 		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4870c6fd2807SJeff Garzik 
4871c6fd2807SJeff Garzik 		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4872c6fd2807SJeff Garzik 
4873c6fd2807SJeff Garzik 		/*
4874c6fd2807SJeff Garzik 		 * psg->page/offset are used to copy to-be-written
4875c6fd2807SJeff Garzik 		 * data in this function or read data in ata_sg_clean.
4876c6fd2807SJeff Garzik 		 */
4877c6fd2807SJeff Garzik 		offset = lsg->offset + lsg->length - qc->pad_len;
4878acd054a5SAnton Blanchard 		sg_init_table(psg, 1);
4879642f1490SJens Axboe 		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4880642f1490SJens Axboe 				qc->pad_len, offset_in_page(offset));
4881c6fd2807SJeff Garzik 
4882c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
488345711f1aSJens Axboe 			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4884c6fd2807SJeff Garzik 			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4885c6fd2807SJeff Garzik 			kunmap_atomic(addr, KM_IRQ0);
4886c6fd2807SJeff Garzik 		}
4887c6fd2807SJeff Garzik 
4888c6fd2807SJeff Garzik 		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4889c6fd2807SJeff Garzik 		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4890c6fd2807SJeff Garzik 		/* trim last sg */
4891c6fd2807SJeff Garzik 		lsg->length -= qc->pad_len;
4892c6fd2807SJeff Garzik 		if (lsg->length == 0)
4893c6fd2807SJeff Garzik 			trim_sg = 1;
4894c6fd2807SJeff Garzik 
4895c6fd2807SJeff Garzik 		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4896c6fd2807SJeff Garzik 			qc->n_elem - 1, lsg->length, qc->pad_len);
4897c6fd2807SJeff Garzik 	}
4898c6fd2807SJeff Garzik 
4899c6fd2807SJeff Garzik 	pre_n_elem = qc->n_elem;
4900c6fd2807SJeff Garzik 	if (trim_sg && pre_n_elem)
4901c6fd2807SJeff Garzik 		pre_n_elem--;
4902c6fd2807SJeff Garzik 
4903c6fd2807SJeff Garzik 	if (!pre_n_elem) {
4904c6fd2807SJeff Garzik 		n_elem = 0;
4905c6fd2807SJeff Garzik 		goto skip_map;
4906c6fd2807SJeff Garzik 	}
4907c6fd2807SJeff Garzik 
4908c6fd2807SJeff Garzik 	dir = qc->dma_dir;
4909c6fd2807SJeff Garzik 	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4910c6fd2807SJeff Garzik 	if (n_elem < 1) {
4911c6fd2807SJeff Garzik 		/* restore last sg */
4912c6fd2807SJeff Garzik 		lsg->length += qc->pad_len;
4913c6fd2807SJeff Garzik 		return -1;
4914c6fd2807SJeff Garzik 	}
4915c6fd2807SJeff Garzik 
4916c6fd2807SJeff Garzik 	DPRINTK("%d sg elements mapped\n", n_elem);
4917c6fd2807SJeff Garzik 
4918c6fd2807SJeff Garzik skip_map:
4919c6fd2807SJeff Garzik 	qc->n_elem = n_elem;
4920c6fd2807SJeff Garzik 
4921c6fd2807SJeff Garzik 	return 0;
4922c6fd2807SJeff Garzik }
4923c6fd2807SJeff Garzik 
4924c6fd2807SJeff Garzik /**
4925c6fd2807SJeff Garzik  *	swap_buf_le16 - swap halves of 16-bit words in place
4926c6fd2807SJeff Garzik  *	@buf:  Buffer to swap
4927c6fd2807SJeff Garzik  *	@buf_words:  Number of 16-bit words in buffer.
4928c6fd2807SJeff Garzik  *
4929c6fd2807SJeff Garzik  *	Swap halves of 16-bit words if needed to convert from
4930c6fd2807SJeff Garzik  *	little-endian byte order to native cpu byte order, or
4931c6fd2807SJeff Garzik  *	vice-versa.
4932c6fd2807SJeff Garzik  *
4933c6fd2807SJeff Garzik  *	LOCKING:
4934c6fd2807SJeff Garzik  *	Inherited from caller.
4935c6fd2807SJeff Garzik  */
4936c6fd2807SJeff Garzik void swap_buf_le16(u16 *buf, unsigned int buf_words)
4937c6fd2807SJeff Garzik {
4938c6fd2807SJeff Garzik #ifdef __BIG_ENDIAN
4939c6fd2807SJeff Garzik 	unsigned int i;
4940c6fd2807SJeff Garzik 
4941c6fd2807SJeff Garzik 	for (i = 0; i < buf_words; i++)
4942c6fd2807SJeff Garzik 		buf[i] = le16_to_cpu(buf[i]);
4943c6fd2807SJeff Garzik #endif /* __BIG_ENDIAN */
4944c6fd2807SJeff Garzik }
4945c6fd2807SJeff Garzik 
4946c6fd2807SJeff Garzik /**
49470d5ff566STejun Heo  *	ata_data_xfer - Transfer data by PIO
4948c6fd2807SJeff Garzik  *	@adev: device to target
4949c6fd2807SJeff Garzik  *	@buf: data buffer
4950c6fd2807SJeff Garzik  *	@buflen: buffer length
4951c6fd2807SJeff Garzik  *	@write_data: read/write
4952c6fd2807SJeff Garzik  *
4953c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO.
4954c6fd2807SJeff Garzik  *
4955c6fd2807SJeff Garzik  *	LOCKING:
4956c6fd2807SJeff Garzik  *	Inherited from caller.
4957c6fd2807SJeff Garzik  */
49580d5ff566STejun Heo void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4959c6fd2807SJeff Garzik 		   unsigned int buflen, int write_data)
4960c6fd2807SJeff Garzik {
49619af5c9c9STejun Heo 	struct ata_port *ap = adev->link->ap;
4962c6fd2807SJeff Garzik 	unsigned int words = buflen >> 1;
4963c6fd2807SJeff Garzik 
4964c6fd2807SJeff Garzik 	/* Transfer multiple of 2 bytes */
4965c6fd2807SJeff Garzik 	if (write_data)
49660d5ff566STejun Heo 		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4967c6fd2807SJeff Garzik 	else
49680d5ff566STejun Heo 		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4969c6fd2807SJeff Garzik 
4970c6fd2807SJeff Garzik 	/* Transfer trailing 1 byte, if any. */
4971c6fd2807SJeff Garzik 	if (unlikely(buflen & 0x01)) {
4972c6fd2807SJeff Garzik 		u16 align_buf[1] = { 0 };
4973c6fd2807SJeff Garzik 		unsigned char *trailing_buf = buf + buflen - 1;
4974c6fd2807SJeff Garzik 
4975c6fd2807SJeff Garzik 		if (write_data) {
4976c6fd2807SJeff Garzik 			memcpy(align_buf, trailing_buf, 1);
49770d5ff566STejun Heo 			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4978c6fd2807SJeff Garzik 		} else {
49790d5ff566STejun Heo 			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4980c6fd2807SJeff Garzik 			memcpy(trailing_buf, align_buf, 1);
4981c6fd2807SJeff Garzik 		}
4982c6fd2807SJeff Garzik 	}
4983c6fd2807SJeff Garzik }
4984c6fd2807SJeff Garzik 
4985c6fd2807SJeff Garzik /**
49860d5ff566STejun Heo  *	ata_data_xfer_noirq - Transfer data by PIO
4987c6fd2807SJeff Garzik  *	@adev: device to target
4988c6fd2807SJeff Garzik  *	@buf: data buffer
4989c6fd2807SJeff Garzik  *	@buflen: buffer length
4990c6fd2807SJeff Garzik  *	@write_data: read/write
4991c6fd2807SJeff Garzik  *
4992c6fd2807SJeff Garzik  *	Transfer data from/to the device data register by PIO. Do the
4993c6fd2807SJeff Garzik  *	transfer with interrupts disabled.
4994c6fd2807SJeff Garzik  *
4995c6fd2807SJeff Garzik  *	LOCKING:
4996c6fd2807SJeff Garzik  *	Inherited from caller.
4997c6fd2807SJeff Garzik  */
49980d5ff566STejun Heo void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4999c6fd2807SJeff Garzik 			 unsigned int buflen, int write_data)
5000c6fd2807SJeff Garzik {
5001c6fd2807SJeff Garzik 	unsigned long flags;
5002c6fd2807SJeff Garzik 	local_irq_save(flags);
50030d5ff566STejun Heo 	ata_data_xfer(adev, buf, buflen, write_data);
5004c6fd2807SJeff Garzik 	local_irq_restore(flags);
5005c6fd2807SJeff Garzik }
5006c6fd2807SJeff Garzik 
5007c6fd2807SJeff Garzik 
5008c6fd2807SJeff Garzik /**
50095a5dbd18SMark Lord  *	ata_pio_sector - Transfer a sector of data.
5010c6fd2807SJeff Garzik  *	@qc: Command on going
5011c6fd2807SJeff Garzik  *
50125a5dbd18SMark Lord  *	Transfer qc->sect_size bytes of data from/to the ATA device.
5013c6fd2807SJeff Garzik  *
5014c6fd2807SJeff Garzik  *	LOCKING:
5015c6fd2807SJeff Garzik  *	Inherited from caller.
5016c6fd2807SJeff Garzik  */
5017c6fd2807SJeff Garzik 
5018c6fd2807SJeff Garzik static void ata_pio_sector(struct ata_queued_cmd *qc)
5019c6fd2807SJeff Garzik {
5020c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5021c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5022c6fd2807SJeff Garzik 	struct page *page;
5023c6fd2807SJeff Garzik 	unsigned int offset;
5024c6fd2807SJeff Garzik 	unsigned char *buf;
5025c6fd2807SJeff Garzik 
50265a5dbd18SMark Lord 	if (qc->curbytes == qc->nbytes - qc->sect_size)
5027c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5028c6fd2807SJeff Garzik 
502945711f1aSJens Axboe 	page = sg_page(qc->cursg);
503087260216SJens Axboe 	offset = qc->cursg->offset + qc->cursg_ofs;
5031c6fd2807SJeff Garzik 
5032c6fd2807SJeff Garzik 	/* get the current page and offset */
5033c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5034c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5035c6fd2807SJeff Garzik 
5036c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5037c6fd2807SJeff Garzik 
5038c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5039c6fd2807SJeff Garzik 		unsigned long flags;
5040c6fd2807SJeff Garzik 
5041c6fd2807SJeff Garzik 		/* FIXME: use a bounce buffer */
5042c6fd2807SJeff Garzik 		local_irq_save(flags);
5043c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5044c6fd2807SJeff Garzik 
5045c6fd2807SJeff Garzik 		/* do the actual data transfer */
50465a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5047c6fd2807SJeff Garzik 
5048c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5049c6fd2807SJeff Garzik 		local_irq_restore(flags);
5050c6fd2807SJeff Garzik 	} else {
5051c6fd2807SJeff Garzik 		buf = page_address(page);
50525a5dbd18SMark Lord 		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5053c6fd2807SJeff Garzik 	}
5054c6fd2807SJeff Garzik 
50555a5dbd18SMark Lord 	qc->curbytes += qc->sect_size;
50565a5dbd18SMark Lord 	qc->cursg_ofs += qc->sect_size;
5057c6fd2807SJeff Garzik 
505887260216SJens Axboe 	if (qc->cursg_ofs == qc->cursg->length) {
505987260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5060c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5061c6fd2807SJeff Garzik 	}
5062c6fd2807SJeff Garzik }
5063c6fd2807SJeff Garzik 
5064c6fd2807SJeff Garzik /**
50655a5dbd18SMark Lord  *	ata_pio_sectors - Transfer one or many sectors.
5066c6fd2807SJeff Garzik  *	@qc: Command on going
5067c6fd2807SJeff Garzik  *
50685a5dbd18SMark Lord  *	Transfer one or many sectors of data from/to the
5069c6fd2807SJeff Garzik  *	ATA device for the DRQ request.
5070c6fd2807SJeff Garzik  *
5071c6fd2807SJeff Garzik  *	LOCKING:
5072c6fd2807SJeff Garzik  *	Inherited from caller.
5073c6fd2807SJeff Garzik  */
5074c6fd2807SJeff Garzik 
5075c6fd2807SJeff Garzik static void ata_pio_sectors(struct ata_queued_cmd *qc)
5076c6fd2807SJeff Garzik {
5077c6fd2807SJeff Garzik 	if (is_multi_taskfile(&qc->tf)) {
5078c6fd2807SJeff Garzik 		/* READ/WRITE MULTIPLE */
5079c6fd2807SJeff Garzik 		unsigned int nsect;
5080c6fd2807SJeff Garzik 
5081c6fd2807SJeff Garzik 		WARN_ON(qc->dev->multi_count == 0);
5082c6fd2807SJeff Garzik 
50835a5dbd18SMark Lord 		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
5084726f0785STejun Heo 			    qc->dev->multi_count);
5085c6fd2807SJeff Garzik 		while (nsect--)
5086c6fd2807SJeff Garzik 			ata_pio_sector(qc);
5087c6fd2807SJeff Garzik 	} else
5088c6fd2807SJeff Garzik 		ata_pio_sector(qc);
50894cc980b3SAlbert Lee 
50904cc980b3SAlbert Lee 	ata_altstatus(qc->ap); /* flush */
5091c6fd2807SJeff Garzik }
5092c6fd2807SJeff Garzik 
5093c6fd2807SJeff Garzik /**
5094c6fd2807SJeff Garzik  *	atapi_send_cdb - Write CDB bytes to hardware
5095c6fd2807SJeff Garzik  *	@ap: Port to which ATAPI device is attached.
5096c6fd2807SJeff Garzik  *	@qc: Taskfile currently active
5097c6fd2807SJeff Garzik  *
5098c6fd2807SJeff Garzik  *	When device has indicated its readiness to accept
5099c6fd2807SJeff Garzik  *	a CDB, this function is called.  Send the CDB.
5100c6fd2807SJeff Garzik  *
5101c6fd2807SJeff Garzik  *	LOCKING:
5102c6fd2807SJeff Garzik  *	caller.
5103c6fd2807SJeff Garzik  */
5104c6fd2807SJeff Garzik 
5105c6fd2807SJeff Garzik static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5106c6fd2807SJeff Garzik {
5107c6fd2807SJeff Garzik 	/* send SCSI cdb */
5108c6fd2807SJeff Garzik 	DPRINTK("send cdb\n");
5109c6fd2807SJeff Garzik 	WARN_ON(qc->dev->cdb_len < 12);
5110c6fd2807SJeff Garzik 
5111c6fd2807SJeff Garzik 	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5112c6fd2807SJeff Garzik 	ata_altstatus(ap); /* flush */
5113c6fd2807SJeff Garzik 
5114c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5115c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5116c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST;
5117c6fd2807SJeff Garzik 		break;
5118c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
5119c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5120c6fd2807SJeff Garzik 		break;
5121c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5122c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5123c6fd2807SJeff Garzik 		/* initiate bmdma */
5124c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);
5125c6fd2807SJeff Garzik 		break;
5126c6fd2807SJeff Garzik 	}
5127c6fd2807SJeff Garzik }
5128c6fd2807SJeff Garzik 
5129c6fd2807SJeff Garzik /**
5130c6fd2807SJeff Garzik  *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
5131c6fd2807SJeff Garzik  *	@qc: Command on going
5132c6fd2807SJeff Garzik  *	@bytes: number of bytes
5133c6fd2807SJeff Garzik  *
5134c6fd2807SJeff Garzik  *	Transfer Transfer data from/to the ATAPI device.
5135c6fd2807SJeff Garzik  *
5136c6fd2807SJeff Garzik  *	LOCKING:
5137c6fd2807SJeff Garzik  *	Inherited from caller.
5138c6fd2807SJeff Garzik  *
5139c6fd2807SJeff Garzik  */
5140c6fd2807SJeff Garzik 
5141c6fd2807SJeff Garzik static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5142c6fd2807SJeff Garzik {
5143c6fd2807SJeff Garzik 	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5144c6fd2807SJeff Garzik 	struct scatterlist *sg = qc->__sg;
51450874ee76SFUJITA Tomonori 	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
5146c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5147c6fd2807SJeff Garzik 	struct page *page;
5148c6fd2807SJeff Garzik 	unsigned char *buf;
5149c6fd2807SJeff Garzik 	unsigned int offset, count;
51500874ee76SFUJITA Tomonori 	int no_more_sg = 0;
5151c6fd2807SJeff Garzik 
5152c6fd2807SJeff Garzik 	if (qc->curbytes + bytes >= qc->nbytes)
5153c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5154c6fd2807SJeff Garzik 
5155c6fd2807SJeff Garzik next_sg:
51560874ee76SFUJITA Tomonori 	if (unlikely(no_more_sg)) {
5157c6fd2807SJeff Garzik 		/*
5158c6fd2807SJeff Garzik 		 * The end of qc->sg is reached and the device expects
5159c6fd2807SJeff Garzik 		 * more data to transfer. In order not to overrun qc->sg
5160c6fd2807SJeff Garzik 		 * and fulfill length specified in the byte count register,
5161c6fd2807SJeff Garzik 		 *    - for read case, discard trailing data from the device
5162c6fd2807SJeff Garzik 		 *    - for write case, padding zero data to the device
5163c6fd2807SJeff Garzik 		 */
5164c6fd2807SJeff Garzik 		u16 pad_buf[1] = { 0 };
5165c6fd2807SJeff Garzik 		unsigned int words = bytes >> 1;
5166c6fd2807SJeff Garzik 		unsigned int i;
5167c6fd2807SJeff Garzik 
5168c6fd2807SJeff Garzik 		if (words) /* warning if bytes > 1 */
5169c6fd2807SJeff Garzik 			ata_dev_printk(qc->dev, KERN_WARNING,
5170c6fd2807SJeff Garzik 				       "%u bytes trailing data\n", bytes);
5171c6fd2807SJeff Garzik 
5172c6fd2807SJeff Garzik 		for (i = 0; i < words; i++)
5173c6fd2807SJeff Garzik 			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5174c6fd2807SJeff Garzik 
5175c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
5176c6fd2807SJeff Garzik 		return;
5177c6fd2807SJeff Garzik 	}
5178c6fd2807SJeff Garzik 
517987260216SJens Axboe 	sg = qc->cursg;
5180c6fd2807SJeff Garzik 
518145711f1aSJens Axboe 	page = sg_page(sg);
5182c6fd2807SJeff Garzik 	offset = sg->offset + qc->cursg_ofs;
5183c6fd2807SJeff Garzik 
5184c6fd2807SJeff Garzik 	/* get the current page and offset */
5185c6fd2807SJeff Garzik 	page = nth_page(page, (offset >> PAGE_SHIFT));
5186c6fd2807SJeff Garzik 	offset %= PAGE_SIZE;
5187c6fd2807SJeff Garzik 
5188c6fd2807SJeff Garzik 	/* don't overrun current sg */
5189c6fd2807SJeff Garzik 	count = min(sg->length - qc->cursg_ofs, bytes);
5190c6fd2807SJeff Garzik 
5191c6fd2807SJeff Garzik 	/* don't cross page boundaries */
5192c6fd2807SJeff Garzik 	count = min(count, (unsigned int)PAGE_SIZE - offset);
5193c6fd2807SJeff Garzik 
5194c6fd2807SJeff Garzik 	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5195c6fd2807SJeff Garzik 
5196c6fd2807SJeff Garzik 	if (PageHighMem(page)) {
5197c6fd2807SJeff Garzik 		unsigned long flags;
5198c6fd2807SJeff Garzik 
5199c6fd2807SJeff Garzik 		/* FIXME: use bounce buffer */
5200c6fd2807SJeff Garzik 		local_irq_save(flags);
5201c6fd2807SJeff Garzik 		buf = kmap_atomic(page, KM_IRQ0);
5202c6fd2807SJeff Garzik 
5203c6fd2807SJeff Garzik 		/* do the actual data transfer */
5204c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5205c6fd2807SJeff Garzik 
5206c6fd2807SJeff Garzik 		kunmap_atomic(buf, KM_IRQ0);
5207c6fd2807SJeff Garzik 		local_irq_restore(flags);
5208c6fd2807SJeff Garzik 	} else {
5209c6fd2807SJeff Garzik 		buf = page_address(page);
5210c6fd2807SJeff Garzik 		ap->ops->data_xfer(qc->dev,  buf + offset, count, do_write);
5211c6fd2807SJeff Garzik 	}
5212c6fd2807SJeff Garzik 
5213c6fd2807SJeff Garzik 	bytes -= count;
5214c6fd2807SJeff Garzik 	qc->curbytes += count;
5215c6fd2807SJeff Garzik 	qc->cursg_ofs += count;
5216c6fd2807SJeff Garzik 
5217c6fd2807SJeff Garzik 	if (qc->cursg_ofs == sg->length) {
52180874ee76SFUJITA Tomonori 		if (qc->cursg == lsg)
52190874ee76SFUJITA Tomonori 			no_more_sg = 1;
52200874ee76SFUJITA Tomonori 
522187260216SJens Axboe 		qc->cursg = sg_next(qc->cursg);
5222c6fd2807SJeff Garzik 		qc->cursg_ofs = 0;
5223c6fd2807SJeff Garzik 	}
5224c6fd2807SJeff Garzik 
5225c6fd2807SJeff Garzik 	if (bytes)
5226c6fd2807SJeff Garzik 		goto next_sg;
5227c6fd2807SJeff Garzik }
5228c6fd2807SJeff Garzik 
5229c6fd2807SJeff Garzik /**
5230c6fd2807SJeff Garzik  *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
5231c6fd2807SJeff Garzik  *	@qc: Command on going
5232c6fd2807SJeff Garzik  *
5233c6fd2807SJeff Garzik  *	Transfer Transfer data from/to the ATAPI device.
5234c6fd2807SJeff Garzik  *
5235c6fd2807SJeff Garzik  *	LOCKING:
5236c6fd2807SJeff Garzik  *	Inherited from caller.
5237c6fd2807SJeff Garzik  */
5238c6fd2807SJeff Garzik 
5239c6fd2807SJeff Garzik static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5240c6fd2807SJeff Garzik {
5241c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5242c6fd2807SJeff Garzik 	struct ata_device *dev = qc->dev;
5243c6fd2807SJeff Garzik 	unsigned int ireason, bc_lo, bc_hi, bytes;
5244c6fd2807SJeff Garzik 	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5245c6fd2807SJeff Garzik 
5246c6fd2807SJeff Garzik 	/* Abuse qc->result_tf for temp storage of intermediate TF
5247c6fd2807SJeff Garzik 	 * here to save some kernel stack usage.
5248c6fd2807SJeff Garzik 	 * For normal completion, qc->result_tf is not relevant. For
5249c6fd2807SJeff Garzik 	 * error, qc->result_tf is later overwritten by ata_qc_complete().
5250c6fd2807SJeff Garzik 	 * So, the correctness of qc->result_tf is not affected.
5251c6fd2807SJeff Garzik 	 */
5252c6fd2807SJeff Garzik 	ap->ops->tf_read(ap, &qc->result_tf);
5253c6fd2807SJeff Garzik 	ireason = qc->result_tf.nsect;
5254c6fd2807SJeff Garzik 	bc_lo = qc->result_tf.lbam;
5255c6fd2807SJeff Garzik 	bc_hi = qc->result_tf.lbah;
5256c6fd2807SJeff Garzik 	bytes = (bc_hi << 8) | bc_lo;
5257c6fd2807SJeff Garzik 
5258c6fd2807SJeff Garzik 	/* shall be cleared to zero, indicating xfer of data */
5259c6fd2807SJeff Garzik 	if (ireason & (1 << 0))
5260c6fd2807SJeff Garzik 		goto err_out;
5261c6fd2807SJeff Garzik 
5262c6fd2807SJeff Garzik 	/* make sure transfer direction matches expected */
5263c6fd2807SJeff Garzik 	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5264c6fd2807SJeff Garzik 	if (do_write != i_write)
5265c6fd2807SJeff Garzik 		goto err_out;
5266c6fd2807SJeff Garzik 
526744877b4eSTejun Heo 	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5268c6fd2807SJeff Garzik 
5269c6fd2807SJeff Garzik 	__atapi_pio_bytes(qc, bytes);
52704cc980b3SAlbert Lee 	ata_altstatus(ap); /* flush */
5271c6fd2807SJeff Garzik 
5272c6fd2807SJeff Garzik 	return;
5273c6fd2807SJeff Garzik 
5274c6fd2807SJeff Garzik err_out:
5275c6fd2807SJeff Garzik 	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5276c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_HSM;
5277c6fd2807SJeff Garzik 	ap->hsm_task_state = HSM_ST_ERR;
5278c6fd2807SJeff Garzik }
5279c6fd2807SJeff Garzik 
5280c6fd2807SJeff Garzik /**
5281c6fd2807SJeff Garzik  *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5282c6fd2807SJeff Garzik  *	@ap: the target ata_port
5283c6fd2807SJeff Garzik  *	@qc: qc on going
5284c6fd2807SJeff Garzik  *
5285c6fd2807SJeff Garzik  *	RETURNS:
5286c6fd2807SJeff Garzik  *	1 if ok in workqueue, 0 otherwise.
5287c6fd2807SJeff Garzik  */
5288c6fd2807SJeff Garzik 
5289c6fd2807SJeff Garzik static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5290c6fd2807SJeff Garzik {
5291c6fd2807SJeff Garzik 	if (qc->tf.flags & ATA_TFLAG_POLLING)
5292c6fd2807SJeff Garzik 		return 1;
5293c6fd2807SJeff Garzik 
5294c6fd2807SJeff Garzik 	if (ap->hsm_task_state == HSM_ST_FIRST) {
5295c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO &&
5296c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_WRITE))
5297c6fd2807SJeff Garzik 		    return 1;
5298c6fd2807SJeff Garzik 
5299c6fd2807SJeff Garzik 		if (is_atapi_taskfile(&qc->tf) &&
5300c6fd2807SJeff Garzik 		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5301c6fd2807SJeff Garzik 			return 1;
5302c6fd2807SJeff Garzik 	}
5303c6fd2807SJeff Garzik 
5304c6fd2807SJeff Garzik 	return 0;
5305c6fd2807SJeff Garzik }
5306c6fd2807SJeff Garzik 
5307c6fd2807SJeff Garzik /**
5308c6fd2807SJeff Garzik  *	ata_hsm_qc_complete - finish a qc running on standard HSM
5309c6fd2807SJeff Garzik  *	@qc: Command to complete
5310c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5311c6fd2807SJeff Garzik  *
5312c6fd2807SJeff Garzik  *	Finish @qc which is running on standard HSM.
5313c6fd2807SJeff Garzik  *
5314c6fd2807SJeff Garzik  *	LOCKING:
5315cca3974eSJeff Garzik  *	If @in_wq is zero, spin_lock_irqsave(host lock).
5316c6fd2807SJeff Garzik  *	Otherwise, none on entry and grabs host lock.
5317c6fd2807SJeff Garzik  */
5318c6fd2807SJeff Garzik static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5319c6fd2807SJeff Garzik {
5320c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5321c6fd2807SJeff Garzik 	unsigned long flags;
5322c6fd2807SJeff Garzik 
5323c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
5324c6fd2807SJeff Garzik 		if (in_wq) {
5325c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5326c6fd2807SJeff Garzik 
5327cca3974eSJeff Garzik 			/* EH might have kicked in while host lock is
5328cca3974eSJeff Garzik 			 * released.
5329c6fd2807SJeff Garzik 			 */
5330c6fd2807SJeff Garzik 			qc = ata_qc_from_tag(ap, qc->tag);
5331c6fd2807SJeff Garzik 			if (qc) {
5332c6fd2807SJeff Garzik 				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
533383625006SAkira Iguchi 					ap->ops->irq_on(ap);
5334c6fd2807SJeff Garzik 					ata_qc_complete(qc);
5335c6fd2807SJeff Garzik 				} else
5336c6fd2807SJeff Garzik 					ata_port_freeze(ap);
5337c6fd2807SJeff Garzik 			}
5338c6fd2807SJeff Garzik 
5339c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5340c6fd2807SJeff Garzik 		} else {
5341c6fd2807SJeff Garzik 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
5342c6fd2807SJeff Garzik 				ata_qc_complete(qc);
5343c6fd2807SJeff Garzik 			else
5344c6fd2807SJeff Garzik 				ata_port_freeze(ap);
5345c6fd2807SJeff Garzik 		}
5346c6fd2807SJeff Garzik 	} else {
5347c6fd2807SJeff Garzik 		if (in_wq) {
5348c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
534983625006SAkira Iguchi 			ap->ops->irq_on(ap);
5350c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5351c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5352c6fd2807SJeff Garzik 		} else
5353c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5354c6fd2807SJeff Garzik 	}
5355c6fd2807SJeff Garzik }
5356c6fd2807SJeff Garzik 
5357c6fd2807SJeff Garzik /**
5358c6fd2807SJeff Garzik  *	ata_hsm_move - move the HSM to the next state.
5359c6fd2807SJeff Garzik  *	@ap: the target ata_port
5360c6fd2807SJeff Garzik  *	@qc: qc on going
5361c6fd2807SJeff Garzik  *	@status: current device status
5362c6fd2807SJeff Garzik  *	@in_wq: 1 if called from workqueue, 0 otherwise
5363c6fd2807SJeff Garzik  *
5364c6fd2807SJeff Garzik  *	RETURNS:
5365c6fd2807SJeff Garzik  *	1 when poll next status needed, 0 otherwise.
5366c6fd2807SJeff Garzik  */
5367c6fd2807SJeff Garzik int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5368c6fd2807SJeff Garzik 		 u8 status, int in_wq)
5369c6fd2807SJeff Garzik {
5370c6fd2807SJeff Garzik 	unsigned long flags = 0;
5371c6fd2807SJeff Garzik 	int poll_next;
5372c6fd2807SJeff Garzik 
5373c6fd2807SJeff Garzik 	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5374c6fd2807SJeff Garzik 
5375c6fd2807SJeff Garzik 	/* Make sure ata_qc_issue_prot() does not throw things
5376c6fd2807SJeff Garzik 	 * like DMA polling into the workqueue. Notice that
5377c6fd2807SJeff Garzik 	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5378c6fd2807SJeff Garzik 	 */
5379c6fd2807SJeff Garzik 	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5380c6fd2807SJeff Garzik 
5381c6fd2807SJeff Garzik fsm_start:
5382c6fd2807SJeff Garzik 	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
538344877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5384c6fd2807SJeff Garzik 
5385c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
5386c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
5387c6fd2807SJeff Garzik 		/* Send first data block or PACKET CDB */
5388c6fd2807SJeff Garzik 
5389c6fd2807SJeff Garzik 		/* If polling, we will stay in the work queue after
5390c6fd2807SJeff Garzik 		 * sending the data. Otherwise, interrupt handler
5391c6fd2807SJeff Garzik 		 * takes over after sending the data.
5392c6fd2807SJeff Garzik 		 */
5393c6fd2807SJeff Garzik 		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5394c6fd2807SJeff Garzik 
5395c6fd2807SJeff Garzik 		/* check device status */
5396c6fd2807SJeff Garzik 		if (unlikely((status & ATA_DRQ) == 0)) {
5397c6fd2807SJeff Garzik 			/* handle BSY=0, DRQ=0 as error */
5398c6fd2807SJeff Garzik 			if (likely(status & (ATA_ERR | ATA_DF)))
5399c6fd2807SJeff Garzik 				/* device stops HSM for abort/error */
5400c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5401c6fd2807SJeff Garzik 			else
5402c6fd2807SJeff Garzik 				/* HSM violation. Let EH handle this */
5403c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5404c6fd2807SJeff Garzik 
5405c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5406c6fd2807SJeff Garzik 			goto fsm_start;
5407c6fd2807SJeff Garzik 		}
5408c6fd2807SJeff Garzik 
5409c6fd2807SJeff Garzik 		/* Device should not ask for data transfer (DRQ=1)
5410c6fd2807SJeff Garzik 		 * when it finds something wrong.
5411c6fd2807SJeff Garzik 		 * We ignore DRQ here and stop the HSM by
5412c6fd2807SJeff Garzik 		 * changing hsm_task_state to HSM_ST_ERR and
5413c6fd2807SJeff Garzik 		 * let the EH abort the command or reset the device.
5414c6fd2807SJeff Garzik 		 */
5415c6fd2807SJeff Garzik 		if (unlikely(status & (ATA_ERR | ATA_DF))) {
54162d3b8eeaSAlbert Lee 			/* Some ATAPI tape drives forget to clear the ERR bit
54172d3b8eeaSAlbert Lee 			 * when doing the next command (mostly request sense).
54182d3b8eeaSAlbert Lee 			 * We ignore ERR here to workaround and proceed sending
54192d3b8eeaSAlbert Lee 			 * the CDB.
54202d3b8eeaSAlbert Lee 			 */
54212d3b8eeaSAlbert Lee 			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
54222d3b8eeaSAlbert Lee 				ata_port_printk(ap, KERN_WARNING,
54232d3b8eeaSAlbert Lee 						"DRQ=1 with device error, "
54242d3b8eeaSAlbert Lee 						"dev_stat 0x%X\n", status);
5425c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5426c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5427c6fd2807SJeff Garzik 				goto fsm_start;
5428c6fd2807SJeff Garzik 			}
54292d3b8eeaSAlbert Lee 		}
5430c6fd2807SJeff Garzik 
5431c6fd2807SJeff Garzik 		/* Send the CDB (atapi) or the first data block (ata pio out).
5432c6fd2807SJeff Garzik 		 * During the state transition, interrupt handler shouldn't
5433c6fd2807SJeff Garzik 		 * be invoked before the data transfer is complete and
5434c6fd2807SJeff Garzik 		 * hsm_task_state is changed. Hence, the following locking.
5435c6fd2807SJeff Garzik 		 */
5436c6fd2807SJeff Garzik 		if (in_wq)
5437c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
5438c6fd2807SJeff Garzik 
5439c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_PIO) {
5440c6fd2807SJeff Garzik 			/* PIO data out protocol.
5441c6fd2807SJeff Garzik 			 * send first data block.
5442c6fd2807SJeff Garzik 			 */
5443c6fd2807SJeff Garzik 
5444c6fd2807SJeff Garzik 			/* ata_pio_sectors() might change the state
5445c6fd2807SJeff Garzik 			 * to HSM_ST_LAST. so, the state is changed here
5446c6fd2807SJeff Garzik 			 * before ata_pio_sectors().
5447c6fd2807SJeff Garzik 			 */
5448c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
5449c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5450c6fd2807SJeff Garzik 		} else
5451c6fd2807SJeff Garzik 			/* send CDB */
5452c6fd2807SJeff Garzik 			atapi_send_cdb(ap, qc);
5453c6fd2807SJeff Garzik 
5454c6fd2807SJeff Garzik 		if (in_wq)
5455c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
5456c6fd2807SJeff Garzik 
5457c6fd2807SJeff Garzik 		/* if polling, ata_pio_task() handles the rest.
5458c6fd2807SJeff Garzik 		 * otherwise, interrupt handler takes over from here.
5459c6fd2807SJeff Garzik 		 */
5460c6fd2807SJeff Garzik 		break;
5461c6fd2807SJeff Garzik 
5462c6fd2807SJeff Garzik 	case HSM_ST:
5463c6fd2807SJeff Garzik 		/* complete command or read/write the data register */
5464c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_ATAPI) {
5465c6fd2807SJeff Garzik 			/* ATAPI PIO protocol */
5466c6fd2807SJeff Garzik 			if ((status & ATA_DRQ) == 0) {
5467c6fd2807SJeff Garzik 				/* No more data to transfer or device error.
5468c6fd2807SJeff Garzik 				 * Device error will be tagged in HSM_ST_LAST.
5469c6fd2807SJeff Garzik 				 */
5470c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_LAST;
5471c6fd2807SJeff Garzik 				goto fsm_start;
5472c6fd2807SJeff Garzik 			}
5473c6fd2807SJeff Garzik 
5474c6fd2807SJeff Garzik 			/* Device should not ask for data transfer (DRQ=1)
5475c6fd2807SJeff Garzik 			 * when it finds something wrong.
5476c6fd2807SJeff Garzik 			 * We ignore DRQ here and stop the HSM by
5477c6fd2807SJeff Garzik 			 * changing hsm_task_state to HSM_ST_ERR and
5478c6fd2807SJeff Garzik 			 * let the EH abort the command or reset the device.
5479c6fd2807SJeff Garzik 			 */
5480c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
548144877b4eSTejun Heo 				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
548244877b4eSTejun Heo 						"device error, dev_stat 0x%X\n",
548344877b4eSTejun Heo 						status);
5484c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HSM;
5485c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5486c6fd2807SJeff Garzik 				goto fsm_start;
5487c6fd2807SJeff Garzik 			}
5488c6fd2807SJeff Garzik 
5489c6fd2807SJeff Garzik 			atapi_pio_bytes(qc);
5490c6fd2807SJeff Garzik 
5491c6fd2807SJeff Garzik 			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5492c6fd2807SJeff Garzik 				/* bad ireason reported by device */
5493c6fd2807SJeff Garzik 				goto fsm_start;
5494c6fd2807SJeff Garzik 
5495c6fd2807SJeff Garzik 		} else {
5496c6fd2807SJeff Garzik 			/* ATA PIO protocol */
5497c6fd2807SJeff Garzik 			if (unlikely((status & ATA_DRQ) == 0)) {
5498c6fd2807SJeff Garzik 				/* handle BSY=0, DRQ=0 as error */
5499c6fd2807SJeff Garzik 				if (likely(status & (ATA_ERR | ATA_DF)))
5500c6fd2807SJeff Garzik 					/* device stops HSM for abort/error */
5501c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_DEV;
5502c6fd2807SJeff Garzik 				else
550355a8e2c8STejun Heo 					/* HSM violation. Let EH handle this.
550455a8e2c8STejun Heo 					 * Phantom devices also trigger this
550555a8e2c8STejun Heo 					 * condition.  Mark hint.
550655a8e2c8STejun Heo 					 */
550755a8e2c8STejun Heo 					qc->err_mask |= AC_ERR_HSM |
550855a8e2c8STejun Heo 							AC_ERR_NODEV_HINT;
5509c6fd2807SJeff Garzik 
5510c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5511c6fd2807SJeff Garzik 				goto fsm_start;
5512c6fd2807SJeff Garzik 			}
5513c6fd2807SJeff Garzik 
5514c6fd2807SJeff Garzik 			/* For PIO reads, some devices may ask for
5515c6fd2807SJeff Garzik 			 * data transfer (DRQ=1) alone with ERR=1.
5516c6fd2807SJeff Garzik 			 * We respect DRQ here and transfer one
5517c6fd2807SJeff Garzik 			 * block of junk data before changing the
5518c6fd2807SJeff Garzik 			 * hsm_task_state to HSM_ST_ERR.
5519c6fd2807SJeff Garzik 			 *
5520c6fd2807SJeff Garzik 			 * For PIO writes, ERR=1 DRQ=1 doesn't make
5521c6fd2807SJeff Garzik 			 * sense since the data block has been
5522c6fd2807SJeff Garzik 			 * transferred to the device.
5523c6fd2807SJeff Garzik 			 */
5524c6fd2807SJeff Garzik 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
5525c6fd2807SJeff Garzik 				/* data might be corrputed */
5526c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_DEV;
5527c6fd2807SJeff Garzik 
5528c6fd2807SJeff Garzik 				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5529c6fd2807SJeff Garzik 					ata_pio_sectors(qc);
5530c6fd2807SJeff Garzik 					status = ata_wait_idle(ap);
5531c6fd2807SJeff Garzik 				}
5532c6fd2807SJeff Garzik 
5533c6fd2807SJeff Garzik 				if (status & (ATA_BUSY | ATA_DRQ))
5534c6fd2807SJeff Garzik 					qc->err_mask |= AC_ERR_HSM;
5535c6fd2807SJeff Garzik 
5536c6fd2807SJeff Garzik 				/* ata_pio_sectors() might change the
5537c6fd2807SJeff Garzik 				 * state to HSM_ST_LAST. so, the state
5538c6fd2807SJeff Garzik 				 * is changed after ata_pio_sectors().
5539c6fd2807SJeff Garzik 				 */
5540c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
5541c6fd2807SJeff Garzik 				goto fsm_start;
5542c6fd2807SJeff Garzik 			}
5543c6fd2807SJeff Garzik 
5544c6fd2807SJeff Garzik 			ata_pio_sectors(qc);
5545c6fd2807SJeff Garzik 
5546c6fd2807SJeff Garzik 			if (ap->hsm_task_state == HSM_ST_LAST &&
5547c6fd2807SJeff Garzik 			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5548c6fd2807SJeff Garzik 				/* all data read */
5549c6fd2807SJeff Garzik 				status = ata_wait_idle(ap);
5550c6fd2807SJeff Garzik 				goto fsm_start;
5551c6fd2807SJeff Garzik 			}
5552c6fd2807SJeff Garzik 		}
5553c6fd2807SJeff Garzik 
5554c6fd2807SJeff Garzik 		poll_next = 1;
5555c6fd2807SJeff Garzik 		break;
5556c6fd2807SJeff Garzik 
5557c6fd2807SJeff Garzik 	case HSM_ST_LAST:
5558c6fd2807SJeff Garzik 		if (unlikely(!ata_ok(status))) {
5559c6fd2807SJeff Garzik 			qc->err_mask |= __ac_err_mask(status);
5560c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_ERR;
5561c6fd2807SJeff Garzik 			goto fsm_start;
5562c6fd2807SJeff Garzik 		}
5563c6fd2807SJeff Garzik 
5564c6fd2807SJeff Garzik 		/* no more data to transfer */
5565c6fd2807SJeff Garzik 		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
556644877b4eSTejun Heo 			ap->print_id, qc->dev->devno, status);
5567c6fd2807SJeff Garzik 
5568c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask);
5569c6fd2807SJeff Garzik 
5570c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5571c6fd2807SJeff Garzik 
5572c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5573c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5574c6fd2807SJeff Garzik 
5575c6fd2807SJeff Garzik 		poll_next = 0;
5576c6fd2807SJeff Garzik 		break;
5577c6fd2807SJeff Garzik 
5578c6fd2807SJeff Garzik 	case HSM_ST_ERR:
5579c6fd2807SJeff Garzik 		/* make sure qc->err_mask is available to
5580c6fd2807SJeff Garzik 		 * know what's wrong and recover
5581c6fd2807SJeff Garzik 		 */
5582c6fd2807SJeff Garzik 		WARN_ON(qc->err_mask == 0);
5583c6fd2807SJeff Garzik 
5584c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_IDLE;
5585c6fd2807SJeff Garzik 
5586c6fd2807SJeff Garzik 		/* complete taskfile transaction */
5587c6fd2807SJeff Garzik 		ata_hsm_qc_complete(qc, in_wq);
5588c6fd2807SJeff Garzik 
5589c6fd2807SJeff Garzik 		poll_next = 0;
5590c6fd2807SJeff Garzik 		break;
5591c6fd2807SJeff Garzik 	default:
5592c6fd2807SJeff Garzik 		poll_next = 0;
5593c6fd2807SJeff Garzik 		BUG();
5594c6fd2807SJeff Garzik 	}
5595c6fd2807SJeff Garzik 
5596c6fd2807SJeff Garzik 	return poll_next;
5597c6fd2807SJeff Garzik }
5598c6fd2807SJeff Garzik 
559965f27f38SDavid Howells static void ata_pio_task(struct work_struct *work)
5600c6fd2807SJeff Garzik {
560165f27f38SDavid Howells 	struct ata_port *ap =
560265f27f38SDavid Howells 		container_of(work, struct ata_port, port_task.work);
560365f27f38SDavid Howells 	struct ata_queued_cmd *qc = ap->port_task_data;
5604c6fd2807SJeff Garzik 	u8 status;
5605c6fd2807SJeff Garzik 	int poll_next;
5606c6fd2807SJeff Garzik 
5607c6fd2807SJeff Garzik fsm_start:
5608c6fd2807SJeff Garzik 	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5609c6fd2807SJeff Garzik 
5610c6fd2807SJeff Garzik 	/*
5611c6fd2807SJeff Garzik 	 * This is purely heuristic.  This is a fast path.
5612c6fd2807SJeff Garzik 	 * Sometimes when we enter, BSY will be cleared in
5613c6fd2807SJeff Garzik 	 * a chk-status or two.  If not, the drive is probably seeking
5614c6fd2807SJeff Garzik 	 * or something.  Snooze for a couple msecs, then
5615c6fd2807SJeff Garzik 	 * chk-status again.  If still busy, queue delayed work.
5616c6fd2807SJeff Garzik 	 */
5617c6fd2807SJeff Garzik 	status = ata_busy_wait(ap, ATA_BUSY, 5);
5618c6fd2807SJeff Garzik 	if (status & ATA_BUSY) {
5619c6fd2807SJeff Garzik 		msleep(2);
5620c6fd2807SJeff Garzik 		status = ata_busy_wait(ap, ATA_BUSY, 10);
5621c6fd2807SJeff Garzik 		if (status & ATA_BUSY) {
5622c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5623c6fd2807SJeff Garzik 			return;
5624c6fd2807SJeff Garzik 		}
5625c6fd2807SJeff Garzik 	}
5626c6fd2807SJeff Garzik 
5627c6fd2807SJeff Garzik 	/* move the HSM */
5628c6fd2807SJeff Garzik 	poll_next = ata_hsm_move(ap, qc, status, 1);
5629c6fd2807SJeff Garzik 
5630c6fd2807SJeff Garzik 	/* another command or interrupt handler
5631c6fd2807SJeff Garzik 	 * may be running at this point.
5632c6fd2807SJeff Garzik 	 */
5633c6fd2807SJeff Garzik 	if (poll_next)
5634c6fd2807SJeff Garzik 		goto fsm_start;
5635c6fd2807SJeff Garzik }
5636c6fd2807SJeff Garzik 
5637c6fd2807SJeff Garzik /**
5638c6fd2807SJeff Garzik  *	ata_qc_new - Request an available ATA command, for queueing
5639c6fd2807SJeff Garzik  *	@ap: Port associated with device @dev
5640c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5641c6fd2807SJeff Garzik  *
5642c6fd2807SJeff Garzik  *	LOCKING:
5643c6fd2807SJeff Garzik  *	None.
5644c6fd2807SJeff Garzik  */
5645c6fd2807SJeff Garzik 
5646c6fd2807SJeff Garzik static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5647c6fd2807SJeff Garzik {
5648c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc = NULL;
5649c6fd2807SJeff Garzik 	unsigned int i;
5650c6fd2807SJeff Garzik 
5651c6fd2807SJeff Garzik 	/* no command while frozen */
5652c6fd2807SJeff Garzik 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5653c6fd2807SJeff Garzik 		return NULL;
5654c6fd2807SJeff Garzik 
5655c6fd2807SJeff Garzik 	/* the last tag is reserved for internal command. */
5656c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5657c6fd2807SJeff Garzik 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5658c6fd2807SJeff Garzik 			qc = __ata_qc_from_tag(ap, i);
5659c6fd2807SJeff Garzik 			break;
5660c6fd2807SJeff Garzik 		}
5661c6fd2807SJeff Garzik 
5662c6fd2807SJeff Garzik 	if (qc)
5663c6fd2807SJeff Garzik 		qc->tag = i;
5664c6fd2807SJeff Garzik 
5665c6fd2807SJeff Garzik 	return qc;
5666c6fd2807SJeff Garzik }
5667c6fd2807SJeff Garzik 
5668c6fd2807SJeff Garzik /**
5669c6fd2807SJeff Garzik  *	ata_qc_new_init - Request an available ATA command, and initialize it
5670c6fd2807SJeff Garzik  *	@dev: Device from whom we request an available command structure
5671c6fd2807SJeff Garzik  *
5672c6fd2807SJeff Garzik  *	LOCKING:
5673c6fd2807SJeff Garzik  *	None.
5674c6fd2807SJeff Garzik  */
5675c6fd2807SJeff Garzik 
5676c6fd2807SJeff Garzik struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5677c6fd2807SJeff Garzik {
56789af5c9c9STejun Heo 	struct ata_port *ap = dev->link->ap;
5679c6fd2807SJeff Garzik 	struct ata_queued_cmd *qc;
5680c6fd2807SJeff Garzik 
5681c6fd2807SJeff Garzik 	qc = ata_qc_new(ap);
5682c6fd2807SJeff Garzik 	if (qc) {
5683c6fd2807SJeff Garzik 		qc->scsicmd = NULL;
5684c6fd2807SJeff Garzik 		qc->ap = ap;
5685c6fd2807SJeff Garzik 		qc->dev = dev;
5686c6fd2807SJeff Garzik 
5687c6fd2807SJeff Garzik 		ata_qc_reinit(qc);
5688c6fd2807SJeff Garzik 	}
5689c6fd2807SJeff Garzik 
5690c6fd2807SJeff Garzik 	return qc;
5691c6fd2807SJeff Garzik }
5692c6fd2807SJeff Garzik 
5693c6fd2807SJeff Garzik /**
5694c6fd2807SJeff Garzik  *	ata_qc_free - free unused ata_queued_cmd
5695c6fd2807SJeff Garzik  *	@qc: Command to complete
5696c6fd2807SJeff Garzik  *
5697c6fd2807SJeff Garzik  *	Designed to free unused ata_queued_cmd object
5698c6fd2807SJeff Garzik  *	in case something prevents using it.
5699c6fd2807SJeff Garzik  *
5700c6fd2807SJeff Garzik  *	LOCKING:
5701cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5702c6fd2807SJeff Garzik  */
5703c6fd2807SJeff Garzik void ata_qc_free(struct ata_queued_cmd *qc)
5704c6fd2807SJeff Garzik {
5705c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5706c6fd2807SJeff Garzik 	unsigned int tag;
5707c6fd2807SJeff Garzik 
5708c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5709c6fd2807SJeff Garzik 
5710c6fd2807SJeff Garzik 	qc->flags = 0;
5711c6fd2807SJeff Garzik 	tag = qc->tag;
5712c6fd2807SJeff Garzik 	if (likely(ata_tag_valid(tag))) {
5713c6fd2807SJeff Garzik 		qc->tag = ATA_TAG_POISON;
5714c6fd2807SJeff Garzik 		clear_bit(tag, &ap->qc_allocated);
5715c6fd2807SJeff Garzik 	}
5716c6fd2807SJeff Garzik }
5717c6fd2807SJeff Garzik 
5718c6fd2807SJeff Garzik void __ata_qc_complete(struct ata_queued_cmd *qc)
5719c6fd2807SJeff Garzik {
5720c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
57219af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5722c6fd2807SJeff Garzik 
5723c6fd2807SJeff Garzik 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5724c6fd2807SJeff Garzik 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5725c6fd2807SJeff Garzik 
5726c6fd2807SJeff Garzik 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5727c6fd2807SJeff Garzik 		ata_sg_clean(qc);
5728c6fd2807SJeff Garzik 
5729c6fd2807SJeff Garzik 	/* command should be marked inactive atomically with qc completion */
5730da917d69STejun Heo 	if (qc->tf.protocol == ATA_PROT_NCQ) {
57319af5c9c9STejun Heo 		link->sactive &= ~(1 << qc->tag);
5732da917d69STejun Heo 		if (!link->sactive)
5733da917d69STejun Heo 			ap->nr_active_links--;
5734da917d69STejun Heo 	} else {
57359af5c9c9STejun Heo 		link->active_tag = ATA_TAG_POISON;
5736da917d69STejun Heo 		ap->nr_active_links--;
5737da917d69STejun Heo 	}
5738da917d69STejun Heo 
5739da917d69STejun Heo 	/* clear exclusive status */
5740da917d69STejun Heo 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5741da917d69STejun Heo 		     ap->excl_link == link))
5742da917d69STejun Heo 		ap->excl_link = NULL;
5743c6fd2807SJeff Garzik 
5744c6fd2807SJeff Garzik 	/* atapi: mark qc as inactive to prevent the interrupt handler
5745c6fd2807SJeff Garzik 	 * from completing the command twice later, before the error handler
5746c6fd2807SJeff Garzik 	 * is called. (when rc != 0 and atapi request sense is needed)
5747c6fd2807SJeff Garzik 	 */
5748c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5749c6fd2807SJeff Garzik 	ap->qc_active &= ~(1 << qc->tag);
5750c6fd2807SJeff Garzik 
5751c6fd2807SJeff Garzik 	/* call completion callback */
5752c6fd2807SJeff Garzik 	qc->complete_fn(qc);
5753c6fd2807SJeff Garzik }
5754c6fd2807SJeff Garzik 
575539599a53STejun Heo static void fill_result_tf(struct ata_queued_cmd *qc)
575639599a53STejun Heo {
575739599a53STejun Heo 	struct ata_port *ap = qc->ap;
575839599a53STejun Heo 
575939599a53STejun Heo 	qc->result_tf.flags = qc->tf.flags;
57604742d54fSMark Lord 	ap->ops->tf_read(ap, &qc->result_tf);
576139599a53STejun Heo }
576239599a53STejun Heo 
5763c6fd2807SJeff Garzik /**
5764c6fd2807SJeff Garzik  *	ata_qc_complete - Complete an active ATA command
5765c6fd2807SJeff Garzik  *	@qc: Command to complete
5766c6fd2807SJeff Garzik  *	@err_mask: ATA Status register contents
5767c6fd2807SJeff Garzik  *
5768c6fd2807SJeff Garzik  *	Indicate to the mid and upper layers that an ATA
5769c6fd2807SJeff Garzik  *	command has completed, with either an ok or not-ok status.
5770c6fd2807SJeff Garzik  *
5771c6fd2807SJeff Garzik  *	LOCKING:
5772cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5773c6fd2807SJeff Garzik  */
5774c6fd2807SJeff Garzik void ata_qc_complete(struct ata_queued_cmd *qc)
5775c6fd2807SJeff Garzik {
5776c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5777c6fd2807SJeff Garzik 
5778c6fd2807SJeff Garzik 	/* XXX: New EH and old EH use different mechanisms to
5779c6fd2807SJeff Garzik 	 * synchronize EH with regular execution path.
5780c6fd2807SJeff Garzik 	 *
5781c6fd2807SJeff Garzik 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5782c6fd2807SJeff Garzik 	 * Normal execution path is responsible for not accessing a
5783c6fd2807SJeff Garzik 	 * failed qc.  libata core enforces the rule by returning NULL
5784c6fd2807SJeff Garzik 	 * from ata_qc_from_tag() for failed qcs.
5785c6fd2807SJeff Garzik 	 *
5786c6fd2807SJeff Garzik 	 * Old EH depends on ata_qc_complete() nullifying completion
5787c6fd2807SJeff Garzik 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
5788c6fd2807SJeff Garzik 	 * not synchronize with interrupt handler.  Only PIO task is
5789c6fd2807SJeff Garzik 	 * taken care of.
5790c6fd2807SJeff Garzik 	 */
5791c6fd2807SJeff Garzik 	if (ap->ops->error_handler) {
57924dbfa39bSTejun Heo 		struct ata_device *dev = qc->dev;
57934dbfa39bSTejun Heo 		struct ata_eh_info *ehi = &dev->link->eh_info;
57944dbfa39bSTejun Heo 
5795c6fd2807SJeff Garzik 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5796c6fd2807SJeff Garzik 
5797c6fd2807SJeff Garzik 		if (unlikely(qc->err_mask))
5798c6fd2807SJeff Garzik 			qc->flags |= ATA_QCFLAG_FAILED;
5799c6fd2807SJeff Garzik 
5800c6fd2807SJeff Garzik 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5801c6fd2807SJeff Garzik 			if (!ata_tag_internal(qc->tag)) {
5802c6fd2807SJeff Garzik 				/* always fill result TF for failed qc */
580339599a53STejun Heo 				fill_result_tf(qc);
5804c6fd2807SJeff Garzik 				ata_qc_schedule_eh(qc);
5805c6fd2807SJeff Garzik 				return;
5806c6fd2807SJeff Garzik 			}
5807c6fd2807SJeff Garzik 		}
5808c6fd2807SJeff Garzik 
5809c6fd2807SJeff Garzik 		/* read result TF if requested */
5810c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
581139599a53STejun Heo 			fill_result_tf(qc);
5812c6fd2807SJeff Garzik 
58134dbfa39bSTejun Heo 		/* Some commands need post-processing after successful
58144dbfa39bSTejun Heo 		 * completion.
58154dbfa39bSTejun Heo 		 */
58164dbfa39bSTejun Heo 		switch (qc->tf.command) {
58174dbfa39bSTejun Heo 		case ATA_CMD_SET_FEATURES:
58184dbfa39bSTejun Heo 			if (qc->tf.feature != SETFEATURES_WC_ON &&
58194dbfa39bSTejun Heo 			    qc->tf.feature != SETFEATURES_WC_OFF)
58204dbfa39bSTejun Heo 				break;
58214dbfa39bSTejun Heo 			/* fall through */
58224dbfa39bSTejun Heo 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
58234dbfa39bSTejun Heo 		case ATA_CMD_SET_MULTI: /* multi_count changed */
58244dbfa39bSTejun Heo 			/* revalidate device */
58254dbfa39bSTejun Heo 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
58264dbfa39bSTejun Heo 			ata_port_schedule_eh(ap);
58274dbfa39bSTejun Heo 			break;
5828054a5fbaSTejun Heo 
5829054a5fbaSTejun Heo 		case ATA_CMD_SLEEP:
5830054a5fbaSTejun Heo 			dev->flags |= ATA_DFLAG_SLEEPING;
5831054a5fbaSTejun Heo 			break;
58324dbfa39bSTejun Heo 		}
58334dbfa39bSTejun Heo 
5834c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5835c6fd2807SJeff Garzik 	} else {
5836c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5837c6fd2807SJeff Garzik 			return;
5838c6fd2807SJeff Garzik 
5839c6fd2807SJeff Garzik 		/* read result TF if failed or requested */
5840c6fd2807SJeff Garzik 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
584139599a53STejun Heo 			fill_result_tf(qc);
5842c6fd2807SJeff Garzik 
5843c6fd2807SJeff Garzik 		__ata_qc_complete(qc);
5844c6fd2807SJeff Garzik 	}
5845c6fd2807SJeff Garzik }
5846c6fd2807SJeff Garzik 
5847c6fd2807SJeff Garzik /**
5848c6fd2807SJeff Garzik  *	ata_qc_complete_multiple - Complete multiple qcs successfully
5849c6fd2807SJeff Garzik  *	@ap: port in question
5850c6fd2807SJeff Garzik  *	@qc_active: new qc_active mask
5851c6fd2807SJeff Garzik  *	@finish_qc: LLDD callback invoked before completing a qc
5852c6fd2807SJeff Garzik  *
5853c6fd2807SJeff Garzik  *	Complete in-flight commands.  This functions is meant to be
5854c6fd2807SJeff Garzik  *	called from low-level driver's interrupt routine to complete
5855c6fd2807SJeff Garzik  *	requests normally.  ap->qc_active and @qc_active is compared
5856c6fd2807SJeff Garzik  *	and commands are completed accordingly.
5857c6fd2807SJeff Garzik  *
5858c6fd2807SJeff Garzik  *	LOCKING:
5859cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5860c6fd2807SJeff Garzik  *
5861c6fd2807SJeff Garzik  *	RETURNS:
5862c6fd2807SJeff Garzik  *	Number of completed commands on success, -errno otherwise.
5863c6fd2807SJeff Garzik  */
5864c6fd2807SJeff Garzik int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5865c6fd2807SJeff Garzik 			     void (*finish_qc)(struct ata_queued_cmd *))
5866c6fd2807SJeff Garzik {
5867c6fd2807SJeff Garzik 	int nr_done = 0;
5868c6fd2807SJeff Garzik 	u32 done_mask;
5869c6fd2807SJeff Garzik 	int i;
5870c6fd2807SJeff Garzik 
5871c6fd2807SJeff Garzik 	done_mask = ap->qc_active ^ qc_active;
5872c6fd2807SJeff Garzik 
5873c6fd2807SJeff Garzik 	if (unlikely(done_mask & qc_active)) {
5874c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5875c6fd2807SJeff Garzik 				"(%08x->%08x)\n", ap->qc_active, qc_active);
5876c6fd2807SJeff Garzik 		return -EINVAL;
5877c6fd2807SJeff Garzik 	}
5878c6fd2807SJeff Garzik 
5879c6fd2807SJeff Garzik 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5880c6fd2807SJeff Garzik 		struct ata_queued_cmd *qc;
5881c6fd2807SJeff Garzik 
5882c6fd2807SJeff Garzik 		if (!(done_mask & (1 << i)))
5883c6fd2807SJeff Garzik 			continue;
5884c6fd2807SJeff Garzik 
5885c6fd2807SJeff Garzik 		if ((qc = ata_qc_from_tag(ap, i))) {
5886c6fd2807SJeff Garzik 			if (finish_qc)
5887c6fd2807SJeff Garzik 				finish_qc(qc);
5888c6fd2807SJeff Garzik 			ata_qc_complete(qc);
5889c6fd2807SJeff Garzik 			nr_done++;
5890c6fd2807SJeff Garzik 		}
5891c6fd2807SJeff Garzik 	}
5892c6fd2807SJeff Garzik 
5893c6fd2807SJeff Garzik 	return nr_done;
5894c6fd2807SJeff Garzik }
5895c6fd2807SJeff Garzik 
5896c6fd2807SJeff Garzik static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5897c6fd2807SJeff Garzik {
5898c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
5899c6fd2807SJeff Garzik 
5900c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
5901c6fd2807SJeff Garzik 	case ATA_PROT_NCQ:
5902c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
5903c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
5904c6fd2807SJeff Garzik 		return 1;
5905c6fd2807SJeff Garzik 
5906c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
5907c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
5908c6fd2807SJeff Garzik 		if (ap->flags & ATA_FLAG_PIO_DMA)
5909c6fd2807SJeff Garzik 			return 1;
5910c6fd2807SJeff Garzik 
5911c6fd2807SJeff Garzik 		/* fall through */
5912c6fd2807SJeff Garzik 
5913c6fd2807SJeff Garzik 	default:
5914c6fd2807SJeff Garzik 		return 0;
5915c6fd2807SJeff Garzik 	}
5916c6fd2807SJeff Garzik 
5917c6fd2807SJeff Garzik 	/* never reached */
5918c6fd2807SJeff Garzik }
5919c6fd2807SJeff Garzik 
5920c6fd2807SJeff Garzik /**
5921c6fd2807SJeff Garzik  *	ata_qc_issue - issue taskfile to device
5922c6fd2807SJeff Garzik  *	@qc: command to issue to device
5923c6fd2807SJeff Garzik  *
5924c6fd2807SJeff Garzik  *	Prepare an ATA command to submission to device.
5925c6fd2807SJeff Garzik  *	This includes mapping the data into a DMA-able
5926c6fd2807SJeff Garzik  *	area, filling in the S/G table, and finally
5927c6fd2807SJeff Garzik  *	writing the taskfile to hardware, starting the command.
5928c6fd2807SJeff Garzik  *
5929c6fd2807SJeff Garzik  *	LOCKING:
5930cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
5931c6fd2807SJeff Garzik  */
5932c6fd2807SJeff Garzik void ata_qc_issue(struct ata_queued_cmd *qc)
5933c6fd2807SJeff Garzik {
5934c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
59359af5c9c9STejun Heo 	struct ata_link *link = qc->dev->link;
5936c6fd2807SJeff Garzik 
5937c6fd2807SJeff Garzik 	/* Make sure only one non-NCQ command is outstanding.  The
5938c6fd2807SJeff Garzik 	 * check is skipped for old EH because it reuses active qc to
5939c6fd2807SJeff Garzik 	 * request ATAPI sense.
5940c6fd2807SJeff Garzik 	 */
59419af5c9c9STejun Heo 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5942c6fd2807SJeff Garzik 
5943c6fd2807SJeff Garzik 	if (qc->tf.protocol == ATA_PROT_NCQ) {
59449af5c9c9STejun Heo 		WARN_ON(link->sactive & (1 << qc->tag));
5945da917d69STejun Heo 
5946da917d69STejun Heo 		if (!link->sactive)
5947da917d69STejun Heo 			ap->nr_active_links++;
59489af5c9c9STejun Heo 		link->sactive |= 1 << qc->tag;
5949c6fd2807SJeff Garzik 	} else {
59509af5c9c9STejun Heo 		WARN_ON(link->sactive);
5951da917d69STejun Heo 
5952da917d69STejun Heo 		ap->nr_active_links++;
59539af5c9c9STejun Heo 		link->active_tag = qc->tag;
5954c6fd2807SJeff Garzik 	}
5955c6fd2807SJeff Garzik 
5956c6fd2807SJeff Garzik 	qc->flags |= ATA_QCFLAG_ACTIVE;
5957c6fd2807SJeff Garzik 	ap->qc_active |= 1 << qc->tag;
5958c6fd2807SJeff Garzik 
5959c6fd2807SJeff Garzik 	if (ata_should_dma_map(qc)) {
5960c6fd2807SJeff Garzik 		if (qc->flags & ATA_QCFLAG_SG) {
5961c6fd2807SJeff Garzik 			if (ata_sg_setup(qc))
5962c6fd2807SJeff Garzik 				goto sg_err;
5963c6fd2807SJeff Garzik 		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5964c6fd2807SJeff Garzik 			if (ata_sg_setup_one(qc))
5965c6fd2807SJeff Garzik 				goto sg_err;
5966c6fd2807SJeff Garzik 		}
5967c6fd2807SJeff Garzik 	} else {
5968c6fd2807SJeff Garzik 		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5969c6fd2807SJeff Garzik 	}
5970c6fd2807SJeff Garzik 
5971054a5fbaSTejun Heo 	/* if device is sleeping, schedule softreset and abort the link */
5972054a5fbaSTejun Heo 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5973054a5fbaSTejun Heo 		link->eh_info.action |= ATA_EH_SOFTRESET;
5974054a5fbaSTejun Heo 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5975054a5fbaSTejun Heo 		ata_link_abort(link);
5976054a5fbaSTejun Heo 		return;
5977054a5fbaSTejun Heo 	}
5978054a5fbaSTejun Heo 
5979c6fd2807SJeff Garzik 	ap->ops->qc_prep(qc);
5980c6fd2807SJeff Garzik 
5981c6fd2807SJeff Garzik 	qc->err_mask |= ap->ops->qc_issue(qc);
5982c6fd2807SJeff Garzik 	if (unlikely(qc->err_mask))
5983c6fd2807SJeff Garzik 		goto err;
5984c6fd2807SJeff Garzik 	return;
5985c6fd2807SJeff Garzik 
5986c6fd2807SJeff Garzik sg_err:
5987c6fd2807SJeff Garzik 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5988c6fd2807SJeff Garzik 	qc->err_mask |= AC_ERR_SYSTEM;
5989c6fd2807SJeff Garzik err:
5990c6fd2807SJeff Garzik 	ata_qc_complete(qc);
5991c6fd2807SJeff Garzik }
5992c6fd2807SJeff Garzik 
5993c6fd2807SJeff Garzik /**
5994c6fd2807SJeff Garzik  *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5995c6fd2807SJeff Garzik  *	@qc: command to issue to device
5996c6fd2807SJeff Garzik  *
5997c6fd2807SJeff Garzik  *	Using various libata functions and hooks, this function
5998c6fd2807SJeff Garzik  *	starts an ATA command.  ATA commands are grouped into
5999c6fd2807SJeff Garzik  *	classes called "protocols", and issuing each type of protocol
6000c6fd2807SJeff Garzik  *	is slightly different.
6001c6fd2807SJeff Garzik  *
6002c6fd2807SJeff Garzik  *	May be used as the qc_issue() entry in ata_port_operations.
6003c6fd2807SJeff Garzik  *
6004c6fd2807SJeff Garzik  *	LOCKING:
6005cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6006c6fd2807SJeff Garzik  *
6007c6fd2807SJeff Garzik  *	RETURNS:
6008c6fd2807SJeff Garzik  *	Zero on success, AC_ERR_* mask on failure
6009c6fd2807SJeff Garzik  */
6010c6fd2807SJeff Garzik 
6011c6fd2807SJeff Garzik unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6012c6fd2807SJeff Garzik {
6013c6fd2807SJeff Garzik 	struct ata_port *ap = qc->ap;
6014c6fd2807SJeff Garzik 
6015c6fd2807SJeff Garzik 	/* Use polling pio if the LLD doesn't handle
6016c6fd2807SJeff Garzik 	 * interrupt driven pio and atapi CDB interrupt.
6017c6fd2807SJeff Garzik 	 */
6018c6fd2807SJeff Garzik 	if (ap->flags & ATA_FLAG_PIO_POLLING) {
6019c6fd2807SJeff Garzik 		switch (qc->tf.protocol) {
6020c6fd2807SJeff Garzik 		case ATA_PROT_PIO:
6021e3472cbeSAlbert Lee 		case ATA_PROT_NODATA:
6022c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI:
6023c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_NODATA:
6024c6fd2807SJeff Garzik 			qc->tf.flags |= ATA_TFLAG_POLLING;
6025c6fd2807SJeff Garzik 			break;
6026c6fd2807SJeff Garzik 		case ATA_PROT_ATAPI_DMA:
6027c6fd2807SJeff Garzik 			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6028c6fd2807SJeff Garzik 				/* see ata_dma_blacklisted() */
6029c6fd2807SJeff Garzik 				BUG();
6030c6fd2807SJeff Garzik 			break;
6031c6fd2807SJeff Garzik 		default:
6032c6fd2807SJeff Garzik 			break;
6033c6fd2807SJeff Garzik 		}
6034c6fd2807SJeff Garzik 	}
6035c6fd2807SJeff Garzik 
6036c6fd2807SJeff Garzik 	/* select the device */
6037c6fd2807SJeff Garzik 	ata_dev_select(ap, qc->dev->devno, 1, 0);
6038c6fd2807SJeff Garzik 
6039c6fd2807SJeff Garzik 	/* start the command */
6040c6fd2807SJeff Garzik 	switch (qc->tf.protocol) {
6041c6fd2807SJeff Garzik 	case ATA_PROT_NODATA:
6042c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6043c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6044c6fd2807SJeff Garzik 
6045c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6046c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6047c6fd2807SJeff Garzik 
6048c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6049c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6050c6fd2807SJeff Garzik 
6051c6fd2807SJeff Garzik 		break;
6052c6fd2807SJeff Garzik 
6053c6fd2807SJeff Garzik 	case ATA_PROT_DMA:
6054c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6055c6fd2807SJeff Garzik 
6056c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6057c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6058c6fd2807SJeff Garzik 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
6059c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_LAST;
6060c6fd2807SJeff Garzik 		break;
6061c6fd2807SJeff Garzik 
6062c6fd2807SJeff Garzik 	case ATA_PROT_PIO:
6063c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6064c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6065c6fd2807SJeff Garzik 
6066c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6067c6fd2807SJeff Garzik 
6068c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_WRITE) {
6069c6fd2807SJeff Garzik 			/* PIO data out protocol */
6070c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST_FIRST;
6071c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6072c6fd2807SJeff Garzik 
6073c6fd2807SJeff Garzik 			/* always send first data block using
6074c6fd2807SJeff Garzik 			 * the ata_pio_task() codepath.
6075c6fd2807SJeff Garzik 			 */
6076c6fd2807SJeff Garzik 		} else {
6077c6fd2807SJeff Garzik 			/* PIO data in protocol */
6078c6fd2807SJeff Garzik 			ap->hsm_task_state = HSM_ST;
6079c6fd2807SJeff Garzik 
6080c6fd2807SJeff Garzik 			if (qc->tf.flags & ATA_TFLAG_POLLING)
6081c6fd2807SJeff Garzik 				ata_port_queue_task(ap, ata_pio_task, qc, 0);
6082c6fd2807SJeff Garzik 
6083c6fd2807SJeff Garzik 			/* if polling, ata_pio_task() handles the rest.
6084c6fd2807SJeff Garzik 			 * otherwise, interrupt handler takes over from here.
6085c6fd2807SJeff Garzik 			 */
6086c6fd2807SJeff Garzik 		}
6087c6fd2807SJeff Garzik 
6088c6fd2807SJeff Garzik 		break;
6089c6fd2807SJeff Garzik 
6090c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI:
6091c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_NODATA:
6092c6fd2807SJeff Garzik 		if (qc->tf.flags & ATA_TFLAG_POLLING)
6093c6fd2807SJeff Garzik 			ata_qc_set_polling(qc);
6094c6fd2807SJeff Garzik 
6095c6fd2807SJeff Garzik 		ata_tf_to_host(ap, &qc->tf);
6096c6fd2807SJeff Garzik 
6097c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6098c6fd2807SJeff Garzik 
6099c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6100c6fd2807SJeff Garzik 		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6101c6fd2807SJeff Garzik 		    (qc->tf.flags & ATA_TFLAG_POLLING))
6102c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6103c6fd2807SJeff Garzik 		break;
6104c6fd2807SJeff Garzik 
6105c6fd2807SJeff Garzik 	case ATA_PROT_ATAPI_DMA:
6106c6fd2807SJeff Garzik 		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6107c6fd2807SJeff Garzik 
6108c6fd2807SJeff Garzik 		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
6109c6fd2807SJeff Garzik 		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
6110c6fd2807SJeff Garzik 		ap->hsm_task_state = HSM_ST_FIRST;
6111c6fd2807SJeff Garzik 
6112c6fd2807SJeff Garzik 		/* send cdb by polling if no cdb interrupt */
6113c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6114c6fd2807SJeff Garzik 			ata_port_queue_task(ap, ata_pio_task, qc, 0);
6115c6fd2807SJeff Garzik 		break;
6116c6fd2807SJeff Garzik 
6117c6fd2807SJeff Garzik 	default:
6118c6fd2807SJeff Garzik 		WARN_ON(1);
6119c6fd2807SJeff Garzik 		return AC_ERR_SYSTEM;
6120c6fd2807SJeff Garzik 	}
6121c6fd2807SJeff Garzik 
6122c6fd2807SJeff Garzik 	return 0;
6123c6fd2807SJeff Garzik }
6124c6fd2807SJeff Garzik 
6125c6fd2807SJeff Garzik /**
6126c6fd2807SJeff Garzik  *	ata_host_intr - Handle host interrupt for given (port, task)
6127c6fd2807SJeff Garzik  *	@ap: Port on which interrupt arrived (possibly...)
6128c6fd2807SJeff Garzik  *	@qc: Taskfile currently active in engine
6129c6fd2807SJeff Garzik  *
6130c6fd2807SJeff Garzik  *	Handle host interrupt for given queued command.  Currently,
6131c6fd2807SJeff Garzik  *	only DMA interrupts are handled.  All other commands are
6132c6fd2807SJeff Garzik  *	handled via polling with interrupts disabled (nIEN bit).
6133c6fd2807SJeff Garzik  *
6134c6fd2807SJeff Garzik  *	LOCKING:
6135cca3974eSJeff Garzik  *	spin_lock_irqsave(host lock)
6136c6fd2807SJeff Garzik  *
6137c6fd2807SJeff Garzik  *	RETURNS:
6138c6fd2807SJeff Garzik  *	One if interrupt was handled, zero if not (shared irq).
6139c6fd2807SJeff Garzik  */
6140c6fd2807SJeff Garzik 
6141c6fd2807SJeff Garzik inline unsigned int ata_host_intr(struct ata_port *ap,
6142c6fd2807SJeff Garzik 				  struct ata_queued_cmd *qc)
6143c6fd2807SJeff Garzik {
61449af5c9c9STejun Heo 	struct ata_eh_info *ehi = &ap->link.eh_info;
6145c6fd2807SJeff Garzik 	u8 status, host_stat = 0;
6146c6fd2807SJeff Garzik 
6147c6fd2807SJeff Garzik 	VPRINTK("ata%u: protocol %d task_state %d\n",
614844877b4eSTejun Heo 		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6149c6fd2807SJeff Garzik 
6150c6fd2807SJeff Garzik 	/* Check whether we are expecting interrupt in this state */
6151c6fd2807SJeff Garzik 	switch (ap->hsm_task_state) {
6152c6fd2807SJeff Garzik 	case HSM_ST_FIRST:
6153c6fd2807SJeff Garzik 		/* Some pre-ATAPI-4 devices assert INTRQ
6154c6fd2807SJeff Garzik 		 * at this state when ready to receive CDB.
6155c6fd2807SJeff Garzik 		 */
6156c6fd2807SJeff Garzik 
6157c6fd2807SJeff Garzik 		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
6158c6fd2807SJeff Garzik 		 * The flag was turned on only for atapi devices.
6159c6fd2807SJeff Garzik 		 * No need to check is_atapi_taskfile(&qc->tf) again.
6160c6fd2807SJeff Garzik 		 */
6161c6fd2807SJeff Garzik 		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6162c6fd2807SJeff Garzik 			goto idle_irq;
6163c6fd2807SJeff Garzik 		break;
6164c6fd2807SJeff Garzik 	case HSM_ST_LAST:
6165c6fd2807SJeff Garzik 		if (qc->tf.protocol == ATA_PROT_DMA ||
6166c6fd2807SJeff Garzik 		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6167c6fd2807SJeff Garzik 			/* check status of DMA engine */
6168c6fd2807SJeff Garzik 			host_stat = ap->ops->bmdma_status(ap);
616944877b4eSTejun Heo 			VPRINTK("ata%u: host_stat 0x%X\n",
617044877b4eSTejun Heo 				ap->print_id, host_stat);
6171c6fd2807SJeff Garzik 
6172c6fd2807SJeff Garzik 			/* if it's not our irq... */
6173c6fd2807SJeff Garzik 			if (!(host_stat & ATA_DMA_INTR))
6174c6fd2807SJeff Garzik 				goto idle_irq;
6175c6fd2807SJeff Garzik 
6176c6fd2807SJeff Garzik 			/* before we do anything else, clear DMA-Start bit */
6177c6fd2807SJeff Garzik 			ap->ops->bmdma_stop(qc);
6178c6fd2807SJeff Garzik 
6179c6fd2807SJeff Garzik 			if (unlikely(host_stat & ATA_DMA_ERR)) {
6180c6fd2807SJeff Garzik 				/* error when transfering data to/from memory */
6181c6fd2807SJeff Garzik 				qc->err_mask |= AC_ERR_HOST_BUS;
6182c6fd2807SJeff Garzik 				ap->hsm_task_state = HSM_ST_ERR;
6183c6fd2807SJeff Garzik 			}
6184c6fd2807SJeff Garzik 		}
6185c6fd2807SJeff Garzik 		break;
6186c6fd2807SJeff Garzik 	case HSM_ST:
6187c6fd2807SJeff Garzik 		break;
6188c6fd2807SJeff Garzik 	default:
6189c6fd2807SJeff Garzik 		goto idle_irq;
6190c6fd2807SJeff Garzik 	}
6191c6fd2807SJeff Garzik 
6192c6fd2807SJeff Garzik 	/* check altstatus */
6193c6fd2807SJeff Garzik 	status = ata_altstatus(ap);
6194c6fd2807SJeff Garzik 	if (status & ATA_BUSY)
6195c6fd2807SJeff Garzik 		goto idle_irq;
6196c6fd2807SJeff Garzik 
6197c6fd2807SJeff Garzik 	/* check main status, clearing INTRQ */
6198c6fd2807SJeff Garzik 	status = ata_chk_status(ap);
6199c6fd2807SJeff Garzik 	if (unlikely(status & ATA_BUSY))
6200c6fd2807SJeff Garzik 		goto idle_irq;
6201c6fd2807SJeff Garzik 
6202c6fd2807SJeff Garzik 	/* ack bmdma irq events */
6203c6fd2807SJeff Garzik 	ap->ops->irq_clear(ap);
6204c6fd2807SJeff Garzik 
6205c6fd2807SJeff Garzik 	ata_hsm_move(ap, qc, status, 0);
6206ea54763fSTejun Heo 
6207ea54763fSTejun Heo 	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6208ea54763fSTejun Heo 				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6209ea54763fSTejun Heo 		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6210ea54763fSTejun Heo 
6211c6fd2807SJeff Garzik 	return 1;	/* irq handled */
6212c6fd2807SJeff Garzik 
6213c6fd2807SJeff Garzik idle_irq:
6214c6fd2807SJeff Garzik 	ap->stats.idle_irq++;
6215c6fd2807SJeff Garzik 
6216c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6217c6fd2807SJeff Garzik 	if ((ap->stats.idle_irq % 1000) == 0) {
62186d32d30fSJeff Garzik 		ata_chk_status(ap);
62196d32d30fSJeff Garzik 		ap->ops->irq_clear(ap);
6220c6fd2807SJeff Garzik 		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6221c6fd2807SJeff Garzik 		return 1;
6222c6fd2807SJeff Garzik 	}
6223c6fd2807SJeff Garzik #endif
6224c6fd2807SJeff Garzik 	return 0;	/* irq not handled */
6225c6fd2807SJeff Garzik }
6226c6fd2807SJeff Garzik 
6227c6fd2807SJeff Garzik /**
6228c6fd2807SJeff Garzik  *	ata_interrupt - Default ATA host interrupt handler
6229c6fd2807SJeff Garzik  *	@irq: irq line (unused)
6230cca3974eSJeff Garzik  *	@dev_instance: pointer to our ata_host information structure
6231c6fd2807SJeff Garzik  *
6232c6fd2807SJeff Garzik  *	Default interrupt handler for PCI IDE devices.  Calls
6233c6fd2807SJeff Garzik  *	ata_host_intr() for each port that is not disabled.
6234c6fd2807SJeff Garzik  *
6235c6fd2807SJeff Garzik  *	LOCKING:
6236cca3974eSJeff Garzik  *	Obtains host lock during operation.
6237c6fd2807SJeff Garzik  *
6238c6fd2807SJeff Garzik  *	RETURNS:
6239c6fd2807SJeff Garzik  *	IRQ_NONE or IRQ_HANDLED.
6240c6fd2807SJeff Garzik  */
6241c6fd2807SJeff Garzik 
62427d12e780SDavid Howells irqreturn_t ata_interrupt(int irq, void *dev_instance)
6243c6fd2807SJeff Garzik {
6244cca3974eSJeff Garzik 	struct ata_host *host = dev_instance;
6245c6fd2807SJeff Garzik 	unsigned int i;
6246c6fd2807SJeff Garzik 	unsigned int handled = 0;
6247c6fd2807SJeff Garzik 	unsigned long flags;
6248c6fd2807SJeff Garzik 
6249c6fd2807SJeff Garzik 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6250cca3974eSJeff Garzik 	spin_lock_irqsave(&host->lock, flags);
6251c6fd2807SJeff Garzik 
6252cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6253c6fd2807SJeff Garzik 		struct ata_port *ap;
6254c6fd2807SJeff Garzik 
6255cca3974eSJeff Garzik 		ap = host->ports[i];
6256c6fd2807SJeff Garzik 		if (ap &&
6257c6fd2807SJeff Garzik 		    !(ap->flags & ATA_FLAG_DISABLED)) {
6258c6fd2807SJeff Garzik 			struct ata_queued_cmd *qc;
6259c6fd2807SJeff Garzik 
62609af5c9c9STejun Heo 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
6261c6fd2807SJeff Garzik 			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6262c6fd2807SJeff Garzik 			    (qc->flags & ATA_QCFLAG_ACTIVE))
6263c6fd2807SJeff Garzik 				handled |= ata_host_intr(ap, qc);
6264c6fd2807SJeff Garzik 		}
6265c6fd2807SJeff Garzik 	}
6266c6fd2807SJeff Garzik 
6267cca3974eSJeff Garzik 	spin_unlock_irqrestore(&host->lock, flags);
6268c6fd2807SJeff Garzik 
6269c6fd2807SJeff Garzik 	return IRQ_RETVAL(handled);
6270c6fd2807SJeff Garzik }
6271c6fd2807SJeff Garzik 
6272c6fd2807SJeff Garzik /**
6273c6fd2807SJeff Garzik  *	sata_scr_valid - test whether SCRs are accessible
6274936fd732STejun Heo  *	@link: ATA link to test SCR accessibility for
6275c6fd2807SJeff Garzik  *
6276936fd732STejun Heo  *	Test whether SCRs are accessible for @link.
6277c6fd2807SJeff Garzik  *
6278c6fd2807SJeff Garzik  *	LOCKING:
6279c6fd2807SJeff Garzik  *	None.
6280c6fd2807SJeff Garzik  *
6281c6fd2807SJeff Garzik  *	RETURNS:
6282c6fd2807SJeff Garzik  *	1 if SCRs are accessible, 0 otherwise.
6283c6fd2807SJeff Garzik  */
6284936fd732STejun Heo int sata_scr_valid(struct ata_link *link)
6285c6fd2807SJeff Garzik {
6286936fd732STejun Heo 	struct ata_port *ap = link->ap;
6287936fd732STejun Heo 
6288a16abc0bSTejun Heo 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6289c6fd2807SJeff Garzik }
6290c6fd2807SJeff Garzik 
6291c6fd2807SJeff Garzik /**
6292c6fd2807SJeff Garzik  *	sata_scr_read - read SCR register of the specified port
6293936fd732STejun Heo  *	@link: ATA link to read SCR for
6294c6fd2807SJeff Garzik  *	@reg: SCR to read
6295c6fd2807SJeff Garzik  *	@val: Place to store read value
6296c6fd2807SJeff Garzik  *
6297936fd732STejun Heo  *	Read SCR register @reg of @link into *@val.  This function is
6298633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6299633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6300c6fd2807SJeff Garzik  *
6301c6fd2807SJeff Garzik  *	LOCKING:
6302633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6303c6fd2807SJeff Garzik  *
6304c6fd2807SJeff Garzik  *	RETURNS:
6305c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6306c6fd2807SJeff Garzik  */
6307936fd732STejun Heo int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6308c6fd2807SJeff Garzik {
6309633273a3STejun Heo 	if (ata_is_host_link(link)) {
6310936fd732STejun Heo 		struct ata_port *ap = link->ap;
6311936fd732STejun Heo 
6312936fd732STejun Heo 		if (sata_scr_valid(link))
6313da3dbb17STejun Heo 			return ap->ops->scr_read(ap, reg, val);
6314c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6315c6fd2807SJeff Garzik 	}
6316c6fd2807SJeff Garzik 
6317633273a3STejun Heo 	return sata_pmp_scr_read(link, reg, val);
6318633273a3STejun Heo }
6319633273a3STejun Heo 
6320c6fd2807SJeff Garzik /**
6321c6fd2807SJeff Garzik  *	sata_scr_write - write SCR register of the specified port
6322936fd732STejun Heo  *	@link: ATA link to write SCR for
6323c6fd2807SJeff Garzik  *	@reg: SCR to write
6324c6fd2807SJeff Garzik  *	@val: value to write
6325c6fd2807SJeff Garzik  *
6326936fd732STejun Heo  *	Write @val to SCR register @reg of @link.  This function is
6327633273a3STejun Heo  *	guaranteed to succeed if @link is ap->link, the cable type of
6328633273a3STejun Heo  *	the port is SATA and the port implements ->scr_read.
6329c6fd2807SJeff Garzik  *
6330c6fd2807SJeff Garzik  *	LOCKING:
6331633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6332c6fd2807SJeff Garzik  *
6333c6fd2807SJeff Garzik  *	RETURNS:
6334c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6335c6fd2807SJeff Garzik  */
6336936fd732STejun Heo int sata_scr_write(struct ata_link *link, int reg, u32 val)
6337c6fd2807SJeff Garzik {
6338633273a3STejun Heo 	if (ata_is_host_link(link)) {
6339936fd732STejun Heo 		struct ata_port *ap = link->ap;
6340936fd732STejun Heo 
6341936fd732STejun Heo 		if (sata_scr_valid(link))
6342da3dbb17STejun Heo 			return ap->ops->scr_write(ap, reg, val);
6343c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6344c6fd2807SJeff Garzik 	}
6345c6fd2807SJeff Garzik 
6346633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6347633273a3STejun Heo }
6348633273a3STejun Heo 
6349c6fd2807SJeff Garzik /**
6350c6fd2807SJeff Garzik  *	sata_scr_write_flush - write SCR register of the specified port and flush
6351936fd732STejun Heo  *	@link: ATA link to write SCR for
6352c6fd2807SJeff Garzik  *	@reg: SCR to write
6353c6fd2807SJeff Garzik  *	@val: value to write
6354c6fd2807SJeff Garzik  *
6355c6fd2807SJeff Garzik  *	This function is identical to sata_scr_write() except that this
6356c6fd2807SJeff Garzik  *	function performs flush after writing to the register.
6357c6fd2807SJeff Garzik  *
6358c6fd2807SJeff Garzik  *	LOCKING:
6359633273a3STejun Heo  *	None if @link is ap->link.  Kernel thread context otherwise.
6360c6fd2807SJeff Garzik  *
6361c6fd2807SJeff Garzik  *	RETURNS:
6362c6fd2807SJeff Garzik  *	0 on success, negative errno on failure.
6363c6fd2807SJeff Garzik  */
6364936fd732STejun Heo int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6365c6fd2807SJeff Garzik {
6366633273a3STejun Heo 	if (ata_is_host_link(link)) {
6367936fd732STejun Heo 		struct ata_port *ap = link->ap;
6368da3dbb17STejun Heo 		int rc;
6369da3dbb17STejun Heo 
6370936fd732STejun Heo 		if (sata_scr_valid(link)) {
6371da3dbb17STejun Heo 			rc = ap->ops->scr_write(ap, reg, val);
6372da3dbb17STejun Heo 			if (rc == 0)
6373da3dbb17STejun Heo 				rc = ap->ops->scr_read(ap, reg, &val);
6374da3dbb17STejun Heo 			return rc;
6375c6fd2807SJeff Garzik 		}
6376c6fd2807SJeff Garzik 		return -EOPNOTSUPP;
6377c6fd2807SJeff Garzik 	}
6378c6fd2807SJeff Garzik 
6379633273a3STejun Heo 	return sata_pmp_scr_write(link, reg, val);
6380633273a3STejun Heo }
6381633273a3STejun Heo 
6382c6fd2807SJeff Garzik /**
6383936fd732STejun Heo  *	ata_link_online - test whether the given link is online
6384936fd732STejun Heo  *	@link: ATA link to test
6385c6fd2807SJeff Garzik  *
6386936fd732STejun Heo  *	Test whether @link is online.  Note that this function returns
6387936fd732STejun Heo  *	0 if online status of @link cannot be obtained, so
6388936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6389c6fd2807SJeff Garzik  *
6390c6fd2807SJeff Garzik  *	LOCKING:
6391c6fd2807SJeff Garzik  *	None.
6392c6fd2807SJeff Garzik  *
6393c6fd2807SJeff Garzik  *	RETURNS:
6394c6fd2807SJeff Garzik  *	1 if the port online status is available and online.
6395c6fd2807SJeff Garzik  */
6396936fd732STejun Heo int ata_link_online(struct ata_link *link)
6397c6fd2807SJeff Garzik {
6398c6fd2807SJeff Garzik 	u32 sstatus;
6399c6fd2807SJeff Garzik 
6400936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6401936fd732STejun Heo 	    (sstatus & 0xf) == 0x3)
6402c6fd2807SJeff Garzik 		return 1;
6403c6fd2807SJeff Garzik 	return 0;
6404c6fd2807SJeff Garzik }
6405c6fd2807SJeff Garzik 
6406c6fd2807SJeff Garzik /**
6407936fd732STejun Heo  *	ata_link_offline - test whether the given link is offline
6408936fd732STejun Heo  *	@link: ATA link to test
6409c6fd2807SJeff Garzik  *
6410936fd732STejun Heo  *	Test whether @link is offline.  Note that this function
6411936fd732STejun Heo  *	returns 0 if offline status of @link cannot be obtained, so
6412936fd732STejun Heo  *	ata_link_online(link) != !ata_link_offline(link).
6413c6fd2807SJeff Garzik  *
6414c6fd2807SJeff Garzik  *	LOCKING:
6415c6fd2807SJeff Garzik  *	None.
6416c6fd2807SJeff Garzik  *
6417c6fd2807SJeff Garzik  *	RETURNS:
6418c6fd2807SJeff Garzik  *	1 if the port offline status is available and offline.
6419c6fd2807SJeff Garzik  */
6420936fd732STejun Heo int ata_link_offline(struct ata_link *link)
6421c6fd2807SJeff Garzik {
6422c6fd2807SJeff Garzik 	u32 sstatus;
6423c6fd2807SJeff Garzik 
6424936fd732STejun Heo 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6425936fd732STejun Heo 	    (sstatus & 0xf) != 0x3)
6426c6fd2807SJeff Garzik 		return 1;
6427c6fd2807SJeff Garzik 	return 0;
6428c6fd2807SJeff Garzik }
6429c6fd2807SJeff Garzik 
6430c6fd2807SJeff Garzik int ata_flush_cache(struct ata_device *dev)
6431c6fd2807SJeff Garzik {
6432c6fd2807SJeff Garzik 	unsigned int err_mask;
6433c6fd2807SJeff Garzik 	u8 cmd;
6434c6fd2807SJeff Garzik 
6435c6fd2807SJeff Garzik 	if (!ata_try_flush_cache(dev))
6436c6fd2807SJeff Garzik 		return 0;
6437c6fd2807SJeff Garzik 
64386fc49adbSTejun Heo 	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6439c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH_EXT;
6440c6fd2807SJeff Garzik 	else
6441c6fd2807SJeff Garzik 		cmd = ATA_CMD_FLUSH;
6442c6fd2807SJeff Garzik 
64434f34337bSAlan Cox 	/* This is wrong. On a failed flush we get back the LBA of the lost
64444f34337bSAlan Cox 	   sector and we should (assuming it wasn't aborted as unknown) issue
64454f34337bSAlan Cox 	   a further flush command to continue the writeback until it
64464f34337bSAlan Cox 	   does not error */
6447c6fd2807SJeff Garzik 	err_mask = ata_do_simple_cmd(dev, cmd);
6448c6fd2807SJeff Garzik 	if (err_mask) {
6449c6fd2807SJeff Garzik 		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6450c6fd2807SJeff Garzik 		return -EIO;
6451c6fd2807SJeff Garzik 	}
6452c6fd2807SJeff Garzik 
6453c6fd2807SJeff Garzik 	return 0;
6454c6fd2807SJeff Garzik }
6455c6fd2807SJeff Garzik 
64566ffa01d8STejun Heo #ifdef CONFIG_PM
6457cca3974eSJeff Garzik static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6458cca3974eSJeff Garzik 			       unsigned int action, unsigned int ehi_flags,
6459cca3974eSJeff Garzik 			       int wait)
6460c6fd2807SJeff Garzik {
6461c6fd2807SJeff Garzik 	unsigned long flags;
6462c6fd2807SJeff Garzik 	int i, rc;
6463c6fd2807SJeff Garzik 
6464cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
6465cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
6466e3667ebfSTejun Heo 		struct ata_link *link;
6467c6fd2807SJeff Garzik 
6468c6fd2807SJeff Garzik 		/* Previous resume operation might still be in
6469c6fd2807SJeff Garzik 		 * progress.  Wait for PM_PENDING to clear.
6470c6fd2807SJeff Garzik 		 */
6471c6fd2807SJeff Garzik 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6472c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6473c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6474c6fd2807SJeff Garzik 		}
6475c6fd2807SJeff Garzik 
6476c6fd2807SJeff Garzik 		/* request PM ops to EH */
6477c6fd2807SJeff Garzik 		spin_lock_irqsave(ap->lock, flags);
6478c6fd2807SJeff Garzik 
6479c6fd2807SJeff Garzik 		ap->pm_mesg = mesg;
6480c6fd2807SJeff Garzik 		if (wait) {
6481c6fd2807SJeff Garzik 			rc = 0;
6482c6fd2807SJeff Garzik 			ap->pm_result = &rc;
6483c6fd2807SJeff Garzik 		}
6484c6fd2807SJeff Garzik 
6485c6fd2807SJeff Garzik 		ap->pflags |= ATA_PFLAG_PM_PENDING;
6486e3667ebfSTejun Heo 		__ata_port_for_each_link(link, ap) {
6487e3667ebfSTejun Heo 			link->eh_info.action |= action;
6488e3667ebfSTejun Heo 			link->eh_info.flags |= ehi_flags;
6489e3667ebfSTejun Heo 		}
6490c6fd2807SJeff Garzik 
6491c6fd2807SJeff Garzik 		ata_port_schedule_eh(ap);
6492c6fd2807SJeff Garzik 
6493c6fd2807SJeff Garzik 		spin_unlock_irqrestore(ap->lock, flags);
6494c6fd2807SJeff Garzik 
6495c6fd2807SJeff Garzik 		/* wait and check result */
6496c6fd2807SJeff Garzik 		if (wait) {
6497c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
6498c6fd2807SJeff Garzik 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6499c6fd2807SJeff Garzik 			if (rc)
6500c6fd2807SJeff Garzik 				return rc;
6501c6fd2807SJeff Garzik 		}
6502c6fd2807SJeff Garzik 	}
6503c6fd2807SJeff Garzik 
6504c6fd2807SJeff Garzik 	return 0;
6505c6fd2807SJeff Garzik }
6506c6fd2807SJeff Garzik 
6507c6fd2807SJeff Garzik /**
6508cca3974eSJeff Garzik  *	ata_host_suspend - suspend host
6509cca3974eSJeff Garzik  *	@host: host to suspend
6510c6fd2807SJeff Garzik  *	@mesg: PM message
6511c6fd2807SJeff Garzik  *
6512cca3974eSJeff Garzik  *	Suspend @host.  Actual operation is performed by EH.  This
6513c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and waits for EH
6514c6fd2807SJeff Garzik  *	to finish.
6515c6fd2807SJeff Garzik  *
6516c6fd2807SJeff Garzik  *	LOCKING:
6517c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6518c6fd2807SJeff Garzik  *
6519c6fd2807SJeff Garzik  *	RETURNS:
6520c6fd2807SJeff Garzik  *	0 on success, -errno on failure.
6521c6fd2807SJeff Garzik  */
6522cca3974eSJeff Garzik int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6523c6fd2807SJeff Garzik {
65249666f400STejun Heo 	int rc;
6525c6fd2807SJeff Garzik 
6526ca77329fSKristen Carlson Accardi 	/*
6527ca77329fSKristen Carlson Accardi 	 * disable link pm on all ports before requesting
6528ca77329fSKristen Carlson Accardi 	 * any pm activity
6529ca77329fSKristen Carlson Accardi 	 */
6530ca77329fSKristen Carlson Accardi 	ata_lpm_enable(host);
6531ca77329fSKristen Carlson Accardi 
6532cca3974eSJeff Garzik 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
65339666f400STejun Heo 	if (rc == 0)
6534cca3974eSJeff Garzik 		host->dev->power.power_state = mesg;
6535c6fd2807SJeff Garzik 	return rc;
6536c6fd2807SJeff Garzik }
6537c6fd2807SJeff Garzik 
6538c6fd2807SJeff Garzik /**
6539cca3974eSJeff Garzik  *	ata_host_resume - resume host
6540cca3974eSJeff Garzik  *	@host: host to resume
6541c6fd2807SJeff Garzik  *
6542cca3974eSJeff Garzik  *	Resume @host.  Actual operation is performed by EH.  This
6543c6fd2807SJeff Garzik  *	function requests EH to perform PM operations and returns.
6544c6fd2807SJeff Garzik  *	Note that all resume operations are performed parallely.
6545c6fd2807SJeff Garzik  *
6546c6fd2807SJeff Garzik  *	LOCKING:
6547c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
6548c6fd2807SJeff Garzik  */
6549cca3974eSJeff Garzik void ata_host_resume(struct ata_host *host)
6550c6fd2807SJeff Garzik {
6551cca3974eSJeff Garzik 	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6552c6fd2807SJeff Garzik 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6553cca3974eSJeff Garzik 	host->dev->power.power_state = PMSG_ON;
6554ca77329fSKristen Carlson Accardi 
6555ca77329fSKristen Carlson Accardi 	/* reenable link pm */
6556ca77329fSKristen Carlson Accardi 	ata_lpm_disable(host);
6557c6fd2807SJeff Garzik }
65586ffa01d8STejun Heo #endif
6559c6fd2807SJeff Garzik 
6560c6fd2807SJeff Garzik /**
6561c6fd2807SJeff Garzik  *	ata_port_start - Set port up for dma.
6562c6fd2807SJeff Garzik  *	@ap: Port to initialize
6563c6fd2807SJeff Garzik  *
6564c6fd2807SJeff Garzik  *	Called just after data structures for each port are
6565c6fd2807SJeff Garzik  *	initialized.  Allocates space for PRD table.
6566c6fd2807SJeff Garzik  *
6567c6fd2807SJeff Garzik  *	May be used as the port_start() entry in ata_port_operations.
6568c6fd2807SJeff Garzik  *
6569c6fd2807SJeff Garzik  *	LOCKING:
6570c6fd2807SJeff Garzik  *	Inherited from caller.
6571c6fd2807SJeff Garzik  */
6572c6fd2807SJeff Garzik int ata_port_start(struct ata_port *ap)
6573c6fd2807SJeff Garzik {
6574c6fd2807SJeff Garzik 	struct device *dev = ap->dev;
6575c6fd2807SJeff Garzik 	int rc;
6576c6fd2807SJeff Garzik 
6577f0d36efdSTejun Heo 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6578f0d36efdSTejun Heo 				      GFP_KERNEL);
6579c6fd2807SJeff Garzik 	if (!ap->prd)
6580c6fd2807SJeff Garzik 		return -ENOMEM;
6581c6fd2807SJeff Garzik 
6582c6fd2807SJeff Garzik 	rc = ata_pad_alloc(ap, dev);
6583f0d36efdSTejun Heo 	if (rc)
6584c6fd2807SJeff Garzik 		return rc;
6585c6fd2807SJeff Garzik 
6586f0d36efdSTejun Heo 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6587f0d36efdSTejun Heo 		(unsigned long long)ap->prd_dma);
6588c6fd2807SJeff Garzik 	return 0;
6589c6fd2807SJeff Garzik }
6590c6fd2807SJeff Garzik 
6591c6fd2807SJeff Garzik /**
6592c6fd2807SJeff Garzik  *	ata_dev_init - Initialize an ata_device structure
6593c6fd2807SJeff Garzik  *	@dev: Device structure to initialize
6594c6fd2807SJeff Garzik  *
6595c6fd2807SJeff Garzik  *	Initialize @dev in preparation for probing.
6596c6fd2807SJeff Garzik  *
6597c6fd2807SJeff Garzik  *	LOCKING:
6598c6fd2807SJeff Garzik  *	Inherited from caller.
6599c6fd2807SJeff Garzik  */
6600c6fd2807SJeff Garzik void ata_dev_init(struct ata_device *dev)
6601c6fd2807SJeff Garzik {
66029af5c9c9STejun Heo 	struct ata_link *link = dev->link;
66039af5c9c9STejun Heo 	struct ata_port *ap = link->ap;
6604c6fd2807SJeff Garzik 	unsigned long flags;
6605c6fd2807SJeff Garzik 
6606c6fd2807SJeff Garzik 	/* SATA spd limit is bound to the first device */
66079af5c9c9STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
66089af5c9c9STejun Heo 	link->sata_spd = 0;
6609c6fd2807SJeff Garzik 
6610c6fd2807SJeff Garzik 	/* High bits of dev->flags are used to record warm plug
6611c6fd2807SJeff Garzik 	 * requests which occur asynchronously.  Synchronize using
6612cca3974eSJeff Garzik 	 * host lock.
6613c6fd2807SJeff Garzik 	 */
6614c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
6615c6fd2807SJeff Garzik 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
66163dcc323fSTejun Heo 	dev->horkage = 0;
6617c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
6618c6fd2807SJeff Garzik 
6619c6fd2807SJeff Garzik 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6620c6fd2807SJeff Garzik 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6621c6fd2807SJeff Garzik 	dev->pio_mask = UINT_MAX;
6622c6fd2807SJeff Garzik 	dev->mwdma_mask = UINT_MAX;
6623c6fd2807SJeff Garzik 	dev->udma_mask = UINT_MAX;
6624c6fd2807SJeff Garzik }
6625c6fd2807SJeff Garzik 
6626c6fd2807SJeff Garzik /**
66274fb37a25STejun Heo  *	ata_link_init - Initialize an ata_link structure
66284fb37a25STejun Heo  *	@ap: ATA port link is attached to
66294fb37a25STejun Heo  *	@link: Link structure to initialize
66308989805dSTejun Heo  *	@pmp: Port multiplier port number
66314fb37a25STejun Heo  *
66324fb37a25STejun Heo  *	Initialize @link.
66334fb37a25STejun Heo  *
66344fb37a25STejun Heo  *	LOCKING:
66354fb37a25STejun Heo  *	Kernel thread context (may sleep)
66364fb37a25STejun Heo  */
6637fb7fd614STejun Heo void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
66384fb37a25STejun Heo {
66394fb37a25STejun Heo 	int i;
66404fb37a25STejun Heo 
66414fb37a25STejun Heo 	/* clear everything except for devices */
66424fb37a25STejun Heo 	memset(link, 0, offsetof(struct ata_link, device[0]));
66434fb37a25STejun Heo 
66444fb37a25STejun Heo 	link->ap = ap;
66458989805dSTejun Heo 	link->pmp = pmp;
66464fb37a25STejun Heo 	link->active_tag = ATA_TAG_POISON;
66474fb37a25STejun Heo 	link->hw_sata_spd_limit = UINT_MAX;
66484fb37a25STejun Heo 
66494fb37a25STejun Heo 	/* can't use iterator, ap isn't initialized yet */
66504fb37a25STejun Heo 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
66514fb37a25STejun Heo 		struct ata_device *dev = &link->device[i];
66524fb37a25STejun Heo 
66534fb37a25STejun Heo 		dev->link = link;
66544fb37a25STejun Heo 		dev->devno = dev - link->device;
66554fb37a25STejun Heo 		ata_dev_init(dev);
66564fb37a25STejun Heo 	}
66574fb37a25STejun Heo }
66584fb37a25STejun Heo 
66594fb37a25STejun Heo /**
66604fb37a25STejun Heo  *	sata_link_init_spd - Initialize link->sata_spd_limit
66614fb37a25STejun Heo  *	@link: Link to configure sata_spd_limit for
66624fb37a25STejun Heo  *
66634fb37a25STejun Heo  *	Initialize @link->[hw_]sata_spd_limit to the currently
66644fb37a25STejun Heo  *	configured value.
66654fb37a25STejun Heo  *
66664fb37a25STejun Heo  *	LOCKING:
66674fb37a25STejun Heo  *	Kernel thread context (may sleep).
66684fb37a25STejun Heo  *
66694fb37a25STejun Heo  *	RETURNS:
66704fb37a25STejun Heo  *	0 on success, -errno on failure.
66714fb37a25STejun Heo  */
6672fb7fd614STejun Heo int sata_link_init_spd(struct ata_link *link)
66734fb37a25STejun Heo {
66744fb37a25STejun Heo 	u32 scontrol, spd;
66754fb37a25STejun Heo 	int rc;
66764fb37a25STejun Heo 
66774fb37a25STejun Heo 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
66784fb37a25STejun Heo 	if (rc)
66794fb37a25STejun Heo 		return rc;
66804fb37a25STejun Heo 
66814fb37a25STejun Heo 	spd = (scontrol >> 4) & 0xf;
66824fb37a25STejun Heo 	if (spd)
66834fb37a25STejun Heo 		link->hw_sata_spd_limit &= (1 << spd) - 1;
66844fb37a25STejun Heo 
66854fb37a25STejun Heo 	link->sata_spd_limit = link->hw_sata_spd_limit;
66864fb37a25STejun Heo 
66874fb37a25STejun Heo 	return 0;
66884fb37a25STejun Heo }
66894fb37a25STejun Heo 
66904fb37a25STejun Heo /**
6691f3187195STejun Heo  *	ata_port_alloc - allocate and initialize basic ATA port resources
6692f3187195STejun Heo  *	@host: ATA host this allocated port belongs to
6693c6fd2807SJeff Garzik  *
6694f3187195STejun Heo  *	Allocate and initialize basic ATA port resources.
6695f3187195STejun Heo  *
6696f3187195STejun Heo  *	RETURNS:
6697f3187195STejun Heo  *	Allocate ATA port on success, NULL on failure.
6698c6fd2807SJeff Garzik  *
6699c6fd2807SJeff Garzik  *	LOCKING:
6700f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6701c6fd2807SJeff Garzik  */
6702f3187195STejun Heo struct ata_port *ata_port_alloc(struct ata_host *host)
6703c6fd2807SJeff Garzik {
6704f3187195STejun Heo 	struct ata_port *ap;
6705c6fd2807SJeff Garzik 
6706f3187195STejun Heo 	DPRINTK("ENTER\n");
6707f3187195STejun Heo 
6708f3187195STejun Heo 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6709f3187195STejun Heo 	if (!ap)
6710f3187195STejun Heo 		return NULL;
6711f3187195STejun Heo 
6712f4d6d004STejun Heo 	ap->pflags |= ATA_PFLAG_INITIALIZING;
6713cca3974eSJeff Garzik 	ap->lock = &host->lock;
6714c6fd2807SJeff Garzik 	ap->flags = ATA_FLAG_DISABLED;
6715f3187195STejun Heo 	ap->print_id = -1;
6716c6fd2807SJeff Garzik 	ap->ctl = ATA_DEVCTL_OBS;
6717cca3974eSJeff Garzik 	ap->host = host;
6718f3187195STejun Heo 	ap->dev = host->dev;
6719c6fd2807SJeff Garzik 	ap->last_ctl = 0xFF;
6720c6fd2807SJeff Garzik 
6721c6fd2807SJeff Garzik #if defined(ATA_VERBOSE_DEBUG)
6722c6fd2807SJeff Garzik 	/* turn on all debugging levels */
6723c6fd2807SJeff Garzik 	ap->msg_enable = 0x00FF;
6724c6fd2807SJeff Garzik #elif defined(ATA_DEBUG)
6725c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6726c6fd2807SJeff Garzik #else
6727c6fd2807SJeff Garzik 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6728c6fd2807SJeff Garzik #endif
6729c6fd2807SJeff Garzik 
673065f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->port_task, NULL);
673165f27f38SDavid Howells 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
673265f27f38SDavid Howells 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6733c6fd2807SJeff Garzik 	INIT_LIST_HEAD(&ap->eh_done_q);
6734c6fd2807SJeff Garzik 	init_waitqueue_head(&ap->eh_wait_q);
67355ddf24c5STejun Heo 	init_timer_deferrable(&ap->fastdrain_timer);
67365ddf24c5STejun Heo 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
67375ddf24c5STejun Heo 	ap->fastdrain_timer.data = (unsigned long)ap;
6738c6fd2807SJeff Garzik 
6739c6fd2807SJeff Garzik 	ap->cbl = ATA_CBL_NONE;
6740c6fd2807SJeff Garzik 
67418989805dSTejun Heo 	ata_link_init(ap, &ap->link, 0);
6742c6fd2807SJeff Garzik 
6743c6fd2807SJeff Garzik #ifdef ATA_IRQ_TRAP
6744c6fd2807SJeff Garzik 	ap->stats.unhandled_irq = 1;
6745c6fd2807SJeff Garzik 	ap->stats.idle_irq = 1;
6746c6fd2807SJeff Garzik #endif
6747c6fd2807SJeff Garzik 	return ap;
6748c6fd2807SJeff Garzik }
6749c6fd2807SJeff Garzik 
6750f0d36efdSTejun Heo static void ata_host_release(struct device *gendev, void *res)
6751f0d36efdSTejun Heo {
6752f0d36efdSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
6753f0d36efdSTejun Heo 	int i;
6754f0d36efdSTejun Heo 
6755f0d36efdSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
6756f0d36efdSTejun Heo 		struct ata_port *ap = host->ports[i];
6757f0d36efdSTejun Heo 
6758ecef7253STejun Heo 		if (!ap)
6759ecef7253STejun Heo 			continue;
6760ecef7253STejun Heo 
67614911487aSTejun Heo 		if (ap->scsi_host)
67621aa506e4STejun Heo 			scsi_host_put(ap->scsi_host);
67631aa506e4STejun Heo 
6764633273a3STejun Heo 		kfree(ap->pmp_link);
67654911487aSTejun Heo 		kfree(ap);
67661aa506e4STejun Heo 		host->ports[i] = NULL;
67671aa506e4STejun Heo 	}
67681aa506e4STejun Heo 
67691aa56ccaSTejun Heo 	dev_set_drvdata(gendev, NULL);
6770f0d36efdSTejun Heo }
6771f0d36efdSTejun Heo 
6772c6fd2807SJeff Garzik /**
6773f3187195STejun Heo  *	ata_host_alloc - allocate and init basic ATA host resources
6774f3187195STejun Heo  *	@dev: generic device this host is associated with
6775f3187195STejun Heo  *	@max_ports: maximum number of ATA ports associated with this host
6776f3187195STejun Heo  *
6777f3187195STejun Heo  *	Allocate and initialize basic ATA host resources.  LLD calls
6778f3187195STejun Heo  *	this function to allocate a host, initializes it fully and
6779f3187195STejun Heo  *	attaches it using ata_host_register().
6780f3187195STejun Heo  *
6781f3187195STejun Heo  *	@max_ports ports are allocated and host->n_ports is
6782f3187195STejun Heo  *	initialized to @max_ports.  The caller is allowed to decrease
6783f3187195STejun Heo  *	host->n_ports before calling ata_host_register().  The unused
6784f3187195STejun Heo  *	ports will be automatically freed on registration.
6785f3187195STejun Heo  *
6786f3187195STejun Heo  *	RETURNS:
6787f3187195STejun Heo  *	Allocate ATA host on success, NULL on failure.
6788f3187195STejun Heo  *
6789f3187195STejun Heo  *	LOCKING:
6790f3187195STejun Heo  *	Inherited from calling layer (may sleep).
6791f3187195STejun Heo  */
6792f3187195STejun Heo struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6793f3187195STejun Heo {
6794f3187195STejun Heo 	struct ata_host *host;
6795f3187195STejun Heo 	size_t sz;
6796f3187195STejun Heo 	int i;
6797f3187195STejun Heo 
6798f3187195STejun Heo 	DPRINTK("ENTER\n");
6799f3187195STejun Heo 
6800f3187195STejun Heo 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
6801f3187195STejun Heo 		return NULL;
6802f3187195STejun Heo 
6803f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6804f3187195STejun Heo 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6805f3187195STejun Heo 	/* alloc a container for our list of ATA ports (buses) */
6806f3187195STejun Heo 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6807f3187195STejun Heo 	if (!host)
6808f3187195STejun Heo 		goto err_out;
6809f3187195STejun Heo 
6810f3187195STejun Heo 	devres_add(dev, host);
6811f3187195STejun Heo 	dev_set_drvdata(dev, host);
6812f3187195STejun Heo 
6813f3187195STejun Heo 	spin_lock_init(&host->lock);
6814f3187195STejun Heo 	host->dev = dev;
6815f3187195STejun Heo 	host->n_ports = max_ports;
6816f3187195STejun Heo 
6817f3187195STejun Heo 	/* allocate ports bound to this host */
6818f3187195STejun Heo 	for (i = 0; i < max_ports; i++) {
6819f3187195STejun Heo 		struct ata_port *ap;
6820f3187195STejun Heo 
6821f3187195STejun Heo 		ap = ata_port_alloc(host);
6822f3187195STejun Heo 		if (!ap)
6823f3187195STejun Heo 			goto err_out;
6824f3187195STejun Heo 
6825f3187195STejun Heo 		ap->port_no = i;
6826f3187195STejun Heo 		host->ports[i] = ap;
6827f3187195STejun Heo 	}
6828f3187195STejun Heo 
6829f3187195STejun Heo 	devres_remove_group(dev, NULL);
6830f3187195STejun Heo 	return host;
6831f3187195STejun Heo 
6832f3187195STejun Heo  err_out:
6833f3187195STejun Heo 	devres_release_group(dev, NULL);
6834f3187195STejun Heo 	return NULL;
6835f3187195STejun Heo }
6836f3187195STejun Heo 
6837f3187195STejun Heo /**
6838f5cda257STejun Heo  *	ata_host_alloc_pinfo - alloc host and init with port_info array
6839f5cda257STejun Heo  *	@dev: generic device this host is associated with
6840f5cda257STejun Heo  *	@ppi: array of ATA port_info to initialize host with
6841f5cda257STejun Heo  *	@n_ports: number of ATA ports attached to this host
6842f5cda257STejun Heo  *
6843f5cda257STejun Heo  *	Allocate ATA host and initialize with info from @ppi.  If NULL
6844f5cda257STejun Heo  *	terminated, @ppi may contain fewer entries than @n_ports.  The
6845f5cda257STejun Heo  *	last entry will be used for the remaining ports.
6846f5cda257STejun Heo  *
6847f5cda257STejun Heo  *	RETURNS:
6848f5cda257STejun Heo  *	Allocate ATA host on success, NULL on failure.
6849f5cda257STejun Heo  *
6850f5cda257STejun Heo  *	LOCKING:
6851f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
6852f5cda257STejun Heo  */
6853f5cda257STejun Heo struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6854f5cda257STejun Heo 				      const struct ata_port_info * const * ppi,
6855f5cda257STejun Heo 				      int n_ports)
6856f5cda257STejun Heo {
6857f5cda257STejun Heo 	const struct ata_port_info *pi;
6858f5cda257STejun Heo 	struct ata_host *host;
6859f5cda257STejun Heo 	int i, j;
6860f5cda257STejun Heo 
6861f5cda257STejun Heo 	host = ata_host_alloc(dev, n_ports);
6862f5cda257STejun Heo 	if (!host)
6863f5cda257STejun Heo 		return NULL;
6864f5cda257STejun Heo 
6865f5cda257STejun Heo 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6866f5cda257STejun Heo 		struct ata_port *ap = host->ports[i];
6867f5cda257STejun Heo 
6868f5cda257STejun Heo 		if (ppi[j])
6869f5cda257STejun Heo 			pi = ppi[j++];
6870f5cda257STejun Heo 
6871f5cda257STejun Heo 		ap->pio_mask = pi->pio_mask;
6872f5cda257STejun Heo 		ap->mwdma_mask = pi->mwdma_mask;
6873f5cda257STejun Heo 		ap->udma_mask = pi->udma_mask;
6874f5cda257STejun Heo 		ap->flags |= pi->flags;
68750c88758bSTejun Heo 		ap->link.flags |= pi->link_flags;
6876f5cda257STejun Heo 		ap->ops = pi->port_ops;
6877f5cda257STejun Heo 
6878f5cda257STejun Heo 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6879f5cda257STejun Heo 			host->ops = pi->port_ops;
6880f5cda257STejun Heo 		if (!host->private_data && pi->private_data)
6881f5cda257STejun Heo 			host->private_data = pi->private_data;
6882f5cda257STejun Heo 	}
6883f5cda257STejun Heo 
6884f5cda257STejun Heo 	return host;
6885f5cda257STejun Heo }
6886f5cda257STejun Heo 
688732ebbc0cSTejun Heo static void ata_host_stop(struct device *gendev, void *res)
688832ebbc0cSTejun Heo {
688932ebbc0cSTejun Heo 	struct ata_host *host = dev_get_drvdata(gendev);
689032ebbc0cSTejun Heo 	int i;
689132ebbc0cSTejun Heo 
689232ebbc0cSTejun Heo 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
689332ebbc0cSTejun Heo 
689432ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
689532ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
689632ebbc0cSTejun Heo 
689732ebbc0cSTejun Heo 		if (ap->ops->port_stop)
689832ebbc0cSTejun Heo 			ap->ops->port_stop(ap);
689932ebbc0cSTejun Heo 	}
690032ebbc0cSTejun Heo 
690132ebbc0cSTejun Heo 	if (host->ops->host_stop)
690232ebbc0cSTejun Heo 		host->ops->host_stop(host);
690332ebbc0cSTejun Heo }
690432ebbc0cSTejun Heo 
6905f5cda257STejun Heo /**
6906ecef7253STejun Heo  *	ata_host_start - start and freeze ports of an ATA host
6907ecef7253STejun Heo  *	@host: ATA host to start ports for
6908ecef7253STejun Heo  *
6909ecef7253STejun Heo  *	Start and then freeze ports of @host.  Started status is
6910ecef7253STejun Heo  *	recorded in host->flags, so this function can be called
6911ecef7253STejun Heo  *	multiple times.  Ports are guaranteed to get started only
6912f3187195STejun Heo  *	once.  If host->ops isn't initialized yet, its set to the
6913f3187195STejun Heo  *	first non-dummy port ops.
6914ecef7253STejun Heo  *
6915ecef7253STejun Heo  *	LOCKING:
6916ecef7253STejun Heo  *	Inherited from calling layer (may sleep).
6917ecef7253STejun Heo  *
6918ecef7253STejun Heo  *	RETURNS:
6919ecef7253STejun Heo  *	0 if all ports are started successfully, -errno otherwise.
6920ecef7253STejun Heo  */
6921ecef7253STejun Heo int ata_host_start(struct ata_host *host)
6922ecef7253STejun Heo {
692332ebbc0cSTejun Heo 	int have_stop = 0;
692432ebbc0cSTejun Heo 	void *start_dr = NULL;
6925ecef7253STejun Heo 	int i, rc;
6926ecef7253STejun Heo 
6927ecef7253STejun Heo 	if (host->flags & ATA_HOST_STARTED)
6928ecef7253STejun Heo 		return 0;
6929ecef7253STejun Heo 
6930ecef7253STejun Heo 	for (i = 0; i < host->n_ports; i++) {
6931ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6932ecef7253STejun Heo 
6933f3187195STejun Heo 		if (!host->ops && !ata_port_is_dummy(ap))
6934f3187195STejun Heo 			host->ops = ap->ops;
6935f3187195STejun Heo 
693632ebbc0cSTejun Heo 		if (ap->ops->port_stop)
693732ebbc0cSTejun Heo 			have_stop = 1;
693832ebbc0cSTejun Heo 	}
693932ebbc0cSTejun Heo 
694032ebbc0cSTejun Heo 	if (host->ops->host_stop)
694132ebbc0cSTejun Heo 		have_stop = 1;
694232ebbc0cSTejun Heo 
694332ebbc0cSTejun Heo 	if (have_stop) {
694432ebbc0cSTejun Heo 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
694532ebbc0cSTejun Heo 		if (!start_dr)
694632ebbc0cSTejun Heo 			return -ENOMEM;
694732ebbc0cSTejun Heo 	}
694832ebbc0cSTejun Heo 
694932ebbc0cSTejun Heo 	for (i = 0; i < host->n_ports; i++) {
695032ebbc0cSTejun Heo 		struct ata_port *ap = host->ports[i];
695132ebbc0cSTejun Heo 
6952ecef7253STejun Heo 		if (ap->ops->port_start) {
6953ecef7253STejun Heo 			rc = ap->ops->port_start(ap);
6954ecef7253STejun Heo 			if (rc) {
69550f9fe9b7SAlan Cox 				if (rc != -ENODEV)
69560f9fe9b7SAlan Cox 					dev_printk(KERN_ERR, host->dev, "failed to start port %d (errno=%d)\n", i, rc);
6957ecef7253STejun Heo 				goto err_out;
6958ecef7253STejun Heo 			}
6959ecef7253STejun Heo 		}
6960ecef7253STejun Heo 		ata_eh_freeze_port(ap);
6961ecef7253STejun Heo 	}
6962ecef7253STejun Heo 
696332ebbc0cSTejun Heo 	if (start_dr)
696432ebbc0cSTejun Heo 		devres_add(host->dev, start_dr);
6965ecef7253STejun Heo 	host->flags |= ATA_HOST_STARTED;
6966ecef7253STejun Heo 	return 0;
6967ecef7253STejun Heo 
6968ecef7253STejun Heo  err_out:
6969ecef7253STejun Heo 	while (--i >= 0) {
6970ecef7253STejun Heo 		struct ata_port *ap = host->ports[i];
6971ecef7253STejun Heo 
6972ecef7253STejun Heo 		if (ap->ops->port_stop)
6973ecef7253STejun Heo 			ap->ops->port_stop(ap);
6974ecef7253STejun Heo 	}
697532ebbc0cSTejun Heo 	devres_free(start_dr);
6976ecef7253STejun Heo 	return rc;
6977ecef7253STejun Heo }
6978ecef7253STejun Heo 
6979ecef7253STejun Heo /**
6980cca3974eSJeff Garzik  *	ata_sas_host_init - Initialize a host struct
6981cca3974eSJeff Garzik  *	@host:	host to initialize
6982cca3974eSJeff Garzik  *	@dev:	device host is attached to
6983cca3974eSJeff Garzik  *	@flags:	host flags
6984c6fd2807SJeff Garzik  *	@ops:	port_ops
6985c6fd2807SJeff Garzik  *
6986c6fd2807SJeff Garzik  *	LOCKING:
6987c6fd2807SJeff Garzik  *	PCI/etc. bus probe sem.
6988c6fd2807SJeff Garzik  *
6989c6fd2807SJeff Garzik  */
6990f3187195STejun Heo /* KILLME - the only user left is ipr */
6991cca3974eSJeff Garzik void ata_host_init(struct ata_host *host, struct device *dev,
6992cca3974eSJeff Garzik 		   unsigned long flags, const struct ata_port_operations *ops)
6993c6fd2807SJeff Garzik {
6994cca3974eSJeff Garzik 	spin_lock_init(&host->lock);
6995cca3974eSJeff Garzik 	host->dev = dev;
6996cca3974eSJeff Garzik 	host->flags = flags;
6997cca3974eSJeff Garzik 	host->ops = ops;
6998c6fd2807SJeff Garzik }
6999c6fd2807SJeff Garzik 
7000c6fd2807SJeff Garzik /**
7001f3187195STejun Heo  *	ata_host_register - register initialized ATA host
7002f3187195STejun Heo  *	@host: ATA host to register
7003f3187195STejun Heo  *	@sht: template for SCSI host
7004c6fd2807SJeff Garzik  *
7005f3187195STejun Heo  *	Register initialized ATA host.  @host is allocated using
7006f3187195STejun Heo  *	ata_host_alloc() and fully initialized by LLD.  This function
7007f3187195STejun Heo  *	starts ports, registers @host with ATA and SCSI layers and
7008f3187195STejun Heo  *	probe registered devices.
7009c6fd2807SJeff Garzik  *
7010c6fd2807SJeff Garzik  *	LOCKING:
7011f3187195STejun Heo  *	Inherited from calling layer (may sleep).
7012c6fd2807SJeff Garzik  *
7013c6fd2807SJeff Garzik  *	RETURNS:
7014f3187195STejun Heo  *	0 on success, -errno otherwise.
7015c6fd2807SJeff Garzik  */
7016f3187195STejun Heo int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7017c6fd2807SJeff Garzik {
7018f3187195STejun Heo 	int i, rc;
7019c6fd2807SJeff Garzik 
7020f3187195STejun Heo 	/* host must have been started */
7021f3187195STejun Heo 	if (!(host->flags & ATA_HOST_STARTED)) {
7022f3187195STejun Heo 		dev_printk(KERN_ERR, host->dev,
7023f3187195STejun Heo 			   "BUG: trying to register unstarted host\n");
7024f3187195STejun Heo 		WARN_ON(1);
7025f3187195STejun Heo 		return -EINVAL;
702602f076aaSAlan Cox 	}
7027f0d36efdSTejun Heo 
7028f3187195STejun Heo 	/* Blow away unused ports.  This happens when LLD can't
7029f3187195STejun Heo 	 * determine the exact number of ports to allocate at
7030f3187195STejun Heo 	 * allocation time.
7031f3187195STejun Heo 	 */
7032f3187195STejun Heo 	for (i = host->n_ports; host->ports[i]; i++)
7033f3187195STejun Heo 		kfree(host->ports[i]);
7034f0d36efdSTejun Heo 
7035f3187195STejun Heo 	/* give ports names and add SCSI hosts */
7036f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++)
7037f3187195STejun Heo 		host->ports[i]->print_id = ata_print_id++;
7038c6fd2807SJeff Garzik 
7039f3187195STejun Heo 	rc = ata_scsi_add_hosts(host, sht);
7040ecef7253STejun Heo 	if (rc)
7041f3187195STejun Heo 		return rc;
7042ecef7253STejun Heo 
7043fafbae87STejun Heo 	/* associate with ACPI nodes */
7044fafbae87STejun Heo 	ata_acpi_associate(host);
7045fafbae87STejun Heo 
7046f3187195STejun Heo 	/* set cable, sata_spd_limit and report */
7047cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7048cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7049f3187195STejun Heo 		unsigned long xfer_mask;
7050f3187195STejun Heo 
7051f3187195STejun Heo 		/* set SATA cable type if still unset */
7052f3187195STejun Heo 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7053f3187195STejun Heo 			ap->cbl = ATA_CBL_SATA;
7054c6fd2807SJeff Garzik 
7055c6fd2807SJeff Garzik 		/* init sata_spd_limit to the current value */
70564fb37a25STejun Heo 		sata_link_init_spd(&ap->link);
7057c6fd2807SJeff Garzik 
7058cbcdd875STejun Heo 		/* print per-port info to dmesg */
7059f3187195STejun Heo 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7060f3187195STejun Heo 					      ap->udma_mask);
7061f3187195STejun Heo 
7062abf6e8edSTejun Heo 		if (!ata_port_is_dummy(ap)) {
7063cbcdd875STejun Heo 			ata_port_printk(ap, KERN_INFO,
7064cbcdd875STejun Heo 					"%cATA max %s %s\n",
7065a16abc0bSTejun Heo 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7066f3187195STejun Heo 					ata_mode_string(xfer_mask),
7067cbcdd875STejun Heo 					ap->link.eh_info.desc);
7068abf6e8edSTejun Heo 			ata_ehi_clear_desc(&ap->link.eh_info);
7069abf6e8edSTejun Heo 		} else
7070f3187195STejun Heo 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7071c6fd2807SJeff Garzik 	}
7072c6fd2807SJeff Garzik 
7073f3187195STejun Heo 	/* perform each probe synchronously */
7074f3187195STejun Heo 	DPRINTK("probe begin\n");
7075f3187195STejun Heo 	for (i = 0; i < host->n_ports; i++) {
7076f3187195STejun Heo 		struct ata_port *ap = host->ports[i];
7077f3187195STejun Heo 		int rc;
7078f3187195STejun Heo 
7079f3187195STejun Heo 		/* probe */
7080c6fd2807SJeff Garzik 		if (ap->ops->error_handler) {
70819af5c9c9STejun Heo 			struct ata_eh_info *ehi = &ap->link.eh_info;
7082c6fd2807SJeff Garzik 			unsigned long flags;
7083c6fd2807SJeff Garzik 
7084c6fd2807SJeff Garzik 			ata_port_probe(ap);
7085c6fd2807SJeff Garzik 
7086c6fd2807SJeff Garzik 			/* kick EH for boot probing */
7087c6fd2807SJeff Garzik 			spin_lock_irqsave(ap->lock, flags);
7088c6fd2807SJeff Garzik 
7089f58229f8STejun Heo 			ehi->probe_mask =
7090f58229f8STejun Heo 				(1 << ata_link_max_devices(&ap->link)) - 1;
7091c6fd2807SJeff Garzik 			ehi->action |= ATA_EH_SOFTRESET;
7092c6fd2807SJeff Garzik 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7093c6fd2807SJeff Garzik 
7094f4d6d004STejun Heo 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7095c6fd2807SJeff Garzik 			ap->pflags |= ATA_PFLAG_LOADING;
7096c6fd2807SJeff Garzik 			ata_port_schedule_eh(ap);
7097c6fd2807SJeff Garzik 
7098c6fd2807SJeff Garzik 			spin_unlock_irqrestore(ap->lock, flags);
7099c6fd2807SJeff Garzik 
7100c6fd2807SJeff Garzik 			/* wait for EH to finish */
7101c6fd2807SJeff Garzik 			ata_port_wait_eh(ap);
7102c6fd2807SJeff Garzik 		} else {
710344877b4eSTejun Heo 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7104c6fd2807SJeff Garzik 			rc = ata_bus_probe(ap);
710544877b4eSTejun Heo 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
7106c6fd2807SJeff Garzik 
7107c6fd2807SJeff Garzik 			if (rc) {
7108c6fd2807SJeff Garzik 				/* FIXME: do something useful here?
7109c6fd2807SJeff Garzik 				 * Current libata behavior will
7110c6fd2807SJeff Garzik 				 * tear down everything when
7111c6fd2807SJeff Garzik 				 * the module is removed
7112c6fd2807SJeff Garzik 				 * or the h/w is unplugged.
7113c6fd2807SJeff Garzik 				 */
7114c6fd2807SJeff Garzik 			}
7115c6fd2807SJeff Garzik 		}
7116c6fd2807SJeff Garzik 	}
7117c6fd2807SJeff Garzik 
7118c6fd2807SJeff Garzik 	/* probes are done, now scan each port's disk(s) */
7119c6fd2807SJeff Garzik 	DPRINTK("host probe begin\n");
7120cca3974eSJeff Garzik 	for (i = 0; i < host->n_ports; i++) {
7121cca3974eSJeff Garzik 		struct ata_port *ap = host->ports[i];
7122c6fd2807SJeff Garzik 
71231ae46317STejun Heo 		ata_scsi_scan_host(ap, 1);
7124ca77329fSKristen Carlson Accardi 		ata_lpm_schedule(ap, ap->pm_policy);
7125c6fd2807SJeff Garzik 	}
7126c6fd2807SJeff Garzik 
7127f3187195STejun Heo 	return 0;
7128f3187195STejun Heo }
7129f3187195STejun Heo 
7130f3187195STejun Heo /**
7131f5cda257STejun Heo  *	ata_host_activate - start host, request IRQ and register it
7132f5cda257STejun Heo  *	@host: target ATA host
7133f5cda257STejun Heo  *	@irq: IRQ to request
7134f5cda257STejun Heo  *	@irq_handler: irq_handler used when requesting IRQ
7135f5cda257STejun Heo  *	@irq_flags: irq_flags used when requesting IRQ
7136f5cda257STejun Heo  *	@sht: scsi_host_template to use when registering the host
7137f5cda257STejun Heo  *
7138f5cda257STejun Heo  *	After allocating an ATA host and initializing it, most libata
7139f5cda257STejun Heo  *	LLDs perform three steps to activate the host - start host,
7140f5cda257STejun Heo  *	request IRQ and register it.  This helper takes necessasry
7141f5cda257STejun Heo  *	arguments and performs the three steps in one go.
7142f5cda257STejun Heo  *
71433d46b2e2SPaul Mundt  *	An invalid IRQ skips the IRQ registration and expects the host to
71443d46b2e2SPaul Mundt  *	have set polling mode on the port. In this case, @irq_handler
71453d46b2e2SPaul Mundt  *	should be NULL.
71463d46b2e2SPaul Mundt  *
7147f5cda257STejun Heo  *	LOCKING:
7148f5cda257STejun Heo  *	Inherited from calling layer (may sleep).
7149f5cda257STejun Heo  *
7150f5cda257STejun Heo  *	RETURNS:
7151f5cda257STejun Heo  *	0 on success, -errno otherwise.
7152f5cda257STejun Heo  */
7153f5cda257STejun Heo int ata_host_activate(struct ata_host *host, int irq,
7154f5cda257STejun Heo 		      irq_handler_t irq_handler, unsigned long irq_flags,
7155f5cda257STejun Heo 		      struct scsi_host_template *sht)
7156f5cda257STejun Heo {
7157cbcdd875STejun Heo 	int i, rc;
7158f5cda257STejun Heo 
7159f5cda257STejun Heo 	rc = ata_host_start(host);
7160f5cda257STejun Heo 	if (rc)
7161f5cda257STejun Heo 		return rc;
7162f5cda257STejun Heo 
71633d46b2e2SPaul Mundt 	/* Special case for polling mode */
71643d46b2e2SPaul Mundt 	if (!irq) {
71653d46b2e2SPaul Mundt 		WARN_ON(irq_handler);
71663d46b2e2SPaul Mundt 		return ata_host_register(host, sht);
71673d46b2e2SPaul Mundt 	}
71683d46b2e2SPaul Mundt 
7169f5cda257STejun Heo 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7170f5cda257STejun Heo 			      dev_driver_string(host->dev), host);
7171f5cda257STejun Heo 	if (rc)
7172f5cda257STejun Heo 		return rc;
7173f5cda257STejun Heo 
7174cbcdd875STejun Heo 	for (i = 0; i < host->n_ports; i++)
7175cbcdd875STejun Heo 		ata_port_desc(host->ports[i], "irq %d", irq);
71764031826bSTejun Heo 
7177f5cda257STejun Heo 	rc = ata_host_register(host, sht);
7178f5cda257STejun Heo 	/* if failed, just free the IRQ and leave ports alone */
7179f5cda257STejun Heo 	if (rc)
7180f5cda257STejun Heo 		devm_free_irq(host->dev, irq, host);
7181f5cda257STejun Heo 
7182f5cda257STejun Heo 	return rc;
7183f5cda257STejun Heo }
7184f5cda257STejun Heo 
7185f5cda257STejun Heo /**
7186c6fd2807SJeff Garzik  *	ata_port_detach - Detach ATA port in prepration of device removal
7187c6fd2807SJeff Garzik  *	@ap: ATA port to be detached
7188c6fd2807SJeff Garzik  *
7189c6fd2807SJeff Garzik  *	Detach all ATA devices and the associated SCSI devices of @ap;
7190c6fd2807SJeff Garzik  *	then, remove the associated SCSI host.  @ap is guaranteed to
7191c6fd2807SJeff Garzik  *	be quiescent on return from this function.
7192c6fd2807SJeff Garzik  *
7193c6fd2807SJeff Garzik  *	LOCKING:
7194c6fd2807SJeff Garzik  *	Kernel thread context (may sleep).
7195c6fd2807SJeff Garzik  */
7196741b7763SAdrian Bunk static void ata_port_detach(struct ata_port *ap)
7197c6fd2807SJeff Garzik {
7198c6fd2807SJeff Garzik 	unsigned long flags;
719941bda9c9STejun Heo 	struct ata_link *link;
7200f58229f8STejun Heo 	struct ata_device *dev;
7201c6fd2807SJeff Garzik 
7202c6fd2807SJeff Garzik 	if (!ap->ops->error_handler)
7203c6fd2807SJeff Garzik 		goto skip_eh;
7204c6fd2807SJeff Garzik 
7205c6fd2807SJeff Garzik 	/* tell EH we're leaving & flush EH */
7206c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7207c6fd2807SJeff Garzik 	ap->pflags |= ATA_PFLAG_UNLOADING;
7208c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7209c6fd2807SJeff Garzik 
7210c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
7211c6fd2807SJeff Garzik 
7212c6fd2807SJeff Garzik 	/* EH is now guaranteed to see UNLOADING, so no new device
7213c6fd2807SJeff Garzik 	 * will be attached.  Disable all existing devices.
7214c6fd2807SJeff Garzik 	 */
7215c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7216c6fd2807SJeff Garzik 
721741bda9c9STejun Heo 	ata_port_for_each_link(link, ap) {
721841bda9c9STejun Heo 		ata_link_for_each_dev(dev, link)
7219f58229f8STejun Heo 			ata_dev_disable(dev);
722041bda9c9STejun Heo 	}
7221c6fd2807SJeff Garzik 
7222c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7223c6fd2807SJeff Garzik 
7224c6fd2807SJeff Garzik 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
7225c6fd2807SJeff Garzik 	 * will be skipped and retrials will be terminated with bad
7226c6fd2807SJeff Garzik 	 * target.
7227c6fd2807SJeff Garzik 	 */
7228c6fd2807SJeff Garzik 	spin_lock_irqsave(ap->lock, flags);
7229c6fd2807SJeff Garzik 	ata_port_freeze(ap);	/* won't be thawed */
7230c6fd2807SJeff Garzik 	spin_unlock_irqrestore(ap->lock, flags);
7231c6fd2807SJeff Garzik 
7232c6fd2807SJeff Garzik 	ata_port_wait_eh(ap);
723345a66c1cSOleg Nesterov 	cancel_rearming_delayed_work(&ap->hotplug_task);
7234c6fd2807SJeff Garzik 
7235c6fd2807SJeff Garzik  skip_eh:
7236c6fd2807SJeff Garzik 	/* remove the associated SCSI host */
7237cca3974eSJeff Garzik 	scsi_remove_host(ap->scsi_host);
7238c6fd2807SJeff Garzik }
7239c6fd2807SJeff Garzik 
7240c6fd2807SJeff Garzik /**
72410529c159STejun Heo  *	ata_host_detach - Detach all ports of an ATA host
72420529c159STejun Heo  *	@host: Host to detach
72430529c159STejun Heo  *
72440529c159STejun Heo  *	Detach all ports of @host.
72450529c159STejun Heo  *
72460529c159STejun Heo  *	LOCKING:
72470529c159STejun Heo  *	Kernel thread context (may sleep).
72480529c159STejun Heo  */
72490529c159STejun Heo void ata_host_detach(struct ata_host *host)
72500529c159STejun Heo {
72510529c159STejun Heo 	int i;
72520529c159STejun Heo 
72530529c159STejun Heo 	for (i = 0; i < host->n_ports; i++)
72540529c159STejun Heo 		ata_port_detach(host->ports[i]);
72550529c159STejun Heo }
72560529c159STejun Heo 
7257c6fd2807SJeff Garzik /**
7258c6fd2807SJeff Garzik  *	ata_std_ports - initialize ioaddr with standard port offsets.
7259c6fd2807SJeff Garzik  *	@ioaddr: IO address structure to be initialized
7260c6fd2807SJeff Garzik  *
7261c6fd2807SJeff Garzik  *	Utility function which initializes data_addr, error_addr,
7262c6fd2807SJeff Garzik  *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7263c6fd2807SJeff Garzik  *	device_addr, status_addr, and command_addr to standard offsets
7264c6fd2807SJeff Garzik  *	relative to cmd_addr.
7265c6fd2807SJeff Garzik  *
7266c6fd2807SJeff Garzik  *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7267c6fd2807SJeff Garzik  */
7268c6fd2807SJeff Garzik 
7269c6fd2807SJeff Garzik void ata_std_ports(struct ata_ioports *ioaddr)
7270c6fd2807SJeff Garzik {
7271c6fd2807SJeff Garzik 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7272c6fd2807SJeff Garzik 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7273c6fd2807SJeff Garzik 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7274c6fd2807SJeff Garzik 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7275c6fd2807SJeff Garzik 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7276c6fd2807SJeff Garzik 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7277c6fd2807SJeff Garzik 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7278c6fd2807SJeff Garzik 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7279c6fd2807SJeff Garzik 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7280c6fd2807SJeff Garzik 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7281c6fd2807SJeff Garzik }
7282c6fd2807SJeff Garzik 
7283c6fd2807SJeff Garzik 
7284c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7285c6fd2807SJeff Garzik 
7286c6fd2807SJeff Garzik /**
7287c6fd2807SJeff Garzik  *	ata_pci_remove_one - PCI layer callback for device removal
7288c6fd2807SJeff Garzik  *	@pdev: PCI device that was removed
7289c6fd2807SJeff Garzik  *
7290b878ca5dSTejun Heo  *	PCI layer indicates to libata via this hook that hot-unplug or
7291b878ca5dSTejun Heo  *	module unload event has occurred.  Detach all ports.  Resource
7292b878ca5dSTejun Heo  *	release is handled via devres.
7293c6fd2807SJeff Garzik  *
7294c6fd2807SJeff Garzik  *	LOCKING:
7295c6fd2807SJeff Garzik  *	Inherited from PCI layer (may sleep).
7296c6fd2807SJeff Garzik  */
7297c6fd2807SJeff Garzik void ata_pci_remove_one(struct pci_dev *pdev)
7298c6fd2807SJeff Garzik {
72992855568bSJeff Garzik 	struct device *dev = &pdev->dev;
7300cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(dev);
7301c6fd2807SJeff Garzik 
7302f0d36efdSTejun Heo 	ata_host_detach(host);
7303c6fd2807SJeff Garzik }
7304c6fd2807SJeff Garzik 
7305c6fd2807SJeff Garzik /* move to PCI subsystem */
7306c6fd2807SJeff Garzik int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7307c6fd2807SJeff Garzik {
7308c6fd2807SJeff Garzik 	unsigned long tmp = 0;
7309c6fd2807SJeff Garzik 
7310c6fd2807SJeff Garzik 	switch (bits->width) {
7311c6fd2807SJeff Garzik 	case 1: {
7312c6fd2807SJeff Garzik 		u8 tmp8 = 0;
7313c6fd2807SJeff Garzik 		pci_read_config_byte(pdev, bits->reg, &tmp8);
7314c6fd2807SJeff Garzik 		tmp = tmp8;
7315c6fd2807SJeff Garzik 		break;
7316c6fd2807SJeff Garzik 	}
7317c6fd2807SJeff Garzik 	case 2: {
7318c6fd2807SJeff Garzik 		u16 tmp16 = 0;
7319c6fd2807SJeff Garzik 		pci_read_config_word(pdev, bits->reg, &tmp16);
7320c6fd2807SJeff Garzik 		tmp = tmp16;
7321c6fd2807SJeff Garzik 		break;
7322c6fd2807SJeff Garzik 	}
7323c6fd2807SJeff Garzik 	case 4: {
7324c6fd2807SJeff Garzik 		u32 tmp32 = 0;
7325c6fd2807SJeff Garzik 		pci_read_config_dword(pdev, bits->reg, &tmp32);
7326c6fd2807SJeff Garzik 		tmp = tmp32;
7327c6fd2807SJeff Garzik 		break;
7328c6fd2807SJeff Garzik 	}
7329c6fd2807SJeff Garzik 
7330c6fd2807SJeff Garzik 	default:
7331c6fd2807SJeff Garzik 		return -EINVAL;
7332c6fd2807SJeff Garzik 	}
7333c6fd2807SJeff Garzik 
7334c6fd2807SJeff Garzik 	tmp &= bits->mask;
7335c6fd2807SJeff Garzik 
7336c6fd2807SJeff Garzik 	return (tmp == bits->val) ? 1 : 0;
7337c6fd2807SJeff Garzik }
7338c6fd2807SJeff Garzik 
73396ffa01d8STejun Heo #ifdef CONFIG_PM
7340c6fd2807SJeff Garzik void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7341c6fd2807SJeff Garzik {
7342c6fd2807SJeff Garzik 	pci_save_state(pdev);
7343c6fd2807SJeff Garzik 	pci_disable_device(pdev);
73444c90d971STejun Heo 
73454c90d971STejun Heo 	if (mesg.event == PM_EVENT_SUSPEND)
7346c6fd2807SJeff Garzik 		pci_set_power_state(pdev, PCI_D3hot);
7347c6fd2807SJeff Garzik }
7348c6fd2807SJeff Garzik 
7349553c4aa6STejun Heo int ata_pci_device_do_resume(struct pci_dev *pdev)
7350c6fd2807SJeff Garzik {
7351553c4aa6STejun Heo 	int rc;
7352553c4aa6STejun Heo 
7353c6fd2807SJeff Garzik 	pci_set_power_state(pdev, PCI_D0);
7354c6fd2807SJeff Garzik 	pci_restore_state(pdev);
7355553c4aa6STejun Heo 
7356f0d36efdSTejun Heo 	rc = pcim_enable_device(pdev);
7357553c4aa6STejun Heo 	if (rc) {
7358553c4aa6STejun Heo 		dev_printk(KERN_ERR, &pdev->dev,
7359553c4aa6STejun Heo 			   "failed to enable device after resume (%d)\n", rc);
7360553c4aa6STejun Heo 		return rc;
7361553c4aa6STejun Heo 	}
7362553c4aa6STejun Heo 
7363c6fd2807SJeff Garzik 	pci_set_master(pdev);
7364553c4aa6STejun Heo 	return 0;
7365c6fd2807SJeff Garzik }
7366c6fd2807SJeff Garzik 
7367c6fd2807SJeff Garzik int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7368c6fd2807SJeff Garzik {
7369cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7370c6fd2807SJeff Garzik 	int rc = 0;
7371c6fd2807SJeff Garzik 
7372cca3974eSJeff Garzik 	rc = ata_host_suspend(host, mesg);
7373c6fd2807SJeff Garzik 	if (rc)
7374c6fd2807SJeff Garzik 		return rc;
7375c6fd2807SJeff Garzik 
7376c6fd2807SJeff Garzik 	ata_pci_device_do_suspend(pdev, mesg);
7377c6fd2807SJeff Garzik 
7378c6fd2807SJeff Garzik 	return 0;
7379c6fd2807SJeff Garzik }
7380c6fd2807SJeff Garzik 
7381c6fd2807SJeff Garzik int ata_pci_device_resume(struct pci_dev *pdev)
7382c6fd2807SJeff Garzik {
7383cca3974eSJeff Garzik 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
7384553c4aa6STejun Heo 	int rc;
7385c6fd2807SJeff Garzik 
7386553c4aa6STejun Heo 	rc = ata_pci_device_do_resume(pdev);
7387553c4aa6STejun Heo 	if (rc == 0)
7388cca3974eSJeff Garzik 		ata_host_resume(host);
7389553c4aa6STejun Heo 	return rc;
7390c6fd2807SJeff Garzik }
73916ffa01d8STejun Heo #endif /* CONFIG_PM */
73926ffa01d8STejun Heo 
7393c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7394c6fd2807SJeff Garzik 
7395c6fd2807SJeff Garzik 
7396c6fd2807SJeff Garzik static int __init ata_init(void)
7397c6fd2807SJeff Garzik {
7398c6fd2807SJeff Garzik 	ata_probe_timeout *= HZ;
7399c6fd2807SJeff Garzik 	ata_wq = create_workqueue("ata");
7400c6fd2807SJeff Garzik 	if (!ata_wq)
7401c6fd2807SJeff Garzik 		return -ENOMEM;
7402c6fd2807SJeff Garzik 
7403c6fd2807SJeff Garzik 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
7404c6fd2807SJeff Garzik 	if (!ata_aux_wq) {
7405c6fd2807SJeff Garzik 		destroy_workqueue(ata_wq);
7406c6fd2807SJeff Garzik 		return -ENOMEM;
7407c6fd2807SJeff Garzik 	}
7408c6fd2807SJeff Garzik 
7409c6fd2807SJeff Garzik 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7410c6fd2807SJeff Garzik 	return 0;
7411c6fd2807SJeff Garzik }
7412c6fd2807SJeff Garzik 
7413c6fd2807SJeff Garzik static void __exit ata_exit(void)
7414c6fd2807SJeff Garzik {
7415c6fd2807SJeff Garzik 	destroy_workqueue(ata_wq);
7416c6fd2807SJeff Garzik 	destroy_workqueue(ata_aux_wq);
7417c6fd2807SJeff Garzik }
7418c6fd2807SJeff Garzik 
7419a4625085SBrian King subsys_initcall(ata_init);
7420c6fd2807SJeff Garzik module_exit(ata_exit);
7421c6fd2807SJeff Garzik 
7422c6fd2807SJeff Garzik static unsigned long ratelimit_time;
7423c6fd2807SJeff Garzik static DEFINE_SPINLOCK(ata_ratelimit_lock);
7424c6fd2807SJeff Garzik 
7425c6fd2807SJeff Garzik int ata_ratelimit(void)
7426c6fd2807SJeff Garzik {
7427c6fd2807SJeff Garzik 	int rc;
7428c6fd2807SJeff Garzik 	unsigned long flags;
7429c6fd2807SJeff Garzik 
7430c6fd2807SJeff Garzik 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
7431c6fd2807SJeff Garzik 
7432c6fd2807SJeff Garzik 	if (time_after(jiffies, ratelimit_time)) {
7433c6fd2807SJeff Garzik 		rc = 1;
7434c6fd2807SJeff Garzik 		ratelimit_time = jiffies + (HZ/5);
7435c6fd2807SJeff Garzik 	} else
7436c6fd2807SJeff Garzik 		rc = 0;
7437c6fd2807SJeff Garzik 
7438c6fd2807SJeff Garzik 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7439c6fd2807SJeff Garzik 
7440c6fd2807SJeff Garzik 	return rc;
7441c6fd2807SJeff Garzik }
7442c6fd2807SJeff Garzik 
7443c6fd2807SJeff Garzik /**
7444c6fd2807SJeff Garzik  *	ata_wait_register - wait until register value changes
7445c6fd2807SJeff Garzik  *	@reg: IO-mapped register
7446c6fd2807SJeff Garzik  *	@mask: Mask to apply to read register value
7447c6fd2807SJeff Garzik  *	@val: Wait condition
7448c6fd2807SJeff Garzik  *	@interval_msec: polling interval in milliseconds
7449c6fd2807SJeff Garzik  *	@timeout_msec: timeout in milliseconds
7450c6fd2807SJeff Garzik  *
7451c6fd2807SJeff Garzik  *	Waiting for some bits of register to change is a common
7452c6fd2807SJeff Garzik  *	operation for ATA controllers.  This function reads 32bit LE
7453c6fd2807SJeff Garzik  *	IO-mapped register @reg and tests for the following condition.
7454c6fd2807SJeff Garzik  *
7455c6fd2807SJeff Garzik  *	(*@reg & mask) != val
7456c6fd2807SJeff Garzik  *
7457c6fd2807SJeff Garzik  *	If the condition is met, it returns; otherwise, the process is
7458c6fd2807SJeff Garzik  *	repeated after @interval_msec until timeout.
7459c6fd2807SJeff Garzik  *
7460c6fd2807SJeff Garzik  *	LOCKING:
7461c6fd2807SJeff Garzik  *	Kernel thread context (may sleep)
7462c6fd2807SJeff Garzik  *
7463c6fd2807SJeff Garzik  *	RETURNS:
7464c6fd2807SJeff Garzik  *	The final register value.
7465c6fd2807SJeff Garzik  */
7466c6fd2807SJeff Garzik u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7467c6fd2807SJeff Garzik 		      unsigned long interval_msec,
7468c6fd2807SJeff Garzik 		      unsigned long timeout_msec)
7469c6fd2807SJeff Garzik {
7470c6fd2807SJeff Garzik 	unsigned long timeout;
7471c6fd2807SJeff Garzik 	u32 tmp;
7472c6fd2807SJeff Garzik 
7473c6fd2807SJeff Garzik 	tmp = ioread32(reg);
7474c6fd2807SJeff Garzik 
7475c6fd2807SJeff Garzik 	/* Calculate timeout _after_ the first read to make sure
7476c6fd2807SJeff Garzik 	 * preceding writes reach the controller before starting to
7477c6fd2807SJeff Garzik 	 * eat away the timeout.
7478c6fd2807SJeff Garzik 	 */
7479c6fd2807SJeff Garzik 	timeout = jiffies + (timeout_msec * HZ) / 1000;
7480c6fd2807SJeff Garzik 
7481c6fd2807SJeff Garzik 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7482c6fd2807SJeff Garzik 		msleep(interval_msec);
7483c6fd2807SJeff Garzik 		tmp = ioread32(reg);
7484c6fd2807SJeff Garzik 	}
7485c6fd2807SJeff Garzik 
7486c6fd2807SJeff Garzik 	return tmp;
7487c6fd2807SJeff Garzik }
7488c6fd2807SJeff Garzik 
7489c6fd2807SJeff Garzik /*
7490c6fd2807SJeff Garzik  * Dummy port_ops
7491c6fd2807SJeff Garzik  */
7492c6fd2807SJeff Garzik static void ata_dummy_noret(struct ata_port *ap)	{ }
7493c6fd2807SJeff Garzik static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
7494c6fd2807SJeff Garzik static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7495c6fd2807SJeff Garzik 
7496c6fd2807SJeff Garzik static u8 ata_dummy_check_status(struct ata_port *ap)
7497c6fd2807SJeff Garzik {
7498c6fd2807SJeff Garzik 	return ATA_DRDY;
7499c6fd2807SJeff Garzik }
7500c6fd2807SJeff Garzik 
7501c6fd2807SJeff Garzik static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7502c6fd2807SJeff Garzik {
7503c6fd2807SJeff Garzik 	return AC_ERR_SYSTEM;
7504c6fd2807SJeff Garzik }
7505c6fd2807SJeff Garzik 
7506c6fd2807SJeff Garzik const struct ata_port_operations ata_dummy_port_ops = {
7507c6fd2807SJeff Garzik 	.check_status		= ata_dummy_check_status,
7508c6fd2807SJeff Garzik 	.check_altstatus	= ata_dummy_check_status,
7509c6fd2807SJeff Garzik 	.dev_select		= ata_noop_dev_select,
7510c6fd2807SJeff Garzik 	.qc_prep		= ata_noop_qc_prep,
7511c6fd2807SJeff Garzik 	.qc_issue		= ata_dummy_qc_issue,
7512c6fd2807SJeff Garzik 	.freeze			= ata_dummy_noret,
7513c6fd2807SJeff Garzik 	.thaw			= ata_dummy_noret,
7514c6fd2807SJeff Garzik 	.error_handler		= ata_dummy_noret,
7515c6fd2807SJeff Garzik 	.post_internal_cmd	= ata_dummy_qc_noret,
7516c6fd2807SJeff Garzik 	.irq_clear		= ata_dummy_noret,
7517c6fd2807SJeff Garzik 	.port_start		= ata_dummy_ret0,
7518c6fd2807SJeff Garzik 	.port_stop		= ata_dummy_noret,
7519c6fd2807SJeff Garzik };
7520c6fd2807SJeff Garzik 
752121b0ad4fSTejun Heo const struct ata_port_info ata_dummy_port_info = {
752221b0ad4fSTejun Heo 	.port_ops		= &ata_dummy_port_ops,
752321b0ad4fSTejun Heo };
752421b0ad4fSTejun Heo 
7525c6fd2807SJeff Garzik /*
7526c6fd2807SJeff Garzik  * libata is essentially a library of internal helper functions for
7527c6fd2807SJeff Garzik  * low-level ATA host controller drivers.  As such, the API/ABI is
7528c6fd2807SJeff Garzik  * likely to change as new drivers are added and updated.
7529c6fd2807SJeff Garzik  * Do not depend on ABI/API stability.
7530c6fd2807SJeff Garzik  */
7531c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7532c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7533c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7534c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
753521b0ad4fSTejun Heo EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7536c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_bios_param);
7537c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_ports);
7538cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_init);
7539f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc);
7540f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7541ecef7253STejun Heo EXPORT_SYMBOL_GPL(ata_host_start);
7542f3187195STejun Heo EXPORT_SYMBOL_GPL(ata_host_register);
7543f5cda257STejun Heo EXPORT_SYMBOL_GPL(ata_host_activate);
75440529c159STejun Heo EXPORT_SYMBOL_GPL(ata_host_detach);
7545c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init);
7546c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_sg_init_one);
7547c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_hsm_move);
7548c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete);
7549c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7550c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7551c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_load);
7552c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_read);
7553c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7554c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_dev_select);
755543727fbcSJeff Garzik EXPORT_SYMBOL_GPL(sata_print_link_status);
7556c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7557c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7558c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_check_status);
7559c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_altstatus);
7560c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_exec_command);
7561c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_start);
7562d92e74d3SAlan Cox EXPORT_SYMBOL_GPL(ata_sff_port_start);
7563c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_interrupt);
756404351821SAlan EXPORT_SYMBOL_GPL(ata_do_set_mode);
75650d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer);
75660d5ff566STejun Heo EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
756731cc23b3STejun Heo EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7568c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_qc_prep);
7569d26fc955SAlan Cox EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7570c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7571c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7572c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_start);
7573c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7574c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_status);
7575c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7576c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7577c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7578c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7579c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7580c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7581c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_probe);
758210305f0fSAlan EXPORT_SYMBOL_GPL(ata_dev_disable);
7583c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_set_spd);
7584936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_debounce);
7585936fd732STejun Heo EXPORT_SYMBOL_GPL(sata_link_resume);
7586c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_bus_reset);
7587c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_prereset);
7588c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_softreset);
7589cc0680a5STejun Heo EXPORT_SYMBOL_GPL(sata_link_hardreset);
7590c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_std_hardreset);
7591c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_std_postreset);
7592c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_classify);
7593c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_dev_pair);
7594c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_disable);
7595c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_ratelimit);
7596c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_wait_register);
7597c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_busy_sleep);
759888ff6eafSTejun Heo EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7599d4b2bab4STejun Heo EXPORT_SYMBOL_GPL(ata_wait_ready);
7600c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_queue_task);
7601c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7602c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7603c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7604c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7605c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7606c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_host_intr);
7607c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_valid);
7608c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_read);
7609c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write);
7610c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7611936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_online);
7612936fd732STejun Heo EXPORT_SYMBOL_GPL(ata_link_offline);
76136ffa01d8STejun Heo #ifdef CONFIG_PM
7614cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_suspend);
7615cca3974eSJeff Garzik EXPORT_SYMBOL_GPL(ata_host_resume);
76166ffa01d8STejun Heo #endif /* CONFIG_PM */
7617c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_string);
7618c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_id_c_string);
761910305f0fSAlan EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7620c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7621c6fd2807SJeff Garzik 
7622c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7623c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_compute);
7624c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_timing_merge);
7625c6fd2807SJeff Garzik 
7626c6fd2807SJeff Garzik #ifdef CONFIG_PCI
7627c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(pci_test_config_bits);
7628d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
76291626aeb8STejun Heo EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7630d583bc18STejun Heo EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7631c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_init_one);
7632c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_remove_one);
76336ffa01d8STejun Heo #ifdef CONFIG_PM
7634c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7635c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7636c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7637c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_device_resume);
76386ffa01d8STejun Heo #endif /* CONFIG_PM */
7639c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7640c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7641c6fd2807SJeff Garzik #endif /* CONFIG_PCI */
7642c6fd2807SJeff Garzik 
764331f88384STejun Heo EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
76443af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
76453af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
76463af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
76473af9a77aSTejun Heo EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
76483af9a77aSTejun Heo 
7649b64bbc39STejun Heo EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7650b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7651b64bbc39STejun Heo EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7652cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_desc);
7653cbcdd875STejun Heo #ifdef CONFIG_PCI
7654cbcdd875STejun Heo EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7655cbcdd875STejun Heo #endif /* CONFIG_PCI */
7656c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7657dbd82616STejun Heo EXPORT_SYMBOL_GPL(ata_link_abort);
7658c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_abort);
7659c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_port_freeze);
76607d77b247STejun Heo EXPORT_SYMBOL_GPL(sata_async_notification);
7661c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7662c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7663c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7664c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7665c6fd2807SJeff Garzik EXPORT_SYMBOL_GPL(ata_do_eh);
766683625006SAkira Iguchi EXPORT_SYMBOL_GPL(ata_irq_on);
7667a619f981SAkira Iguchi EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7668be0d18dfSAlan Cox 
7669be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_40wire);
7670be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_80wire);
7671be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_unknown);
7672be0d18dfSAlan Cox EXPORT_SYMBOL_GPL(ata_cable_sata);
7673