/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
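
/*
 * Illustration only (the helpers live elsewhere in libata): each triplet is
 * the @params argument expected by the SCR debounce helpers -- sample the
 * PHY every "interval" msecs until the status holds steady for "duration"
 * msecs, giving up after "timeout" msecs.  A hot-plug capable caller would
 * typically do something like:
 *
 *	rc = sata_link_resume(link, sata_deb_timing_hotplug, deadline);
 */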

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
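
/*
 * Example usage, per the ata_force_cbl()/ata_force_spd_limit() comments
 * below: booting with "libata.force=1.00:40c,udma4" forces 40-wire cable
 * detection and a UDMA4 transfer-mode limit for device 0 on port 1, and
 * (assuming the standard force-parameter tables) "libata.force=2:1.5Gbps"
 * caps the link speed of every link on port 2.
 */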

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_spd_limit - force SATA spd limit according to libata.force
 *	@link: ATA link of interest
 *
 *	Force SATA spd limit according to libata.force and whine about
 *	it.  When only the port part is specified (e.g. 1:), the limit
 *	applies to all links connected to both the host link and all
 *	fan-out ports connected via PMP.  If the device part is
 *	specified as 0 (e.g. 1.00:), it specifies the first fan-out
 *	link not the host link.  Device number 15 always points to the
 *	host link whether PMP is attached or not.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_spd_limit(struct ata_link *link)
{
	int linkno, i;

	if (ata_is_host_link(link))
		linkno = 15;
	else
		linkno = link->pmp;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		if (!fe->param.spd_limit)
			continue;

		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
		ata_link_printk(link, KERN_NOTICE,
			"FORCE: PHY spd limit set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)
		alt_devno = 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)
		alt_devno = 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
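
/*
 * Illustration (not from this file): an LLD emitting the 20-byte
 * Register - Host to Device FIS for a queued command might do
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * after which fis[0] holds the FIS type (0x27) and bit 7 of fis[1]
 * marks it as a Command rather than a Control FIS.
 */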

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
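
/*
 * Worked example: for a DMA, LBA48, FUA write the lookup index is
 * 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, selecting
 * ATA_CMD_WRITE_FUA_EXT from ata_rw_cmds[] above.  A FUA read lands
 * on a zero entry (index 22), so ata_rwcmd_protocol() returns -1.
 */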

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
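
/*
 * Worked example: in the CHS branch with dev->heads == 16 and
 * dev->sectors == 63, a taskfile addressing cyl 2, head 3, sect 10
 * yields block = (2 * 16 + 3) * 63 + 10 = 2215 (sect is 1-based).
 */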

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
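
/*
 * Worked example: in the NCQ branch above, tag 5 is encoded as
 * tf->nsect = 5 << 3 = 0x28, and the sector count goes into the
 * feature/hob_feature pair -- n_block 0x1234 gives tf->feature = 0x34
 * and tf->hob_feature = 0x12.
 */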

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned long xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
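
/*
 * Illustration: ata_pack_xfermask(0x1f, 0x07, 0x3f) combines PIO0-4,
 * MWDMA0-2 and UDMA0-5 into a single xfer_mask; ata_unpack_xfermask()
 * below performs the inverse split.
 */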

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
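
/*
 * Illustration: only the topmost bit matters -- an xfer_mask covering
 * PIO0-4 and MWDMA0-2 maps to XFER_MW_DMA_2, the fastest mode present.
 */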

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
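
/*
 * Illustration: the reverse mapping fans out downwards within one
 * class: ata_xfer_mode2mask(XFER_MW_DMA_2) has the MWDMA0, MWDMA1 and
 * MWDMA2 bits set, but none of the PIO bits.
 */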

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because disks are too stupid to know that
	 * if the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL; instead they just
	 * give up.  So, for medium_power to work at all,
	 * we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disables DIPM if the device does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions, which effectively
		 * disables DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}

/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	the policy, and call driver specific callbacks for disabling
 *	Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec never mentioned using different signatures for ATA/ATAPI
	 * devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
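
/*
 * Illustration (assuming the ATA_ID_PROD/ATA_ID_PROD_LEN constants from
 * <linux/ata.h>): extracting the model string from IDENTIFY data --
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * sizeof(model) is odd as required: the 40-byte field plus the NUL.
 */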

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However, it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user (low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command that consists only of the opcode
 *	@cmd itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
1831 
1832 /**
1833  *	ata_pio_need_iordy	-	check if iordy needed
1834  *	@adev: ATA device
1835  *
1836  *	Check if the current speed of the device requires IORDY. Used
1837  *	by various controllers for chip configuration.
1838  */
1839 
1840 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1841 {
1842 	/* Controller doesn't support  IORDY. Probably a pointless check
1843 	   as the caller should know this */
1844 	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1845 		return 0;
1846 	/* PIO3 and higher it is mandatory */
1847 	if (adev->pio_mode > XFER_PIO_2)
1848 		return 1;
1849 	/* We turn it on when possible */
1850 	if (ata_id_has_iordy(adev->id))
1851 		return 1;
1852 	return 0;
1853 }
1854 
1855 /**
1856  *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1857  *	@adev: ATA device
1858  *
1859  *	Compute the highest mode possible if we are not using iordy. Return
1860  *	-1 if no iordy mode is available.
1861  */
1862 
1863 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1864 {
1865 	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1866 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1867 		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1868 		/* Is the speed faster than the drive allows non IORDY ? */
1869 		if (pio) {
1870 			/* This is cycle times not frequency - watch the logic! */
1871 			if (pio > 240)	/* PIO2 is 240nS per cycle */
1872 				return 3 << ATA_SHIFT_PIO;
1873 			return 7 << ATA_SHIFT_PIO;
1874 		}
1875 	}
1876 	return 3 << ATA_SHIFT_PIO;
1877 }
1878 
1879 /**
1880  *	ata_dev_read_id - Read ID data from the specified device
1881  *	@dev: target device
1882  *	@p_class: pointer to class of the target device (may be changed)
1883  *	@flags: ATA_READID_* flags
1884  *	@id: buffer to read IDENTIFY data into
1885  *
1886  *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1887  *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1888  *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1889  *	for pre-ATA4 drives.
1890  *
1891  *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1892  *	now we abort if we hit that case.
1893  *
1894  *	LOCKING:
1895  *	Kernel thread context (may sleep)
1896  *
1897  *	RETURNS:
1898  *	0 on success, -errno otherwise.
1899  */
1900 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1901 		    unsigned int flags, u16 *id)
1902 {
1903 	struct ata_port *ap = dev->link->ap;
1904 	unsigned int class = *p_class;
1905 	struct ata_taskfile tf;
1906 	unsigned int err_mask = 0;
1907 	const char *reason;
1908 	int may_fallback = 1, tried_spinup = 0;
1909 	int rc;
1910 
1911 	if (ata_msg_ctl(ap))
1912 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1913 
1914  retry:
1915 	ata_tf_init(dev, &tf);
1916 
1917 	switch (class) {
1918 	case ATA_DEV_ATA:
1919 		tf.command = ATA_CMD_ID_ATA;
1920 		break;
1921 	case ATA_DEV_ATAPI:
1922 		tf.command = ATA_CMD_ID_ATAPI;
1923 		break;
1924 	default:
1925 		rc = -ENODEV;
1926 		reason = "unsupported class";
1927 		goto err_out;
1928 	}
1929 
1930 	tf.protocol = ATA_PROT_PIO;
1931 
1932 	/* Some devices choke if TF registers contain garbage.  Make
1933 	 * sure those are properly initialized.
1934 	 */
1935 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1936 
1937 	/* Device presence detection is unreliable on some
1938 	 * controllers.  Always poll IDENTIFY if available.
1939 	 */
1940 	tf.flags |= ATA_TFLAG_POLLING;
1941 
1942 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1943 				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1944 	if (err_mask) {
1945 		if (err_mask & AC_ERR_NODEV_HINT) {
1946 			ata_dev_printk(dev, KERN_DEBUG,
1947 				       "NODEV after polling detection\n");
1948 			return -ENOENT;
1949 		}
1950 
1951 		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1952 			/* Device or controller might have reported
1953 			 * the wrong device class.  Give a shot at the
1954 			 * other IDENTIFY if the current one is
1955 			 * aborted by the device.
1956 			 */
1957 			if (may_fallback) {
1958 				may_fallback = 0;
1959 
1960 				if (class == ATA_DEV_ATA)
1961 					class = ATA_DEV_ATAPI;
1962 				else
1963 					class = ATA_DEV_ATA;
1964 				goto retry;
1965 			}
1966 
1967 			/* Control reaches here iff the device aborted
1968 			 * both flavors of IDENTIFYs, which happens
1969 			 * sometimes with phantom devices.
1970 			 */
1971 			ata_dev_printk(dev, KERN_DEBUG,
1972 				       "both IDENTIFYs aborted, assuming NODEV\n");
1973 			return -ENOENT;
1974 		}
1975 
1976 		rc = -EIO;
1977 		reason = "I/O error";
1978 		goto err_out;
1979 	}
1980 
1981 	/* Falling back doesn't make sense if ID data was read
1982 	 * successfully at least once.
1983 	 */
1984 	may_fallback = 0;
1985 
1986 	swap_buf_le16(id, ATA_ID_WORDS);
1987 
1988 	/* sanity check */
1989 	rc = -EINVAL;
1990 	reason = "device reports invalid type";
1991 
1992 	if (class == ATA_DEV_ATA) {
1993 		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1994 			goto err_out;
1995 	} else {
1996 		if (ata_id_is_ata(id))
1997 			goto err_out;
1998 	}
1999 
2000 	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2001 		tried_spinup = 1;
2002 		/*
2003 		 * Drive powered-up in standby mode, and requires a specific
2004 		 * SET_FEATURES spin-up subcommand before it will accept
2005 		 * anything other than the original IDENTIFY command.
2006 		 */
2007 		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2008 		if (err_mask && id[2] != 0x738c) {
2009 			rc = -EIO;
2010 			reason = "SPINUP failed";
2011 			goto err_out;
2012 		}
2013 		/*
2014 		 * If the drive initially returned incomplete IDENTIFY info,
2015 		 * we now must reissue the IDENTIFY command.
2016 		 */
2017 		if (id[2] == 0x37c8)
2018 			goto retry;
2019 	}
2020 
2021 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2022 		/*
2023 		 * The exact sequence expected by certain pre-ATA4 drives is:
2024 		 * SRST RESET
2025 		 * IDENTIFY (optional in early ATA)
2026 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2027 		 * anything else..
2028 		 * Some drives were very specific about that exact sequence.
2029 		 *
2030 		 * Note that ATA4 says LBA is mandatory so the second check
2031 		 * should never trigger.
2032 		 */
2033 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2034 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2035 			if (err_mask) {
2036 				rc = -EIO;
2037 				reason = "INIT_DEV_PARAMS failed";
2038 				goto err_out;
2039 			}
2040 
2041 			/* current CHS translation info (id[53-58]) might be
2042 			 * changed.  Reread the IDENTIFY device info.
2043 			 */
2044 			flags &= ~ATA_READID_POSTRESET;
2045 			goto retry;
2046 		}
2047 	}
2048 
2049 	*p_class = class;
2050 
2051 	return 0;
2052 
2053  err_out:
2054 	if (ata_msg_warn(ap))
2055 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2056 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2057 	return rc;
2058 }
2059 
2060 static inline u8 ata_dev_knobble(struct ata_device *dev)
2061 {
2062 	struct ata_port *ap = dev->link->ap;
2063 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2064 }
2065 
2066 static void ata_dev_config_ncq(struct ata_device *dev,
2067 			       char *desc, size_t desc_sz)
2068 {
2069 	struct ata_port *ap = dev->link->ap;
2070 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2071 
2072 	if (!ata_id_has_ncq(dev->id)) {
2073 		desc[0] = '\0';
2074 		return;
2075 	}
2076 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2077 		snprintf(desc, desc_sz, "NCQ (not used)");
2078 		return;
2079 	}
2080 	if (ap->flags & ATA_FLAG_NCQ) {
2081 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2082 		dev->flags |= ATA_DFLAG_NCQ;
2083 	}
2084 
2085 	if (hdepth >= ddepth)
2086 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2087 	else
2088 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2089 }
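
/*
 * Worked example (illustrative): with ATA_MAX_QUEUE - 1 = 31 host tags
 * and a drive advertising a queue depth of 32, the helper above yields
 * "NCQ (depth 31/32)"; when the host can queue at least as deeply as
 * the drive, simply "NCQ (depth 32)".
 */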
2090 
2091 /**
2092  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2093  *	@dev: Target device to configure
2094  *
2095  *	Configure @dev according to @dev->id.  Generic and low-level
2096  *	driver specific fixups are also applied.
2097  *
2098  *	LOCKING:
2099  *	Kernel thread context (may sleep)
2100  *
2101  *	RETURNS:
2102  *	0 on success, -errno otherwise
2103  */
2104 int ata_dev_configure(struct ata_device *dev)
2105 {
2106 	struct ata_port *ap = dev->link->ap;
2107 	struct ata_eh_context *ehc = &dev->link->eh_context;
2108 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2109 	const u16 *id = dev->id;
2110 	unsigned long xfer_mask;
2111 	char revbuf[7];		/* XYZ-99\0 */
2112 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2113 	char modelbuf[ATA_ID_PROD_LEN+1];
2114 	int rc;
2115 
2116 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2117 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2118 			       __func__);
2119 		return 0;
2120 	}
2121 
2122 	if (ata_msg_probe(ap))
2123 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2124 
2125 	/* set horkage */
2126 	dev->horkage |= ata_dev_blacklisted(dev);
2127 	ata_force_horkage(dev);
2128 
2129 	/* let ACPI work its magic */
2130 	rc = ata_acpi_on_devcfg(dev);
2131 	if (rc)
2132 		return rc;
2133 
2134 	/* massage HPA, do it early as it might change IDENTIFY data */
2135 	rc = ata_hpa_resize(dev);
2136 	if (rc)
2137 		return rc;
2138 
2139 	/* print device capabilities */
2140 	if (ata_msg_probe(ap))
2141 		ata_dev_printk(dev, KERN_DEBUG,
2142 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2143 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2144 			       __func__,
2145 			       id[49], id[82], id[83], id[84],
2146 			       id[85], id[86], id[87], id[88]);
2147 
2148 	/* initialize to-be-configured parameters */
2149 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2150 	dev->max_sectors = 0;
2151 	dev->cdb_len = 0;
2152 	dev->n_sectors = 0;
2153 	dev->cylinders = 0;
2154 	dev->heads = 0;
2155 	dev->sectors = 0;
2156 
2157 	/*
2158 	 * common ATA, ATAPI feature tests
2159 	 */
2160 
2161 	/* find max transfer mode; for printk only */
2162 	xfer_mask = ata_id_xfermask(id);
2163 
2164 	if (ata_msg_probe(ap))
2165 		ata_dump_id(id);
2166 
2167 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2168 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2169 			sizeof(fwrevbuf));
2170 
2171 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2172 			sizeof(modelbuf));
2173 
2174 	/* ATA-specific feature tests */
2175 	if (dev->class == ATA_DEV_ATA) {
2176 		if (ata_id_is_cfa(id)) {
2177 			if (id[162] & 1) /* CPRM may make this media unusable */
2178 				ata_dev_printk(dev, KERN_WARNING,
2179 					       "supports DRM functions and may "
2180 					       "not be fully accessible.\n");
2181 			snprintf(revbuf, 7, "CFA");
2182 		} else {
2183 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2184 			/* Warn the user if the device has TPM extensions */
2185 			if (ata_id_has_tpm(id))
2186 				ata_dev_printk(dev, KERN_WARNING,
2187 					       "supports DRM functions and may "
2188 					       "not be fully accessible.\n");
2189 		}
2190 
2191 		dev->n_sectors = ata_id_n_sectors(id);
2192 
2193 		if (dev->id[59] & 0x100)
2194 			dev->multi_count = dev->id[59] & 0xff;
2195 
2196 		if (ata_id_has_lba(id)) {
2197 			const char *lba_desc;
2198 			char ncq_desc[20];
2199 
2200 			lba_desc = "LBA";
2201 			dev->flags |= ATA_DFLAG_LBA;
2202 			if (ata_id_has_lba48(id)) {
2203 				dev->flags |= ATA_DFLAG_LBA48;
2204 				lba_desc = "LBA48";
2205 
2206 				if (dev->n_sectors >= (1UL << 28) &&
2207 				    ata_id_has_flush_ext(id))
2208 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2209 			}
2210 
2211 			/* config NCQ */
2212 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2213 
2214 			/* print device info to dmesg */
2215 			if (ata_msg_drv(ap) && print_info) {
2216 				ata_dev_printk(dev, KERN_INFO,
2217 					"%s: %s, %s, max %s\n",
2218 					revbuf, modelbuf, fwrevbuf,
2219 					ata_mode_string(xfer_mask));
2220 				ata_dev_printk(dev, KERN_INFO,
2221 					"%Lu sectors, multi %u: %s %s\n",
2222 					(unsigned long long)dev->n_sectors,
2223 					dev->multi_count, lba_desc, ncq_desc);
2224 			}
2225 		} else {
2226 			/* CHS */
2227 
2228 			/* Default translation */
2229 			dev->cylinders	= id[1];
2230 			dev->heads	= id[3];
2231 			dev->sectors	= id[6];
2232 
2233 			if (ata_id_current_chs_valid(id)) {
2234 				/* Current CHS translation is valid. */
2235 				dev->cylinders = id[54];
2236 				dev->heads     = id[55];
2237 				dev->sectors   = id[56];
2238 			}
2239 
2240 			/* print device info to dmesg */
2241 			if (ata_msg_drv(ap) && print_info) {
2242 				ata_dev_printk(dev, KERN_INFO,
2243 					"%s: %s, %s, max %s\n",
2244 					revbuf,	modelbuf, fwrevbuf,
2245 					ata_mode_string(xfer_mask));
2246 				ata_dev_printk(dev, KERN_INFO,
2247 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2248 					(unsigned long long)dev->n_sectors,
2249 					dev->multi_count, dev->cylinders,
2250 					dev->heads, dev->sectors);
2251 			}
2252 		}
2253 
2254 		dev->cdb_len = 16;
2255 	}
2256 
2257 	/* ATAPI-specific feature tests */
2258 	else if (dev->class == ATA_DEV_ATAPI) {
2259 		const char *cdb_intr_string = "";
2260 		const char *atapi_an_string = "";
2261 		const char *dma_dir_string = "";
2262 		u32 sntf;
2263 
2264 		rc = atapi_cdb_len(id);
2265 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2266 			if (ata_msg_warn(ap))
2267 				ata_dev_printk(dev, KERN_WARNING,
2268 					       "unsupported CDB len\n");
2269 			rc = -EINVAL;
2270 			goto err_out_nosup;
2271 		}
2272 		dev->cdb_len = (unsigned int) rc;
2273 
2274 		/* Enable ATAPI AN if both the host and the device
2275 		 * support it.  If a PMP is attached, SNTF is required
2276 		 * to enable ATAPI AN to discern between PHY status
2277 		 * changed notifications and ATAPI ANs.
2278 		 */
2279 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2280 		    (!sata_pmp_attached(ap) ||
2281 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2282 			unsigned int err_mask;
2283 
2284 			/* issue SET feature command to turn this on */
2285 			err_mask = ata_dev_set_feature(dev,
2286 					SETFEATURES_SATA_ENABLE, SATA_AN);
2287 			if (err_mask)
2288 				ata_dev_printk(dev, KERN_ERR,
2289 					"failed to enable ATAPI AN "
2290 					"(err_mask=0x%x)\n", err_mask);
2291 			else {
2292 				dev->flags |= ATA_DFLAG_AN;
2293 				atapi_an_string = ", ATAPI AN";
2294 			}
2295 		}
2296 
2297 		if (ata_id_cdb_intr(dev->id)) {
2298 			dev->flags |= ATA_DFLAG_CDB_INTR;
2299 			cdb_intr_string = ", CDB intr";
2300 		}
2301 
2302 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2303 			dev->flags |= ATA_DFLAG_DMADIR;
2304 			dma_dir_string = ", DMADIR";
2305 		}
2306 
2307 		/* print device info to dmesg */
2308 		if (ata_msg_drv(ap) && print_info)
2309 			ata_dev_printk(dev, KERN_INFO,
2310 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2311 				       modelbuf, fwrevbuf,
2312 				       ata_mode_string(xfer_mask),
2313 				       cdb_intr_string, atapi_an_string,
2314 				       dma_dir_string);
2315 	}
2316 
2317 	/* determine max_sectors */
2318 	dev->max_sectors = ATA_MAX_SECTORS;
2319 	if (dev->flags & ATA_DFLAG_LBA48)
2320 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2321 
2322 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2323 		if (ata_id_has_hipm(dev->id))
2324 			dev->flags |= ATA_DFLAG_HIPM;
2325 		if (ata_id_has_dipm(dev->id))
2326 			dev->flags |= ATA_DFLAG_DIPM;
2327 	}
2328 
2329 	/* Limit PATA drives behind SATA-PATA bridges to UDMA5 and
2330 	   200 sectors per transfer */
2331 	if (ata_dev_knobble(dev)) {
2332 		if (ata_msg_drv(ap) && print_info)
2333 			ata_dev_printk(dev, KERN_INFO,
2334 				       "applying bridge limits\n");
2335 		dev->udma_mask &= ATA_UDMA5;
2336 		dev->max_sectors = ATA_MAX_SECTORS;
2337 	}
2338 
2339 	if ((dev->class == ATA_DEV_ATAPI) &&
2340 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2341 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2342 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2343 	}
2344 
2345 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2346 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2347 					 dev->max_sectors);
2348 
2349 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2350 		dev->horkage |= ATA_HORKAGE_IPM;
2351 
2352 		/* reset link pm_policy for this port to no pm */
2353 		ap->pm_policy = MAX_PERFORMANCE;
2354 	}
2355 
2356 	if (ap->ops->dev_config)
2357 		ap->ops->dev_config(dev);
2358 
2359 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2360 		/* Let the user know. We don't want to disallow opens for
2361 		   rescue purposes, or in case the vendor is just a blithering
2362 		   idiot. Do this after the dev_config call as some controllers
2363 		   with buggy firmware may want to avoid reporting false device
2364 		   bugs */
2365 
2366 		if (print_info) {
2367 			ata_dev_printk(dev, KERN_WARNING,
2368 "Drive reports diagnostics failure. This may indicate a drive\n");
2369 			ata_dev_printk(dev, KERN_WARNING,
2370 "fault or invalid emulation. Contact drive vendor for information.\n");
2371 		}
2372 	}
2373 
2374 	return 0;
2375 
2376 err_out_nosup:
2377 	if (ata_msg_probe(ap))
2378 		ata_dev_printk(dev, KERN_DEBUG,
2379 			       "%s: EXIT, err\n", __func__);
2380 	return rc;
2381 }
2382 
2383 /**
2384  *	ata_cable_40wire	-	return 40 wire cable type
2385  *	@ap: port
2386  *
2387  *	Helper method for drivers which want to hardwire 40 wire cable
2388  *	detection.
2389  */
2390 
2391 int ata_cable_40wire(struct ata_port *ap)
2392 {
2393 	return ATA_CBL_PATA40;
2394 }
2395 
2396 /**
2397  *	ata_cable_80wire	-	return 80 wire cable type
2398  *	@ap: port
2399  *
2400  *	Helper method for drivers which want to hardwire 80 wire cable
2401  *	detection.
2402  */
2403 
2404 int ata_cable_80wire(struct ata_port *ap)
2405 {
2406 	return ATA_CBL_PATA80;
2407 }
2408 
2409 /**
2410  *	ata_cable_unknown	-	return unknown PATA cable.
2411  *	@ap: port
2412  *
2413  *	Helper method for drivers which have no PATA cable detection.
2414  */
2415 
2416 int ata_cable_unknown(struct ata_port *ap)
2417 {
2418 	return ATA_CBL_PATA_UNK;
2419 }
2420 
2421 /**
2422  *	ata_cable_ignore	-	return ignored PATA cable.
2423  *	@ap: port
2424  *
2425  *	Helper method for drivers which don't use cable type to limit
2426  *	transfer mode.
2427  */
2428 int ata_cable_ignore(struct ata_port *ap)
2429 {
2430 	return ATA_CBL_PATA_IGN;
2431 }
2432 
2433 /**
2434  *	ata_cable_sata	-	return SATA cable type
2435  *	@ap: port
2436  *
2437  *	Helper method for drivers which have SATA cables
2438  */
2439 
2440 int ata_cable_sata(struct ata_port *ap)
2441 {
2442 	return ATA_CBL_SATA;
2443 }
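
/*
 * Illustrative sketch: drivers that know their cabling up front simply
 * wire one of the helpers above into their port operations instead of
 * implementing cable detection (my_port_ops is a hypothetical name):
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 */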
2444 
2445 /**
2446  *	ata_bus_probe - Reset and probe ATA bus
2447  *	@ap: Bus to probe
2448  *
2449  *	Master ATA bus probing function.  Initiates a hardware-dependent
2450  *	bus reset, then attempts to identify any devices found on
2451  *	the bus.
2452  *
2453  *	LOCKING:
2454  *	PCI/etc. bus probe sem.
2455  *
2456  *	RETURNS:
2457  *	Zero on success, negative errno otherwise.
2458  */
2459 
2460 int ata_bus_probe(struct ata_port *ap)
2461 {
2462 	unsigned int classes[ATA_MAX_DEVICES];
2463 	int tries[ATA_MAX_DEVICES];
2464 	int rc;
2465 	struct ata_device *dev;
2466 
2467 	ata_port_probe(ap);
2468 
2469 	ata_link_for_each_dev(dev, &ap->link)
2470 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2471 
2472  retry:
2473 	ata_link_for_each_dev(dev, &ap->link) {
2474 		/* If we issue an SRST then an ATA drive (not ATAPI)
2475 		 * may change configuration and be in PIO0 timing. If
2476 		 * we do a hard reset (or are coming from power on)
2477 		 * this is true for ATA or ATAPI. Until we've set a
2478 		 * suitable controller mode we should not touch the
2479 		 * bus as we may be talking too fast.
2480 		 */
2481 		dev->pio_mode = XFER_PIO_0;
2482 
2483 		/* If the controller has a pio mode setup function
2484 		 * then use it to set the chipset to rights. Don't
2485 		 * touch the DMA setup as that will be dealt with when
2486 		 * configuring devices.
2487 		 */
2488 		if (ap->ops->set_piomode)
2489 			ap->ops->set_piomode(ap, dev);
2490 	}
2491 
2492 	/* reset and determine device classes */
2493 	ap->ops->phy_reset(ap);
2494 
2495 	ata_link_for_each_dev(dev, &ap->link) {
2496 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2497 		    dev->class != ATA_DEV_UNKNOWN)
2498 			classes[dev->devno] = dev->class;
2499 		else
2500 			classes[dev->devno] = ATA_DEV_NONE;
2501 
2502 		dev->class = ATA_DEV_UNKNOWN;
2503 	}
2504 
2505 	ata_port_probe(ap);
2506 
2507 	/* read IDENTIFY page and configure devices. We have to do the identify
2508 	   specific sequence bass-ackwards so that PDIAG- is released by
2509 	   the slave device */
2510 
2511 	ata_link_for_each_dev_reverse(dev, &ap->link) {
2512 		if (tries[dev->devno])
2513 			dev->class = classes[dev->devno];
2514 
2515 		if (!ata_dev_enabled(dev))
2516 			continue;
2517 
2518 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2519 				     dev->id);
2520 		if (rc)
2521 			goto fail;
2522 	}
2523 
2524 	/* Now ask for the cable type as PDIAG- should have been released */
2525 	if (ap->ops->cable_detect)
2526 		ap->cbl = ap->ops->cable_detect(ap);
2527 
2528 	/* We may have SATA bridge glue hiding here irrespective of the
2529 	   reported cable types and sensed types */
2530 	ata_link_for_each_dev(dev, &ap->link) {
2531 		if (!ata_dev_enabled(dev))
2532 			continue;
2533 		/* SATA drives indicate we have a bridge. We don't know which
2534 		   end of the link the bridge is on, which is a problem */
2535 		if (ata_id_is_sata(dev->id))
2536 			ap->cbl = ATA_CBL_SATA;
2537 	}
2538 
2539 	/* After the identify sequence we can now set up the devices. We do
2540 	   this in the normal order so that the user doesn't get confused */
2541 
2542 	ata_link_for_each_dev(dev, &ap->link) {
2543 		if (!ata_dev_enabled(dev))
2544 			continue;
2545 
2546 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2547 		rc = ata_dev_configure(dev);
2548 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2549 		if (rc)
2550 			goto fail;
2551 	}
2552 
2553 	/* configure transfer mode */
2554 	rc = ata_set_mode(&ap->link, &dev);
2555 	if (rc)
2556 		goto fail;
2557 
2558 	ata_link_for_each_dev(dev, &ap->link)
2559 		if (ata_dev_enabled(dev))
2560 			return 0;
2561 
2562 	/* no device present, disable port */
2563 	ata_port_disable(ap);
2564 	return -ENODEV;
2565 
2566  fail:
2567 	tries[dev->devno]--;
2568 
2569 	switch (rc) {
2570 	case -EINVAL:
2571 		/* eeek, something went very wrong, give up */
2572 		tries[dev->devno] = 0;
2573 		break;
2574 
2575 	case -ENODEV:
2576 		/* give it just one more chance */
2577 		tries[dev->devno] = min(tries[dev->devno], 1); /* fall through */
2578 	case -EIO:
2579 		if (tries[dev->devno] == 1) {
2580 			/* This is the last chance, better to slow
2581 			 * down than lose it.
2582 			 */
2583 			sata_down_spd_limit(&ap->link);
2584 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2585 		}
2586 	}
2587 
2588 	if (!tries[dev->devno])
2589 		ata_dev_disable(dev);
2590 
2591 	goto retry;
2592 }
2593 
2594 /**
2595  *	ata_port_probe - Mark port as enabled
2596  *	@ap: Port for which we indicate enablement
2597  *
2598  *	Modify @ap data structure such that the system
2599  *	thinks that the entire port is enabled.
2600  *
2601  *	LOCKING: host lock, or some other form of
2602  *	serialization.
2603  */
2604 
2605 void ata_port_probe(struct ata_port *ap)
2606 {
2607 	ap->flags &= ~ATA_FLAG_DISABLED;
2608 }
2609 
2610 /**
2611  *	sata_print_link_status - Print SATA link status
2612  *	@link: SATA link to printk link status about
2613  *
2614  *	This function prints link speed and status of a SATA link.
2615  *
2616  *	LOCKING:
2617  *	None.
2618  */
2619 static void sata_print_link_status(struct ata_link *link)
2620 {
2621 	u32 sstatus, scontrol, tmp;
2622 
2623 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2624 		return;
2625 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2626 
2627 	if (ata_link_online(link)) {
2628 		tmp = (sstatus >> 4) & 0xf;
2629 		ata_link_printk(link, KERN_INFO,
2630 				"SATA link up %s (SStatus %X SControl %X)\n",
2631 				sata_spd_string(tmp), sstatus, scontrol);
2632 	} else {
2633 		ata_link_printk(link, KERN_INFO,
2634 				"SATA link down (SStatus %X SControl %X)\n",
2635 				sstatus, scontrol);
2636 	}
2637 }
2638 
2639 /**
2640  *	ata_dev_pair		-	return other device on cable
2641  *	@adev: device
2642  *
2643  *	Obtain the other device on the same cable, or NULL if no
2644  *	other device is present.
2645  */
2646 
2647 struct ata_device *ata_dev_pair(struct ata_device *adev)
2648 {
2649 	struct ata_link *link = adev->link;
2650 	struct ata_device *pair = &link->device[1 - adev->devno];
2651 	if (!ata_dev_enabled(pair))
2652 		return NULL;
2653 	return pair;
2654 }
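
/*
 * Illustrative sketch: PATA timing code uses this when both devices on
 * a cable share timing registers (my_slowest_mode is hypothetical):
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *
 *	if (pair)
 *		mode = my_slowest_mode(adev->pio_mode, pair->pio_mode);
 */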
2655 
2656 /**
2657  *	ata_port_disable - Disable port.
2658  *	@ap: Port to be disabled.
2659  *
2660  *	Modify @ap data structure such that the system
2661  *	thinks that the entire port is disabled, and should
2662  *	never attempt to probe or communicate with devices
2663  *	on this port.
2664  *
2665  *	LOCKING: host lock, or some other form of
2666  *	serialization.
2667  */
2668 
2669 void ata_port_disable(struct ata_port *ap)
2670 {
2671 	ap->link.device[0].class = ATA_DEV_NONE;
2672 	ap->link.device[1].class = ATA_DEV_NONE;
2673 	ap->flags |= ATA_FLAG_DISABLED;
2674 }
2675 
2676 /**
2677  *	sata_down_spd_limit - adjust SATA spd limit downward
2678  *	@link: Link to adjust SATA spd limit for
2679  *
2680  *	Adjust SATA spd limit of @link downward.  Note that this
2681  *	function only adjusts the limit.  The change must be applied
2682  *	using sata_set_spd().
2683  *
2684  *	LOCKING:
2685  *	Inherited from caller.
2686  *
2687  *	RETURNS:
2688  *	0 on success, negative errno on failure
2689  */
2690 int sata_down_spd_limit(struct ata_link *link)
2691 {
2692 	u32 sstatus, spd, mask;
2693 	int rc, highbit;
2694 
2695 	if (!sata_scr_valid(link))
2696 		return -EOPNOTSUPP;
2697 
2698 	/* If SCR can be read, use it to determine the current SPD.
2699 	 * If not, use cached value in link->sata_spd.
2700 	 */
2701 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2702 	if (rc == 0)
2703 		spd = (sstatus >> 4) & 0xf;
2704 	else
2705 		spd = link->sata_spd;
2706 
2707 	mask = link->sata_spd_limit;
2708 	if (mask <= 1)
2709 		return -EINVAL;
2710 
2711 	/* unconditionally mask off the highest bit */
2712 	highbit = fls(mask) - 1;
2713 	mask &= ~(1 << highbit);
2714 
2715 	/* Mask off all speeds higher than or equal to the current
2716 	 * one.  Force 1.5Gbps if current SPD is not available.
2717 	 */
2718 	if (spd > 1)
2719 		mask &= (1 << (spd - 1)) - 1;
2720 	else
2721 		mask &= 1;
2722 
2723 	/* were we already at the bottom? */
2724 	if (!mask)
2725 		return -EINVAL;
2726 
2727 	link->sata_spd_limit = mask;
2728 
2729 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2730 			sata_spd_string(fls(mask)));
2731 
2732 	return 0;
2733 }
2734 
2735 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2736 {
2737 	struct ata_link *host_link = &link->ap->link;
2738 	u32 limit, target, spd;
2739 
2740 	limit = link->sata_spd_limit;
2741 
2742 	/* Don't configure downstream link faster than upstream link.
2743 	 * It doesn't speed up anything and some PMPs choke on such a
2744 	 * configuration.
2745 	 */
2746 	if (!ata_is_host_link(link) && host_link->sata_spd)
2747 		limit &= (1 << host_link->sata_spd) - 1;
2748 
2749 	if (limit == UINT_MAX)
2750 		target = 0;
2751 	else
2752 		target = fls(limit);
2753 
2754 	spd = (*scontrol >> 4) & 0xf;
2755 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2756 
2757 	return spd != target;
2758 }
2759 
2760 /**
2761  *	sata_set_spd_needed - is SATA spd configuration needed
2762  *	@link: Link in question
2763  *
2764  *	Test whether the spd limit in SControl matches
2765  *	@link->sata_spd_limit.  This function is used to determine
2766  *	whether hardreset is necessary to apply SATA spd
2767  *	configuration.
2768  *
2769  *	LOCKING:
2770  *	Inherited from caller.
2771  *
2772  *	RETURNS:
2773  *	1 if SATA spd configuration is needed, 0 otherwise.
2774  */
2775 static int sata_set_spd_needed(struct ata_link *link)
2776 {
2777 	u32 scontrol;
2778 
2779 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2780 		return 1;
2781 
2782 	return __sata_set_spd_needed(link, &scontrol);
2783 }
2784 
2785 /**
2786  *	sata_set_spd - set SATA spd according to spd limit
2787  *	@link: Link to set SATA spd for
2788  *
2789  *	Set SATA spd of @link according to sata_spd_limit.
2790  *
2791  *	LOCKING:
2792  *	Inherited from caller.
2793  *
2794  *	RETURNS:
2795  *	0 if spd doesn't need to be changed, 1 if spd has been
2796  *	changed.  Negative errno if SCR registers are inaccessible.
2797  */
2798 int sata_set_spd(struct ata_link *link)
2799 {
2800 	u32 scontrol;
2801 	int rc;
2802 
2803 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2804 		return rc;
2805 
2806 	if (!__sata_set_spd_needed(link, &scontrol))
2807 		return 0;
2808 
2809 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2810 		return rc;
2811 
2812 	return 1;
2813 }
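
/*
 * Illustrative flow (a sketch): EH paths chain the two helpers above.
 * After sata_down_spd_limit() lowers the limit, sata_set_spd() writes
 * the new cap into SControl; a return of 1 means a hardreset must
 * follow for the PHY to renegotiate at the lower speed:
 *
 *	struct ata_eh_context *ehc = &link->eh_context;
 *
 *	if (sata_down_spd_limit(link) == 0 && sata_set_spd(link) == 1)
 *		ehc->i.action |= ATA_EH_HARDRESET;
 */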
2814 
2815 /*
2816  * This mode timing computation functionality is ported over from
2817  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2818  */
2819 /*
2820  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2821  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2822  * for UDMA6, which is currently supported only by Maxtor drives.
2823  *
2824  * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2825  */
2826 
2827 static const struct ata_timing ata_timing[] = {
2828 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2829 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2830 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2831 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2832 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2833 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2834 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2835 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2836 
2837 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2838 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2839 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2840 
2841 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2842 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2843 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2844 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2845 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2846 
2847 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2848 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2849 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2850 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2851 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2852 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2853 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2854 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2855 
2856 	{ 0xFF }
2857 };
2858 
2859 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2860 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2861 
2862 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2863 {
2864 	q->setup   = EZ(t->setup   * 1000,  T);
2865 	q->act8b   = EZ(t->act8b   * 1000,  T);
2866 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2867 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2868 	q->active  = EZ(t->active  * 1000,  T);
2869 	q->recover = EZ(t->recover * 1000,  T);
2870 	q->cycle   = EZ(t->cycle   * 1000,  T);
2871 	q->udma    = EZ(t->udma    * 1000, UT);
2872 }
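
/*
 * Worked example (illustrative): callers conventionally pass T and UT in
 * picoseconds (hence the " * 1000" above on the nanosecond table values).
 * With a ~33 MHz bus clock, T is about 30000; a 290 ns act8b then
 * quantizes to ENOUGH(290000, 30000) = (290000 - 1) / 30000 + 1 = 10
 * clocks, i.e. 300 ns - values are always rounded up, never down.
 */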
2873 
2874 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2875 		      struct ata_timing *m, unsigned int what)
2876 {
2877 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2878 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2879 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2880 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2881 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2882 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2883 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2884 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2885 }
2886 
2887 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2888 {
2889 	const struct ata_timing *t = ata_timing;
2890 
2891 	while (xfer_mode > t->mode)
2892 		t++;
2893 
2894 	if (xfer_mode == t->mode)
2895 		return t;
2896 	return NULL;
2897 }
2898 
2899 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2900 		       struct ata_timing *t, int T, int UT)
2901 {
2902 	const struct ata_timing *s;
2903 	struct ata_timing p;
2904 
2905 	/*
2906 	 * Find the mode.
2907 	 */
2908 
2909 	if (!(s = ata_timing_find_mode(speed)))
2910 		return -EINVAL;
2911 
2912 	memcpy(t, s, sizeof(*s));
2913 
2914 	/*
2915 	 * If the drive is an EIDE drive, it can tell us it needs extended
2916 	 * PIO/MW_DMA cycle timing.
2917 	 */
2918 
2919 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2920 		memset(&p, 0, sizeof(p));
2921 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2922 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2923 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2924 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2925 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2926 		}
2927 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2928 	}
2929 
2930 	/*
2931 	 * Convert the timing to bus clock counts.
2932 	 */
2933 
2934 	ata_timing_quantize(t, t, T, UT);
2935 
2936 	/*
2937 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2938 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2939 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2940 	 */
2941 
2942 	if (speed > XFER_PIO_6) {
2943 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2944 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2945 	}
2946 
2947 	/*
2948 	 * Lengthen active & recovery time so that cycle time is correct.
2949 	 */
2950 
2951 	if (t->act8b + t->rec8b < t->cyc8b) {
2952 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2953 		t->rec8b = t->cyc8b - t->act8b;
2954 	}
2955 
2956 	if (t->active + t->recover < t->cycle) {
2957 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2958 		t->recover = t->cycle - t->active;
2959 	}
2960 
2961 	/* In a few cases quantisation may produce enough errors to
2962 	   leave t->cycle too low for the sum of active and recovery
2963 	   time; if so, we must correct this */
2964 	if (t->active + t->recover > t->cycle)
2965 		t->cycle = t->active + t->recover;
2966 
2967 	return 0;
2968 }
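
/*
 * Illustrative sketch of LLD usage (my_program_timings is hypothetical,
 * and UT = T is just a simplification): a ->set_piomode() hook converts
 * the selected mode into clock counts for its controller, with T in
 * picoseconds per clock (1000000000 / clock-in-kHz):
 *
 *	struct ata_timing t;
 *	const int T = 1000000000 / 33333;
 *	const int UT = T;
 *
 *	if (!ata_timing_compute(adev, adev->pio_mode, &t, T, UT))
 *		my_program_timings(ap, adev, &t);
 */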
2969 
2970 /**
2971  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2972  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2973  *	@cycle: cycle duration in ns
2974  *
2975  *	Return matching xfer mode for @cycle.  The returned mode is of
2976  *	the transfer type specified by @xfer_shift.  If @cycle is too
2977  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
2978  *	than the fastest known mode, the fastest mode is returned.
2979  *
2980  *	LOCKING:
2981  *	None.
2982  *
2983  *	RETURNS:
2984  *	Matching xfer_mode, 0xff if no match found.
2985  */
2986 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
2987 {
2988 	u8 base_mode = 0xff, last_mode = 0xff;
2989 	const struct ata_xfer_ent *ent;
2990 	const struct ata_timing *t;
2991 
2992 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
2993 		if (ent->shift == xfer_shift)
2994 			base_mode = ent->base;
2995 
2996 	for (t = ata_timing_find_mode(base_mode);
2997 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
2998 		unsigned short this_cycle;
2999 
3000 		switch (xfer_shift) {
3001 		case ATA_SHIFT_PIO:
3002 		case ATA_SHIFT_MWDMA:
3003 			this_cycle = t->cycle;
3004 			break;
3005 		case ATA_SHIFT_UDMA:
3006 			this_cycle = t->udma;
3007 			break;
3008 		default:
3009 			return 0xff;
3010 		}
3011 
3012 		if (cycle > this_cycle)
3013 			break;
3014 
3015 		last_mode = t->mode;
3016 	}
3017 
3018 	return last_mode;
3019 }
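
/*
 * Worked example (illustrative): ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60)
 * walks the UDMA entries above - 120, 80 and 60 ns all fit a 60 ns
 * budget, 45 ns does not - and returns XFER_UDMA_2, the fastest UDMA
 * mode whose cycle time is no shorter than the one requested.
 */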
3020 
3021 /**
3022  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3023  *	@dev: Device to adjust xfer masks
3024  *	@sel: ATA_DNXFER_* selector
3025  *
3026  *	Adjust xfer masks of @dev downward.  Note that this function
3027  *	does not apply the change.  Invoking ata_set_mode() afterwards
3028  *	will apply the limit.
3029  *
3030  *	LOCKING:
3031  *	Inherited from caller.
3032  *
3033  *	RETURNS:
3034  *	0 on success, negative errno on failure
3035  */
3036 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3037 {
3038 	char buf[32];
3039 	unsigned long orig_mask, xfer_mask;
3040 	unsigned long pio_mask, mwdma_mask, udma_mask;
3041 	int quiet, highbit;
3042 
3043 	quiet = !!(sel & ATA_DNXFER_QUIET);
3044 	sel &= ~ATA_DNXFER_QUIET;
3045 
3046 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3047 						  dev->mwdma_mask,
3048 						  dev->udma_mask);
3049 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3050 
3051 	switch (sel) {
3052 	case ATA_DNXFER_PIO:
3053 		highbit = fls(pio_mask) - 1;
3054 		pio_mask &= ~(1 << highbit);
3055 		break;
3056 
3057 	case ATA_DNXFER_DMA:
3058 		if (udma_mask) {
3059 			highbit = fls(udma_mask) - 1;
3060 			udma_mask &= ~(1 << highbit);
3061 			if (!udma_mask)
3062 				return -ENOENT;
3063 		} else if (mwdma_mask) {
3064 			highbit = fls(mwdma_mask) - 1;
3065 			mwdma_mask &= ~(1 << highbit);
3066 			if (!mwdma_mask)
3067 				return -ENOENT;
3068 		}
3069 		break;
3070 
3071 	case ATA_DNXFER_40C:
3072 		udma_mask &= ATA_UDMA_MASK_40C;
3073 		break;
3074 
3075 	case ATA_DNXFER_FORCE_PIO0:
3076 		pio_mask &= 1;
3077 		pio_mask &= 1; /* fall through */
3078 		mwdma_mask = 0;
3079 		udma_mask = 0;
3080 		break;
3081 
3082 	default:
3083 		BUG();
3084 	}
3085 
3086 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3087 
3088 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3089 		return -ENOENT;
3090 
3091 	if (!quiet) {
3092 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3093 			snprintf(buf, sizeof(buf), "%s:%s",
3094 				 ata_mode_string(xfer_mask),
3095 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3096 		else
3097 			snprintf(buf, sizeof(buf), "%s",
3098 				 ata_mode_string(xfer_mask));
3099 
3100 		ata_dev_printk(dev, KERN_WARNING,
3101 			       "limiting speed to %s\n", buf);
3102 	}
3103 
3104 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3105 			    &dev->udma_mask);
3106 
3107 	return 0;
3108 }
3109 
3110 static int ata_dev_set_mode(struct ata_device *dev)
3111 {
3112 	struct ata_eh_context *ehc = &dev->link->eh_context;
3113 	const char *dev_err_whine = "";
3114 	int ign_dev_err = 0;
3115 	unsigned int err_mask;
3116 	int rc;
3117 
3118 	dev->flags &= ~ATA_DFLAG_PIO;
3119 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3120 		dev->flags |= ATA_DFLAG_PIO;
3121 
3122 	err_mask = ata_dev_set_xfermode(dev);
3123 
3124 	if (err_mask & ~AC_ERR_DEV)
3125 		goto fail;
3126 
3127 	/* revalidate */
3128 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3129 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3130 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3131 	if (rc)
3132 		return rc;
3133 
3134 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3135 		/* Old CFA may refuse this command, which is just fine */
3136 		if (ata_id_is_cfa(dev->id))
3137 			ign_dev_err = 1;
3138 		/* Catch several broken garbage emulations plus some
3139 		   pre-ATA devices */
3140 		if (ata_id_major_version(dev->id) == 0 &&
3141 					dev->pio_mode <= XFER_PIO_2)
3142 			ign_dev_err = 1;
3143 		/* Some very old devices and some bad newer ones fail
3144 		   any kind of SET_XFERMODE request but support PIO0-2
3145 		   timings and no IORDY */
3146 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3147 			ign_dev_err = 1;
3148 	}
3149 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3150 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3151 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3152 	    dev->dma_mode == XFER_MW_DMA_0 &&
3153 	    (dev->id[63] >> 8) & 1)
3154 		ign_dev_err = 1;
3155 
3156 	/* if the device is actually configured correctly, ignore dev err */
3157 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3158 		ign_dev_err = 1;
3159 
3160 	if (err_mask & AC_ERR_DEV) {
3161 		if (!ign_dev_err)
3162 			goto fail;
3163 		else
3164 			dev_err_whine = " (device error ignored)";
3165 	}
3166 
3167 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3168 		dev->xfer_shift, (int)dev->xfer_mode);
3169 
3170 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3171 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3172 		       dev_err_whine);
3173 
3174 	return 0;
3175 
3176  fail:
3177 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3178 		       "(err_mask=0x%x)\n", err_mask);
3179 	return -EIO;
3180 }
3181 
3182 /**
3183  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3184  *	@link: link on which timings will be programmed
3185  *	@r_failed_dev: out parameter for failed device
3186  *
3187  *	Standard implementation of the function used to tune and set
3188  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3189  *	ata_dev_set_mode() fails, pointer to the failing device is
3190  *	returned in @r_failed_dev.
3191  *
3192  *	LOCKING:
3193  *	PCI/etc. bus probe sem.
3194  *
3195  *	RETURNS:
3196  *	0 on success, negative errno otherwise
3197  */
3198 
3199 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3200 {
3201 	struct ata_port *ap = link->ap;
3202 	struct ata_device *dev;
3203 	int rc = 0, used_dma = 0, found = 0;
3204 
3205 	/* step 1: calculate xfer_mask */
3206 	ata_link_for_each_dev(dev, link) {
3207 		unsigned long pio_mask, dma_mask;
3208 		unsigned int mode_mask;
3209 
3210 		if (!ata_dev_enabled(dev))
3211 			continue;
3212 
3213 		mode_mask = ATA_DMA_MASK_ATA;
3214 		if (dev->class == ATA_DEV_ATAPI)
3215 			mode_mask = ATA_DMA_MASK_ATAPI;
3216 		else if (ata_id_is_cfa(dev->id))
3217 			mode_mask = ATA_DMA_MASK_CFA;
3218 
3219 		ata_dev_xfermask(dev);
3220 		ata_force_xfermask(dev);
3221 
3222 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3223 		/* dma_mask is gated by libata_dma_mask and assigned just below */
3224 
3225 		if (libata_dma_mask & mode_mask)
3226 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3227 		else
3228 			dma_mask = 0;
3229 
3230 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3231 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3232 
3233 		found = 1;
3234 		if (dev->dma_mode != 0xff)
3235 			used_dma = 1;
3236 	}
3237 	if (!found)
3238 		goto out;
3239 
3240 	/* step 2: always set host PIO timings */
3241 	ata_link_for_each_dev(dev, link) {
3242 		if (!ata_dev_enabled(dev))
3243 			continue;
3244 
3245 		if (dev->pio_mode == 0xff) {
3246 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3247 			rc = -EINVAL;
3248 			goto out;
3249 		}
3250 
3251 		dev->xfer_mode = dev->pio_mode;
3252 		dev->xfer_shift = ATA_SHIFT_PIO;
3253 		if (ap->ops->set_piomode)
3254 			ap->ops->set_piomode(ap, dev);
3255 	}
3256 
3257 	/* step 3: set host DMA timings */
3258 	ata_link_for_each_dev(dev, link) {
3259 		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3260 			continue;
3261 
3262 		dev->xfer_mode = dev->dma_mode;
3263 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3264 		if (ap->ops->set_dmamode)
3265 			ap->ops->set_dmamode(ap, dev);
3266 	}
3267 
3268 	/* step 4: update devices' xfer mode */
3269 	ata_link_for_each_dev(dev, link) {
3270 		/* don't update suspended devices' xfer mode */
3271 		if (!ata_dev_enabled(dev))
3272 			continue;
3273 
3274 		rc = ata_dev_set_mode(dev);
3275 		if (rc)
3276 			goto out;
3277 	}
3278 
3279 	/* Record simplex status. If we selected DMA then the other
3280 	 * host channels are not permitted to do so.
3281 	 */
3282 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3283 		ap->host->simplex_claimed = ap;
3284 
3285  out:
3286 	if (rc)
3287 		*r_failed_dev = dev;
3288 	return rc;
3289 }
3290 
3291 /**
3292  *	ata_wait_ready - wait for link to become ready
3293  *	@link: link to be waited on
3294  *	@deadline: deadline jiffies for the operation
3295  *	@check_ready: callback to check link readiness
3296  *
3297  *	Wait for @link to become ready.  @check_ready should return
3298  *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3299  *	link doesn't seem to be occupied, other errno for other error
3300  *	conditions.
3301  *
3302  *	Transient -ENODEV conditions are allowed for
3303  *	ATA_TMOUT_FF_WAIT.
3304  *
3305  *	LOCKING:
3306  *	EH context.
3307  *
3308  *	RETURNS:
3309  *	0 if @link is ready before @deadline; otherwise, -errno.
3310  */
3311 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3312 		   int (*check_ready)(struct ata_link *link))
3313 {
3314 	unsigned long start = jiffies;
3315 	unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
3316 	int warned = 0;
3317 
3318 	if (time_after(nodev_deadline, deadline))
3319 		nodev_deadline = deadline;
3320 
3321 	while (1) {
3322 		unsigned long now = jiffies;
3323 		int ready, tmp;
3324 
3325 		ready = tmp = check_ready(link);
3326 		if (ready > 0)
3327 			return 0;
3328 
3329 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3330 		 * is online.  Also, some SATA devices take a long
3331 		 * time to clear 0xff after reset.  For example,
3332 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3333 		 * GoVault needs even more than that.  Wait for
3334 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3335 		 *
3336 		 * Note that some PATA controllers (pata_ali) explode
3337 		 * if status register is read more than once when
3338 		 * there's no device attached.
3339 		 */
3340 		if (ready == -ENODEV) {
3341 			if (ata_link_online(link))
3342 				ready = 0;
3343 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3344 				 !ata_link_offline(link) &&
3345 				 time_before(now, nodev_deadline))
3346 				ready = 0;
3347 		}
3348 
3349 		if (ready)
3350 			return ready;
3351 		if (time_after(now, deadline))
3352 			return -EBUSY;
3353 
3354 		if (!warned && time_after(now, start + 5 * HZ) &&
3355 		    (deadline - now > 3 * HZ)) {
3356 			ata_link_printk(link, KERN_WARNING,
3357 				"link is slow to respond, please be patient "
3358 				"(ready=%d)\n", tmp);
3359 			warned = 1;
3360 		}
3361 
3362 		msleep(50);
3363 	}
3364 }
3365 
3366 /**
3367  *	ata_wait_after_reset - wait for link to become ready after reset
3368  *	@link: link to be waited on
3369  *	@deadline: deadline jiffies for the operation
3370  *	@check_ready: callback to check link readiness
3371  *
3372  *	Wait for @link to become ready after reset.
3373  *
3374  *	LOCKING:
3375  *	EH context.
3376  *
3377  *	RETURNS:
3378  *	0 if @link is ready before @deadline; otherwise, -errno.
3379  */
3380 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3381 				int (*check_ready)(struct ata_link *link))
3382 {
3383 	msleep(ATA_WAIT_AFTER_RESET_MSECS);
3384 
3385 	return ata_wait_ready(link, deadline, check_ready);
3386 }
3387 
3388 /**
3389  *	sata_link_debounce - debounce SATA phy status
3390  *	@link: ATA link to debounce SATA phy status for
3391  *	@params: timing parameters { interval, duration, timeout } in msec
3392  *	@deadline: deadline jiffies for the operation
3393  *
3394  *	Make sure SStatus of @link reaches stable state, determined by
3395  *	holding the same value where DET is not 1 for @duration polled
3396  *	every @interval, before @timeout.  Timeout constrains the
3397  *	beginning of the stable state.  Because DET gets stuck at 1 on
3398  *	some controllers after hot unplugging, this function waits
3399  *	until timeout and then returns 0 if DET is stable at 1.
3400  *
3401  *	@timeout is further limited by @deadline.  The sooner of the
3402  *	two is used.
3403  *
3404  *	LOCKING:
3405  *	Kernel thread context (may sleep)
3406  *
3407  *	RETURNS:
3408  *	0 on success, -errno on failure.
3409  */
3410 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3411 		       unsigned long deadline)
3412 {
3413 	unsigned long interval_msec = params[0];
3414 	unsigned long duration = msecs_to_jiffies(params[1]);
3415 	unsigned long last_jiffies, t;
3416 	u32 last, cur;
3417 	int rc;
3418 
3419 	t = jiffies + msecs_to_jiffies(params[2]);
3420 	if (time_before(t, deadline))
3421 		deadline = t;
3422 
3423 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3424 		return rc;
3425 	cur &= 0xf;
3426 
3427 	last = cur;
3428 	last_jiffies = jiffies;
3429 
3430 	while (1) {
3431 		msleep(interval_msec);
3432 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3433 			return rc;
3434 		cur &= 0xf;
3435 
3436 		/* DET stable? */
3437 		if (cur == last) {
3438 			if (cur == 1 && time_before(jiffies, deadline))
3439 				continue;
3440 			if (time_after(jiffies, last_jiffies + duration))
3441 				return 0;
3442 			continue;
3443 		}
3444 
3445 		/* unstable, start over */
3446 		last = cur;
3447 		last_jiffies = jiffies;
3448 
3449 		/* Check deadline.  If debouncing failed, return
3450 		 * -EPIPE to tell upper layer to lower link speed.
3451 		 */
3452 		if (time_after(jiffies, deadline))
3453 			return -EPIPE;
3454 	}
3455 }
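
/*
 * Illustrative use (a sketch; the 2000 ms deadline is arbitrary):
 * hotplug handling debounces with the more conservative hotplug
 * parameter set, e.g.:
 *
 *	rc = sata_link_debounce(link, sata_deb_timing_hotplug,
 *				jiffies + msecs_to_jiffies(2000));
 */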
3456 
3457 /**
3458  *	sata_link_resume - resume SATA link
3459  *	@link: ATA link to resume SATA
3460  *	@params: timing parameters { interval, duration, timeout } in msec
3461  *	@deadline: deadline jiffies for the operation
3462  *
3463  *	Resume SATA phy @link and debounce it.
3464  *
3465  *	LOCKING:
3466  *	Kernel thread context (may sleep)
3467  *
3468  *	RETURNS:
3469  *	0 on success, -errno on failure.
3470  */
3471 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3472 		     unsigned long deadline)
3473 {
3474 	u32 scontrol, serror;
3475 	int rc;
3476 
3477 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3478 		return rc;
3479 
3480 	scontrol = (scontrol & 0x0f0) | 0x300;
3481 
3482 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3483 		return rc;
3484 
3485 	/* Some PHYs react badly if SStatus is pounded immediately
3486 	 * after resuming.  Delay 200ms before debouncing.
3487 	 */
3488 	msleep(200);
3489 
3490 	if ((rc = sata_link_debounce(link, params, deadline)))
3491 		return rc;
3492 
3493 	/* Clear SError.  PMP and some host PHYs require this to
3494 	 * operate and clearing should be done before checking PHY
3495 	 * online status to avoid a race condition (hotplugging between
3496 	 * link resume and status check).
3497 	 */
3498 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3499 		rc = sata_scr_write(link, SCR_ERROR, serror);
3500 	if (rc == 0 || rc == -EINVAL) {
3501 		unsigned long flags;
3502 
3503 		spin_lock_irqsave(link->ap->lock, flags);
3504 		link->eh_info.serror = 0;
3505 		spin_unlock_irqrestore(link->ap->lock, flags);
3506 		rc = 0;
3507 	}
3508 	return rc;
3509 }
3510 
3511 /**
3512  *	ata_std_prereset - prepare for reset
3513  *	@link: ATA link to be reset
3514  *	@deadline: deadline jiffies for the operation
3515  *
3516  *	@link is about to be reset.  Initialize it.  Failure from
3517  *	prereset makes libata abort the whole reset sequence and give up
3518  *	that port, so prereset should be best-effort.  It does its
3519  *	best to prepare for the reset sequence, but if things go wrong, it
3520  *	should just whine, not fail.
3521  *
3522  *	LOCKING:
3523  *	Kernel thread context (may sleep)
3524  *
3525  *	RETURNS:
3526  *	0 on success, -errno otherwise.
3527  */
3528 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3529 {
3530 	struct ata_port *ap = link->ap;
3531 	struct ata_eh_context *ehc = &link->eh_context;
3532 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3533 	int rc;
3534 
3535 	/* if we're about to do hardreset, nothing more to do */
3536 	if (ehc->i.action & ATA_EH_HARDRESET)
3537 		return 0;
3538 
3539 	/* if SATA, resume link */
3540 	if (ap->flags & ATA_FLAG_SATA) {
3541 		rc = sata_link_resume(link, timing, deadline);
3542 		/* whine about phy resume failure but proceed */
3543 		if (rc && rc != -EOPNOTSUPP)
3544 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3545 					"link for reset (errno=%d)\n", rc);
3546 	}
3547 
3548 	/* no point in trying softreset on offline link */
3549 	if (ata_link_offline(link))
3550 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3551 
3552 	return 0;
3553 }
3554 
3555 /**
3556  *	sata_link_hardreset - reset link via SATA phy reset
3557  *	@link: link to reset
3558  *	@timing: timing parameters { interval, duration, timeout } in msec
3559  *	@deadline: deadline jiffies for the operation
3560  *	@online: optional out parameter indicating link onlineness
3561  *	@check_ready: optional callback to check link readiness
3562  *
3563  *	SATA phy-reset @link using DET bits of SControl register.
3564  *	After hardreset, link readiness is waited upon using
3565  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3566  *	allowed to omit @check_ready and do the waiting themselves after
3567  *	this function returns.  Device classification is the LLD's
3568  *	responsibility.
3569  *
3570  *	*@online is set to one iff reset succeeded and @link is online
3571  *	after reset.
3572  *
3573  *	LOCKING:
3574  *	Kernel thread context (may sleep)
3575  *
3576  *	RETURNS:
3577  *	0 on success, -errno otherwise.
3578  */
3579 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3580 			unsigned long deadline,
3581 			bool *online, int (*check_ready)(struct ata_link *))
3582 {
3583 	u32 scontrol;
3584 	int rc;
3585 
3586 	DPRINTK("ENTER\n");
3587 
3588 	if (online)
3589 		*online = false;
3590 
3591 	if (sata_set_spd_needed(link)) {
3592 		/* SATA spec says nothing about how to reconfigure
3593 		 * spd.  To be on the safe side, turn off phy during
3594 		 * reconfiguration.  This works for at least ICH7 AHCI
3595 		 * and Sil3124.
3596 		 */
3597 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3598 			goto out;
3599 
3600 		scontrol = (scontrol & 0x0f0) | 0x304;
3601 
3602 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3603 			goto out;
3604 
3605 		sata_set_spd(link);
3606 	}
3607 
3608 	/* issue phy wake/reset */
3609 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3610 		goto out;
3611 
3612 	scontrol = (scontrol & 0x0f0) | 0x301;
3613 
3614 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3615 		goto out;
3616 
3617 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3618 	 * 10.4.2 says at least 1 ms.
3619 	 */
3620 	msleep(1);
3621 
3622 	/* bring link back */
3623 	rc = sata_link_resume(link, timing, deadline);
3624 	if (rc)
3625 		goto out;
3626 	/* if link is offline nothing more to do */
3627 	if (ata_link_offline(link))
3628 		goto out;
3629 
3630 	/* Link is online.  From this point, -ENODEV too is an error. */
3631 	if (online)
3632 		*online = true;
3633 
3634 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3635 		/* If PMP is supported, we have to do follow-up SRST.
3636 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3637 		 * the first port is empty.  Wait only for
3638 		 * ATA_TMOUT_PMP_SRST_WAIT.
3639 		 */
3640 		if (check_ready) {
3641 			unsigned long pmp_deadline;
3642 
3643 			pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
3644 			if (time_after(pmp_deadline, deadline))
3645 				pmp_deadline = deadline;
3646 			ata_wait_ready(link, pmp_deadline, check_ready);
3647 		}
3648 		rc = -EAGAIN;
3649 		goto out;
3650 	}
3651 
3652 	rc = 0;
3653 	if (check_ready)
3654 		rc = ata_wait_ready(link, deadline, check_ready);
3655  out:
3656 	if (rc && rc != -EAGAIN)
3657 		ata_link_printk(link, KERN_ERR,
3658 				"COMRESET failed (errno=%d)\n", rc);
3659 	DPRINTK("EXIT, rc=%d\n", rc);
3660 	return rc;
3661 }
3662 
3663 /**
3664  *	sata_std_hardreset - COMRESET w/o waiting or classification
3665  *	@link: link to reset
3666  *	@class: resulting class of attached device
3667  *	@deadline: deadline jiffies for the operation
3668  *
3669  *	Standard SATA COMRESET w/o waiting or classification.
3670  *
3671  *	LOCKING:
3672  *	Kernel thread context (may sleep)
3673  *
3674  *	RETURNS:
3675  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3676  */
3677 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3678 		       unsigned long deadline)
3679 {
3680 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3681 	bool online;
3682 	int rc;
3683 
3684 	/* do hardreset */
3685 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3686 	return online ? -EAGAIN : rc;
3687 }
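
/*
 * Illustrative sketch: an LLD that can test device readiness supplies a
 * @check_ready callback so the wait happens inside the helper (the
 * my_* names are hypothetical):
 *
 *	static int my_hardreset(struct ata_link *link, unsigned int *class,
 *				unsigned long deadline)
 *	{
 *		const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *		bool online;
 *		int rc;
 *
 *		rc = sata_link_hardreset(link, timing, deadline, &online,
 *					 my_check_ready);
 *		if (online)
 *			*class = my_classify_device(link);
 *		return rc;
 *	}
 */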
3688 
3689 /**
3690  *	ata_std_postreset - standard postreset callback
3691  *	@link: the target ata_link
3692  *	@classes: classes of attached devices
3693  *
3694  *	This function is invoked after a successful reset.  Note that
3695  *	the device might have been reset more than once using
3696  *	different reset methods before postreset is invoked.
3697  *
3698  *	LOCKING:
3699  *	Kernel thread context (may sleep)
3700  */
3701 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3702 {
3703 	DPRINTK("ENTER\n");
3704 
3705 	/* print link status */
3706 	sata_print_link_status(link);
3707 
3708 	DPRINTK("EXIT\n");
3709 }
3710 
3711 /**
3712  *	ata_dev_same_device - Determine whether new ID matches configured device
3713  *	@dev: device to compare against
3714  *	@new_class: class of the new device
3715  *	@new_id: IDENTIFY page of the new device
3716  *
3717  *	Compare @new_class and @new_id against @dev and determine
3718  *	whether @dev is the device indicated by @new_class and
3719  *	@new_id.
3720  *
3721  *	LOCKING:
3722  *	None.
3723  *
3724  *	RETURNS:
3725  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3726  */
3727 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3728 			       const u16 *new_id)
3729 {
3730 	const u16 *old_id = dev->id;
3731 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3732 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3733 
3734 	if (dev->class != new_class) {
3735 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3736 			       dev->class, new_class);
3737 		return 0;
3738 	}
3739 
3740 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3741 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3742 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3743 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3744 
3745 	if (strcmp(model[0], model[1])) {
3746 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3747 			       "'%s' != '%s'\n", model[0], model[1]);
3748 		return 0;
3749 	}
3750 
3751 	if (strcmp(serial[0], serial[1])) {
3752 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3753 			       "'%s' != '%s'\n", serial[0], serial[1]);
3754 		return 0;
3755 	}
3756 
3757 	return 1;
3758 }
3759 
3760 /**
3761  *	ata_dev_reread_id - Re-read IDENTIFY data
3762  *	@dev: target ATA device
3763  *	@readid_flags: read ID flags
3764  *
3765  *	Re-read IDENTIFY page and make sure @dev is still attached to
3766  *	the port.
3767  *
3768  *	LOCKING:
3769  *	Kernel thread context (may sleep)
3770  *
3771  *	RETURNS:
3772  *	0 on success, negative errno otherwise
3773  */
3774 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3775 {
3776 	unsigned int class = dev->class;
3777 	u16 *id = (void *)dev->link->ap->sector_buf;
3778 	int rc;
3779 
3780 	/* read ID data */
3781 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3782 	if (rc)
3783 		return rc;
3784 
3785 	/* is the device still there? */
3786 	if (!ata_dev_same_device(dev, class, id))
3787 		return -ENODEV;
3788 
3789 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3790 	return 0;
3791 }
3792 
3793 /**
3794  *	ata_dev_revalidate - Revalidate ATA device
3795  *	@dev: device to revalidate
3796  *	@new_class: new class code
3797  *	@readid_flags: read ID flags
3798  *
3799  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3800  *	port and reconfigure it according to the new IDENTIFY page.
3801  *
3802  *	LOCKING:
3803  *	Kernel thread context (may sleep)
3804  *
3805  *	RETURNS:
3806  *	0 on success, negative errno otherwise
3807  */
3808 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3809 		       unsigned int readid_flags)
3810 {
3811 	u64 n_sectors = dev->n_sectors;
3812 	int rc;
3813 
3814 	if (!ata_dev_enabled(dev))
3815 		return -ENODEV;
3816 
3817 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3818 	if (ata_class_enabled(new_class) &&
3819 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3820 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3821 			       dev->class, new_class);
3822 		rc = -ENODEV;
3823 		goto fail;
3824 	}
3825 
3826 	/* re-read ID */
3827 	rc = ata_dev_reread_id(dev, readid_flags);
3828 	if (rc)
3829 		goto fail;
3830 
3831 	/* configure device according to the new ID */
3832 	rc = ata_dev_configure(dev);
3833 	if (rc)
3834 		goto fail;
3835 
3836 	/* verify n_sectors hasn't changed */
3837 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3838 	    dev->n_sectors != n_sectors) {
3839 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3840 			       "%llu != %llu\n",
3841 			       (unsigned long long)n_sectors,
3842 			       (unsigned long long)dev->n_sectors);
3843 
3844 		/* restore original n_sectors */
3845 		dev->n_sectors = n_sectors;
3846 
3847 		rc = -ENODEV;
3848 		goto fail;
3849 	}
3850 
3851 	return 0;
3852 
3853  fail:
3854 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3855 	return rc;
3856 }
3857 
3858 struct ata_blacklist_entry {
3859 	const char *model_num;
3860 	const char *model_rev;
3861 	unsigned long horkage;
3862 };
3863 
3864 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3865 	/* Devices with DMA related problems under Linux */
3866 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
3867 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
3868 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
3869 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
3870 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
3871 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
3872 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
3873 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
3874 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
3875 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
3876 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
3877 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
3878 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
3879 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
3880 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
3881 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
3882 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
3883 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
3884 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
3885 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
3886 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
3887 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
3888 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
3889 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
3890 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
3891 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
3892 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3893 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
3894 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
3895 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
3896 	/* Odd clown on sil3726/4726 PMPs */
3897 	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
3898 						ATA_HORKAGE_SKIP_PM },
3899 
3900 	/* Weird ATAPI devices */
3901 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
3902 
3903 	/* Devices we expect to fail diagnostics */
3904 
3905 	/* Devices where NCQ should be avoided */
3906 	/* NCQ is slow */
3907 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
3908 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3909 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
3910 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
3911 	/* NCQ is broken */
3912 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
3913 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
3914 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
3915 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
3916 
3917 	/* Blacklist entries taken from Silicon Image 3124/3132
3918 	   Windows driver .inf file - also several Linux problem reports */
3919 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
3920 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
3921 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3922 
3923 	/* devices which puke on READ_NATIVE_MAX */
3924 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
3925 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3926 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3927 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
3928 
3929 	/* Devices which report 1 sector over size HPA */
3930 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3931 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3932 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3933 
3934 	/* Devices which get the IVB wrong */
3935 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3936 	/* Maybe we should just blacklist TSSTcorp... */
3937 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
3938 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
3939 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
3940 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
3941 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
3942 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
3943 
3944 	/* End Marker */
3945 	{ }
3946 };
3947 
3948 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3949 {
3950 	const char *p;
3951 	int len;
3952 
3953 	/*
3954 	 * check for trailing wildcard: *\0
3955 	 */
3956 	p = strchr(patt, wildchar);
3957 	if (p && ((*(p + 1)) == 0))
3958 		len = p - patt;
3959 	else {
3960 		len = strlen(name);
3961 		if (!len) {
3962 			if (!*patt)
3963 				return 0;
3964 			return -1;
3965 		}
3966 	}
3967 
3968 	return strncmp(patt, name, len);
3969 }
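
/*
 * Illustrative note (not in the original source): with wildchar '*',
 * a pattern ending in the wildcard matches on the prefix before it,
 * while a plain pattern is compared over strlen(name) characters:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*') == 0
 *	strn_pattern_cmp("Maxtor *", "ST380817AS", '*') != 0
 */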
3970 
3971 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3972 {
3973 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
3974 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3975 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3976 
3977 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3978 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3979 
3980 	while (ad->model_num) {
3981 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3982 			if (ad->model_rev == NULL)
3983 				return ad->horkage;
3984 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
3985 				return ad->horkage;
3986 		}
3987 		ad++;
3988 	}
3989 	return 0;
3990 }
3991 
3992 static int ata_dma_blacklisted(const struct ata_device *dev)
3993 {
3994 	/* We don't support polling DMA.  Blacklist DMA (and fall back
3995 	 * to PIO) for ATAPI devices with CDB-intr if the LLDD handles
3996 	 * interrupts only in the HSM_ST_LAST state.
3997 	 */
3998 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3999 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4000 		return 1;
4001 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4002 }
4003 
4004 /**
4005  *	ata_is_40wire		-	check drive side detection
4006  *	@dev: device
4007  *
4008  *	Perform drive side detection decoding, allowing for device vendors
4009  *	who can't follow the documentation.
4010  */
4011 
4012 static int ata_is_40wire(struct ata_device *dev)
4013 {
4014 	if (dev->horkage & ATA_HORKAGE_IVB)
4015 		return ata_drive_40wire_relaxed(dev->id);
4016 	return ata_drive_40wire(dev->id);
4017 }
4018 
4019 /**
4020  *	cable_is_40wire		-	40/80/SATA decider
4021  *	@ap: port to consider
4022  *
4023  *	This function encapsulates the policy for speed management
4024  *	in one place. At the moment we don't cache the result but
4025  *	there is a good case for setting ap->cbl to the result when
4026  *	we are called with unknown cables (and figuring out if it
4027  *	impacts hotplug at all).
4028  *
4029  *	Return 1 if the cable appears to be 40 wire.
4030  */
4031 
4032 static int cable_is_40wire(struct ata_port *ap)
4033 {
4034 	struct ata_link *link;
4035 	struct ata_device *dev;
4036 
4037 	/* If the controller thinks we are 40 wire, we are */
4038 	if (ap->cbl == ATA_CBL_PATA40)
4039 		return 1;
4040 	/* If the controller thinks we are 80 wire, we are */
4041 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4042 		return 0;
4043 	/* If the system is known to have a 40 wire short cable (e.g. a
4044 	   laptop), then we allow 80 wire modes even if the drive isn't sure */
4045 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4046 		return 0;
4047 	/* If the controller doesn't know, we scan.
4048 
4049 	   - Note: We look for all 40 wire detects at this point.
4050 	     Any 80 wire detect is taken to mean an 80 wire cable
4051 	     because
4052 	     - in many setups only the one drive (the slave, if present)
4053 	       will give a valid detect
4054 	     - if you have a drive that isn't detect capable you don't
4055 	       want it to colour the choice
4056 	*/
4057 	ata_port_for_each_link(link, ap) {
4058 		ata_link_for_each_dev(dev, link) {
4059 			if (!ata_is_40wire(dev))
4060 				return 0;
4061 		}
4062 	}
4063 	return 1;
4064 }
4065 
4066 /**
4067  *	ata_dev_xfermask - Compute supported xfermask of the given device
4068  *	@dev: Device to compute xfermask for
4069  *
4070  *	Compute supported xfermask of @dev and store it in
4071  *	dev->*_mask.  This function is responsible for applying all
4072  *	known limits including host controller limits, device
4073  *	blacklist, etc...
4074  *
4075  *	LOCKING:
4076  *	None.
4077  */
4078 static void ata_dev_xfermask(struct ata_device *dev)
4079 {
4080 	struct ata_link *link = dev->link;
4081 	struct ata_port *ap = link->ap;
4082 	struct ata_host *host = ap->host;
4083 	unsigned long xfer_mask;
4084 
4085 	/* controller modes available */
4086 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4087 				      ap->mwdma_mask, ap->udma_mask);
4088 
4089 	/* drive modes available */
4090 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4091 				       dev->mwdma_mask, dev->udma_mask);
4092 	xfer_mask &= ata_id_xfermask(dev->id);
4093 
4094 	/*
4095 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4096 	 *	cable
4097 	 */
4098 	if (ata_dev_pair(dev)) {
4099 		/* No PIO5 or PIO6 */
4100 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4101 		/* No MWDMA3 or MWDMA 4 */
4102 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4103 	}
4104 
4105 	if (ata_dma_blacklisted(dev)) {
4106 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4107 		ata_dev_printk(dev, KERN_WARNING,
4108 			       "device is on DMA blacklist, disabling DMA\n");
4109 	}
4110 
4111 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4112 	    host->simplex_claimed && host->simplex_claimed != ap) {
4113 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4114 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4115 			       "other device, disabling DMA\n");
4116 	}
4117 
4118 	if (ap->flags & ATA_FLAG_NO_IORDY)
4119 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4120 
4121 	if (ap->ops->mode_filter)
4122 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4123 
4124 	/* Apply cable rule here.  Don't apply it early because when
4125 	 * we handle hot plug the cable type can itself change.
4126 	 * Check this last so that we know if the transfer rate was
4127 	 * solely limited by the cable.
4128 	 * Unknown or 80 wire cables reported host side are checked
4129 	 * drive side as well. Cases where we know a 40wire cable
4130 	 * is used safely for 80 are not checked here.
4131 	 */
4132 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4133 		/* UDMA/44 or higher would be available */
4134 		if (cable_is_40wire(ap)) {
4135 			ata_dev_printk(dev, KERN_WARNING,
4136 				 "limited to UDMA/33 due to 40-wire cable\n");
4137 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4138 		}
4139 
4140 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4141 			    &dev->mwdma_mask, &dev->udma_mask);
4142 }
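
/*
 * Illustrative note (not in the original source): in the packed
 * xfer_mask each UDMA mode occupies one bit above ATA_SHIFT_UDMA:
 * bit 0 is UDMA/16, bit 1 UDMA/25, bit 2 UDMA/33, bit 3 UDMA/44 and
 * so on, which is why the cable check above strips 0xF8 (bits 3-7):
 *
 *	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 *
 * clears UDMA/44 and above while leaving UDMA/33 and below intact.
 */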
4143 
4144 /**
4145  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4146  *	@dev: Device to which command will be sent
4147  *
4148  *	Issue SET FEATURES - XFER MODE command to device @dev
4149  *	on port @ap.
4150  *
4151  *	LOCKING:
4152  *	PCI/etc. bus probe sem.
4153  *
4154  *	RETURNS:
4155  *	0 on success, AC_ERR_* mask otherwise.
4156  */
4157 
4158 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4159 {
4160 	struct ata_taskfile tf;
4161 	unsigned int err_mask;
4162 
4163 	/* set up set-features taskfile */
4164 	DPRINTK("set features - xfer mode\n");
4165 
4166 	/* Some controllers and ATAPI devices show flaky interrupt
4167 	 * behavior after setting xfer mode.  Use polling instead.
4168 	 */
4169 	ata_tf_init(dev, &tf);
4170 	tf.command = ATA_CMD_SET_FEATURES;
4171 	tf.feature = SETFEATURES_XFER;
4172 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4173 	tf.protocol = ATA_PROT_NODATA;
4174 	/* If we are using IORDY we must send the mode setting command */
4175 	if (ata_pio_need_iordy(dev))
4176 		tf.nsect = dev->xfer_mode;
4177 	/* If the device has IORDY and the controller does not - turn it off */
4178 	else if (ata_id_has_iordy(dev->id))
4179 		tf.nsect = 0x01;
4180 	else /* In the ancient relic department - skip all of this */
4181 		return 0;
4182 
4183 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4184 
4185 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4186 	return err_mask;
4187 }

4188 /**
4189  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4190  *	@dev: Device to which command will be sent
4191  *	@enable: Whether to enable or disable the feature
4192  *	@feature: The sector count field value selecting the feature to set
4193  *
4194  *	Issue SET FEATURES - SATA FEATURES command to device @dev
4195  *	on port @ap with the sector count set to @feature.
4196  *
4197  *	LOCKING:
4198  *	PCI/etc. bus probe sem.
4199  *
4200  *	RETURNS:
4201  *	0 on success, AC_ERR_* mask otherwise.
4202  */
4203 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4204 					u8 feature)
4205 {
4206 	struct ata_taskfile tf;
4207 	unsigned int err_mask;
4208 
4209 	/* set up set-features taskfile */
4210 	DPRINTK("set features - SATA features\n");
4211 
4212 	ata_tf_init(dev, &tf);
4213 	tf.command = ATA_CMD_SET_FEATURES;
4214 	tf.feature = enable;
4215 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4216 	tf.protocol = ATA_PROT_NODATA;
4217 	tf.nsect = feature;
4218 
4219 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4220 
4221 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4222 	return err_mask;
4223 }
4224 
4225 /**
4226  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4227  *	@dev: Device to which command will be sent
4228  *	@heads: Number of heads (taskfile parameter)
4229  *	@sectors: Number of sectors (taskfile parameter)
4230  *
4231  *	LOCKING:
4232  *	Kernel thread context (may sleep)
4233  *
4234  *	RETURNS:
4235  *	0 on success, AC_ERR_* mask otherwise.
4236  */
4237 static unsigned int ata_dev_init_params(struct ata_device *dev,
4238 					u16 heads, u16 sectors)
4239 {
4240 	struct ata_taskfile tf;
4241 	unsigned int err_mask;
4242 
4243 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4244 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4245 		return AC_ERR_INVALID;
4246 
4247 	/* set up init dev params taskfile */
4248 	DPRINTK("init dev params\n");
4249 
4250 	ata_tf_init(dev, &tf);
4251 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4252 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4253 	tf.protocol = ATA_PROT_NODATA;
4254 	tf.nsect = sectors;
4255 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4256 
4257 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4258 	/* A clean abort indicates an original or just out of spec drive
4259 	   and we should continue as we issue the setup based on the
4260 	   drive reported working geometry */
4261 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4262 		err_mask = 0;
4263 
4264 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4265 	return err_mask;
4266 }
4267 
4268 /**
4269  *	ata_sg_clean - Unmap DMA memory associated with command
4270  *	@qc: Command containing DMA memory to be released
4271  *
4272  *	Unmap all mapped DMA memory associated with this command.
4273  *
4274  *	LOCKING:
4275  *	spin_lock_irqsave(host lock)
4276  */
4277 void ata_sg_clean(struct ata_queued_cmd *qc)
4278 {
4279 	struct ata_port *ap = qc->ap;
4280 	struct scatterlist *sg = qc->sg;
4281 	int dir = qc->dma_dir;
4282 
4283 	WARN_ON(sg == NULL);
4284 
4285 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4286 
4287 	if (qc->n_elem)
4288 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4289 
4290 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4291 	qc->sg = NULL;
4292 }
4293 
4294 /**
4295  *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4296  *	@qc: Metadata associated with taskfile to check
4297  *
4298  *	Allow low-level driver to filter ATA PACKET commands, returning
4299  *	a status indicating whether or not it is OK to use DMA for the
4300  *	supplied PACKET command.
4301  *
4302  *	LOCKING:
4303  *	spin_lock_irqsave(host lock)
4304  *
4305  *	RETURNS: 0 when ATAPI DMA can be used
4306  *               nonzero otherwise
4307  */
4308 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4309 {
4310 	struct ata_port *ap = qc->ap;
4311 
4312 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4313 	 * few ATAPI devices choke on such DMA requests.
4314 	 */
4315 	if (unlikely(qc->nbytes & 15))
4316 		return 1;
4317 
4318 	if (ap->ops->check_atapi_dma)
4319 		return ap->ops->check_atapi_dma(qc);
4320 
4321 	return 0;
4322 }
4323 
4324 /**
4325  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4326  *	@qc: ATA command in question
4327  *
4328  *	Non-NCQ commands cannot run with any other command, NCQ or
4329  *	not.  As the upper layer only knows the queue depth, we are
4330  *	responsible for maintaining exclusion.  This function checks
4331  *	whether a new command @qc can be issued.
4332  *
4333  *	LOCKING:
4334  *	spin_lock_irqsave(host lock)
4335  *
4336  *	RETURNS:
4337  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4338  */
4339 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4340 {
4341 	struct ata_link *link = qc->dev->link;
4342 
4343 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4344 		if (!ata_tag_valid(link->active_tag))
4345 			return 0;
4346 	} else {
4347 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4348 			return 0;
4349 	}
4350 
4351 	return ATA_DEFER_LINK;
4352 }
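
/*
 * Illustrative note (not in the original source): the net effect is
 * that an NCQ command is deferred only while a non-NCQ command owns
 * link->active_tag, while a non-NCQ command is deferred if either
 * link->active_tag or any NCQ tag in link->sactive is in flight, so
 * NCQ commands may overlap each other but never a non-NCQ command.
 */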
4353 
4354 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4355 
4356 /**
4357  *	ata_sg_init - Associate command with scatter-gather table.
4358  *	@qc: Command to be associated
4359  *	@sg: Scatter-gather table.
4360  *	@n_elem: Number of elements in s/g table.
4361  *
4362  *	Initialize the data-related elements of queued_cmd @qc
4363  *	to point to a scatter-gather table @sg, containing @n_elem
4364  *	elements.
4365  *
4366  *	LOCKING:
4367  *	spin_lock_irqsave(host lock)
4368  */
4369 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4370 		 unsigned int n_elem)
4371 {
4372 	qc->sg = sg;
4373 	qc->n_elem = n_elem;
4374 	qc->cursg = qc->sg;
4375 }
4376 
4377 /**
4378  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4379  *	@qc: Command with scatter-gather table to be mapped.
4380  *
4381  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4382  *
4383  *	LOCKING:
4384  *	spin_lock_irqsave(host lock)
4385  *
4386  *	RETURNS:
4387  *	Zero on success, negative on error.
4388  *
4389  */
4390 static int ata_sg_setup(struct ata_queued_cmd *qc)
4391 {
4392 	struct ata_port *ap = qc->ap;
4393 	unsigned int n_elem;
4394 
4395 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4396 
4397 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4398 	if (n_elem < 1)
4399 		return -1;
4400 
4401 	DPRINTK("%d sg elements mapped\n", n_elem);
4402 
4403 	qc->n_elem = n_elem;
4404 	qc->flags |= ATA_QCFLAG_DMAMAP;
4405 
4406 	return 0;
4407 }
4408 
4409 /**
4410  *	swap_buf_le16 - swap halves of 16-bit words in place
4411  *	@buf:  Buffer to swap
4412  *	@buf_words:  Number of 16-bit words in buffer.
4413  *
4414  *	Swap halves of 16-bit words if needed to convert from
4415  *	little-endian byte order to native cpu byte order, or
4416  *	vice-versa.
4417  *
4418  *	LOCKING:
4419  *	Inherited from caller.
4420  */
4421 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4422 {
4423 #ifdef __BIG_ENDIAN
4424 	unsigned int i;
4425 
4426 	for (i = 0; i < buf_words; i++)
4427 		buf[i] = le16_to_cpu(buf[i]);
4428 #endif /* __BIG_ENDIAN */
4429 }
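
/*
 * Illustrative example (not in the original source): IDENTIFY DEVICE
 * data is defined as little-endian 16-bit words, so a driver that has
 * just transferred it into a u16 buffer by PIO would typically do:
 *
 *	swap_buf_le16(id_buf, ATA_ID_WORDS);
 *
 * which byte-swaps every word on big-endian CPUs and compiles to a
 * no-op on little-endian ones.
 */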
4430 
4431 /**
4432  *	ata_qc_new - Request an available ATA command, for queueing
4433  *	@ap: Port associated with device @dev
4434  *	@dev: Device from whom we request an available command structure
4435  *
4436  *	LOCKING:
4437  *	None.
4438  */
4439 
4440 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4441 {
4442 	struct ata_queued_cmd *qc = NULL;
4443 	unsigned int i;
4444 
4445 	/* no command while frozen */
4446 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4447 		return NULL;
4448 
4449 	/* the last tag is reserved for internal command. */
4450 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4451 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4452 			qc = __ata_qc_from_tag(ap, i);
4453 			break;
4454 		}
4455 
4456 	if (qc)
4457 		qc->tag = i;
4458 
4459 	return qc;
4460 }
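
/*
 * Illustrative note (not in the original source): with
 * ATA_MAX_QUEUE == 32 the loop above hands out tags 0-30 only;
 * tag 31 (ATA_TAG_INTERNAL) stays reserved for commands issued
 * through ata_exec_internal().
 */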
4461 
4462 /**
4463  *	ata_qc_new_init - Request an available ATA command, and initialize it
4464  *	@dev: Device from whom we request an available command structure
4465  *
4466  *	LOCKING:
4467  *	None.
4468  */
4469 
4470 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4471 {
4472 	struct ata_port *ap = dev->link->ap;
4473 	struct ata_queued_cmd *qc;
4474 
4475 	qc = ata_qc_new(ap);
4476 	if (qc) {
4477 		qc->scsicmd = NULL;
4478 		qc->ap = ap;
4479 		qc->dev = dev;
4480 
4481 		ata_qc_reinit(qc);
4482 	}
4483 
4484 	return qc;
4485 }
4486 
4487 /**
4488  *	ata_qc_free - free unused ata_queued_cmd
4489  *	@qc: Command to complete
4490  *
4491  *	Designed to free unused ata_queued_cmd object
4492  *	in case something prevents using it.
4493  *
4494  *	LOCKING:
4495  *	spin_lock_irqsave(host lock)
4496  */
4497 void ata_qc_free(struct ata_queued_cmd *qc)
4498 {
4499 	struct ata_port *ap = qc->ap;
4500 	unsigned int tag;
4501 
4502 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4503 
4504 	qc->flags = 0;
4505 	tag = qc->tag;
4506 	if (likely(ata_tag_valid(tag))) {
4507 		qc->tag = ATA_TAG_POISON;
4508 		clear_bit(tag, &ap->qc_allocated);
4509 	}
4510 }
4511 
4512 void __ata_qc_complete(struct ata_queued_cmd *qc)
4513 {
4514 	struct ata_port *ap = qc->ap;
4515 	struct ata_link *link = qc->dev->link;
4516 
4517 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4518 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4519 
4520 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4521 		ata_sg_clean(qc);
4522 
4523 	/* command should be marked inactive atomically with qc completion */
4524 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4525 		link->sactive &= ~(1 << qc->tag);
4526 		if (!link->sactive)
4527 			ap->nr_active_links--;
4528 	} else {
4529 		link->active_tag = ATA_TAG_POISON;
4530 		ap->nr_active_links--;
4531 	}
4532 
4533 	/* clear exclusive status */
4534 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4535 		     ap->excl_link == link))
4536 		ap->excl_link = NULL;
4537 
4538 	/* atapi: mark qc as inactive to prevent the interrupt handler
4539 	 * from completing the command twice later, before the error handler
4540 	 * is called. (when rc != 0 and atapi request sense is needed)
4541 	 */
4542 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4543 	ap->qc_active &= ~(1 << qc->tag);
4544 
4545 	/* call completion callback */
4546 	qc->complete_fn(qc);
4547 }
4548 
4549 static void fill_result_tf(struct ata_queued_cmd *qc)
4550 {
4551 	struct ata_port *ap = qc->ap;
4552 
4553 	qc->result_tf.flags = qc->tf.flags;
4554 	ap->ops->qc_fill_rtf(qc);
4555 }
4556 
4557 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4558 {
4559 	struct ata_device *dev = qc->dev;
4560 
4561 	if (ata_tag_internal(qc->tag))
4562 		return;
4563 
4564 	if (ata_is_nodata(qc->tf.protocol))
4565 		return;
4566 
4567 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4568 		return;
4569 
4570 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4571 }
4572 
4573 /**
4574  *	ata_qc_complete - Complete an active ATA command
4575  *	@qc: Command to complete
4577  *
4578  *	Indicate to the mid and upper layers that an ATA
4579  *	command has completed, with either an ok or not-ok status.
4580  *
4581  *	LOCKING:
4582  *	spin_lock_irqsave(host lock)
4583  */
4584 void ata_qc_complete(struct ata_queued_cmd *qc)
4585 {
4586 	struct ata_port *ap = qc->ap;
4587 
4588 	/* XXX: New EH and old EH use different mechanisms to
4589 	 * synchronize EH with regular execution path.
4590 	 *
4591 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4592 	 * Normal execution path is responsible for not accessing a
4593 	 * failed qc.  libata core enforces the rule by returning NULL
4594 	 * from ata_qc_from_tag() for failed qcs.
4595 	 *
4596 	 * Old EH depends on ata_qc_complete() nullifying completion
4597 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4598 	 * not synchronize with interrupt handler.  Only PIO task is
4599 	 * taken care of.
4600 	 */
4601 	if (ap->ops->error_handler) {
4602 		struct ata_device *dev = qc->dev;
4603 		struct ata_eh_info *ehi = &dev->link->eh_info;
4604 
4605 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4606 
4607 		if (unlikely(qc->err_mask))
4608 			qc->flags |= ATA_QCFLAG_FAILED;
4609 
4610 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4611 			if (!ata_tag_internal(qc->tag)) {
4612 				/* always fill result TF for failed qc */
4613 				fill_result_tf(qc);
4614 				ata_qc_schedule_eh(qc);
4615 				return;
4616 			}
4617 		}
4618 
4619 		/* read result TF if requested */
4620 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4621 			fill_result_tf(qc);
4622 
4623 		/* Some commands need post-processing after successful
4624 		 * completion.
4625 		 */
4626 		switch (qc->tf.command) {
4627 		case ATA_CMD_SET_FEATURES:
4628 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4629 			    qc->tf.feature != SETFEATURES_WC_OFF)
4630 				break;
4631 			/* fall through */
4632 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4633 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4634 			/* revalidate device */
4635 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4636 			ata_port_schedule_eh(ap);
4637 			break;
4638 
4639 		case ATA_CMD_SLEEP:
4640 			dev->flags |= ATA_DFLAG_SLEEPING;
4641 			break;
4642 		}
4643 
4644 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4645 			ata_verify_xfer(qc);
4646 
4647 		__ata_qc_complete(qc);
4648 	} else {
4649 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4650 			return;
4651 
4652 		/* read result TF if failed or requested */
4653 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4654 			fill_result_tf(qc);
4655 
4656 		__ata_qc_complete(qc);
4657 	}
4658 }
4659 
4660 /**
4661  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4662  *	@ap: port in question
4663  *	@qc_active: new qc_active mask
4664  *
4665  *	Complete in-flight commands.  This function is meant to be
4666  *	called from the low-level driver's interrupt routine to complete
4667  *	requests normally.  ap->qc_active and @qc_active are compared
4668  *	and commands are completed accordingly.
4669  *
4670  *	LOCKING:
4671  *	spin_lock_irqsave(host lock)
4672  *
4673  *	RETURNS:
4674  *	Number of completed commands on success, -errno otherwise.
4675  */
4676 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4677 {
4678 	int nr_done = 0;
4679 	u32 done_mask;
4680 	int i;
4681 
4682 	done_mask = ap->qc_active ^ qc_active;
4683 
4684 	if (unlikely(done_mask & qc_active)) {
4685 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4686 				"(%08x->%08x)\n", ap->qc_active, qc_active);
4687 		return -EINVAL;
4688 	}
4689 
4690 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4691 		struct ata_queued_cmd *qc;
4692 
4693 		if (!(done_mask & (1 << i)))
4694 			continue;
4695 
4696 		if ((qc = ata_qc_from_tag(ap, i))) {
4697 			ata_qc_complete(qc);
4698 			nr_done++;
4699 		}
4700 	}
4701 
4702 	return nr_done;
4703 }
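
/*
 * Illustrative sketch (not in the original source): the typical
 * caller is an LLD interrupt handler which reads the controller's
 * notion of still-active tags and lets libata complete the rest.
 * my_interrupt, my_mmio and MY_ACTIVE_TAGS below are hypothetical:
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_instance)
 *	{
 *		struct ata_port *ap = dev_instance;
 *		u32 qc_active = readl(my_mmio(ap) + MY_ACTIVE_TAGS);
 *
 *		spin_lock(ap->lock);
 *		ata_qc_complete_multiple(ap, qc_active);
 *		spin_unlock(ap->lock);
 *		return IRQ_HANDLED;
 *	}
 */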
4704 
4705 /**
4706  *	ata_qc_issue - issue taskfile to device
4707  *	@qc: command to issue to device
4708  *
4709  *	Prepare an ATA command for submission to the device.
4710  *	This includes mapping the data into a DMA-able
4711  *	area, filling in the S/G table, and finally
4712  *	writing the taskfile to hardware, starting the command.
4713  *
4714  *	LOCKING:
4715  *	spin_lock_irqsave(host lock)
4716  */
4717 void ata_qc_issue(struct ata_queued_cmd *qc)
4718 {
4719 	struct ata_port *ap = qc->ap;
4720 	struct ata_link *link = qc->dev->link;
4721 	u8 prot = qc->tf.protocol;
4722 
4723 	/* Make sure only one non-NCQ command is outstanding.  The
4724 	 * check is skipped for old EH because it reuses active qc to
4725 	 * request ATAPI sense.
4726 	 */
4727 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4728 
4729 	if (ata_is_ncq(prot)) {
4730 		WARN_ON(link->sactive & (1 << qc->tag));
4731 
4732 		if (!link->sactive)
4733 			ap->nr_active_links++;
4734 		link->sactive |= 1 << qc->tag;
4735 	} else {
4736 		WARN_ON(link->sactive);
4737 
4738 		ap->nr_active_links++;
4739 		link->active_tag = qc->tag;
4740 	}
4741 
4742 	qc->flags |= ATA_QCFLAG_ACTIVE;
4743 	ap->qc_active |= 1 << qc->tag;
4744 
4745 	/* We guarantee to LLDs that they will have at least one
4746 	 * non-zero sg if the command is a data command.
4747 	 */
4748 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4749 
4750 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4751 				 (ap->flags & ATA_FLAG_PIO_DMA)))
4752 		if (ata_sg_setup(qc))
4753 			goto sg_err;
4754 
4755 	/* if device is sleeping, schedule reset and abort the link */
4756 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4757 		link->eh_info.action |= ATA_EH_RESET;
4758 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4759 		ata_link_abort(link);
4760 		return;
4761 	}
4762 
4763 	ap->ops->qc_prep(qc);
4764 
4765 	qc->err_mask |= ap->ops->qc_issue(qc);
4766 	if (unlikely(qc->err_mask))
4767 		goto err;
4768 	return;
4769 
4770 sg_err:
4771 	qc->err_mask |= AC_ERR_SYSTEM;
4772 err:
4773 	ata_qc_complete(qc);
4774 }
4775 
4776 /**
4777  *	sata_scr_valid - test whether SCRs are accessible
4778  *	@link: ATA link to test SCR accessibility for
4779  *
4780  *	Test whether SCRs are accessible for @link.
4781  *
4782  *	LOCKING:
4783  *	None.
4784  *
4785  *	RETURNS:
4786  *	1 if SCRs are accessible, 0 otherwise.
4787  */
4788 int sata_scr_valid(struct ata_link *link)
4789 {
4790 	struct ata_port *ap = link->ap;
4791 
4792 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4793 }
4794 
4795 /**
4796  *	sata_scr_read - read SCR register of the specified port
4797  *	@link: ATA link to read SCR for
4798  *	@reg: SCR to read
4799  *	@val: Place to store read value
4800  *
4801  *	Read SCR register @reg of @link into *@val.  This function is
4802  *	guaranteed to succeed if @link is ap->link, the cable type of
4803  *	the port is SATA and the port implements ->scr_read.
4804  *
4805  *	LOCKING:
4806  *	None if @link is ap->link.  Kernel thread context otherwise.
4807  *
4808  *	RETURNS:
4809  *	0 on success, negative errno on failure.
4810  */
4811 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4812 {
4813 	if (ata_is_host_link(link)) {
4814 		struct ata_port *ap = link->ap;
4815 
4816 		if (sata_scr_valid(link))
4817 			return ap->ops->scr_read(ap, reg, val);
4818 		return -EOPNOTSUPP;
4819 	}
4820 
4821 	return sata_pmp_scr_read(link, reg, val);
4822 }
4823 
4824 /**
4825  *	sata_scr_write - write SCR register of the specified port
4826  *	@link: ATA link to write SCR for
4827  *	@reg: SCR to write
4828  *	@val: value to write
4829  *
4830  *	Write @val to SCR register @reg of @link.  This function is
4831  *	guaranteed to succeed if @link is ap->link, the cable type of
4832  *	the port is SATA and the port implements ->scr_write.
4833  *
4834  *	LOCKING:
4835  *	None if @link is ap->link.  Kernel thread context otherwise.
4836  *
4837  *	RETURNS:
4838  *	0 on success, negative errno on failure.
4839  */
4840 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4841 {
4842 	if (ata_is_host_link(link)) {
4843 		struct ata_port *ap = link->ap;
4844 
4845 		if (sata_scr_valid(link))
4846 			return ap->ops->scr_write(ap, reg, val);
4847 		return -EOPNOTSUPP;
4848 	}
4849 
4850 	return sata_pmp_scr_write(link, reg, val);
4851 }
4852 
4853 /**
4854  *	sata_scr_write_flush - write SCR register of the specified port and flush
4855  *	@link: ATA link to write SCR for
4856  *	@reg: SCR to write
4857  *	@val: value to write
4858  *
4859  *	This function is identical to sata_scr_write() except that this
4860  *	function performs flush after writing to the register.
4861  *
4862  *	LOCKING:
4863  *	None if @link is ap->link.  Kernel thread context otherwise.
4864  *
4865  *	RETURNS:
4866  *	0 on success, negative errno on failure.
4867  */
4868 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4869 {
4870 	if (ata_is_host_link(link)) {
4871 		struct ata_port *ap = link->ap;
4872 		int rc;
4873 
4874 		if (sata_scr_valid(link)) {
4875 			rc = ap->ops->scr_write(ap, reg, val);
4876 			if (rc == 0)
4877 				rc = ap->ops->scr_read(ap, reg, &val);
4878 			return rc;
4879 		}
4880 		return -EOPNOTSUPP;
4881 	}
4882 
4883 	return sata_pmp_scr_write(link, reg, val);
4884 }
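
/*
 * Illustrative sketch (not in the original source): a common use of
 * these accessors is clearing SError after a reset by writing back
 * the bits that were read:
 *
 *	u32 serror;
 *
 *	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
 *		sata_scr_write(link, SCR_ERROR, serror);
 */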
4885 
4886 /**
4887  *	ata_link_online - test whether the given link is online
4888  *	@link: ATA link to test
4889  *
4890  *	Test whether @link is online.  Note that this function returns
4891  *	0 if online status of @link cannot be obtained, so
4892  *	ata_link_online(link) != !ata_link_offline(link).
4893  *
4894  *	LOCKING:
4895  *	None.
4896  *
4897  *	RETURNS:
4898  *	1 if the port online status is available and online.
4899  */
4900 int ata_link_online(struct ata_link *link)
4901 {
4902 	u32 sstatus;
4903 
4904 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4905 	    (sstatus & 0xf) == 0x3)
4906 		return 1;
4907 	return 0;
4908 }
4909 
4910 /**
4911  *	ata_link_offline - test whether the given link is offline
4912  *	@link: ATA link to test
4913  *
4914  *	Test whether @link is offline.  Note that this function
4915  *	returns 0 if offline status of @link cannot be obtained, so
4916  *	ata_link_online(link) != !ata_link_offline(link).
4917  *
4918  *	LOCKING:
4919  *	None.
4920  *
4921  *	RETURNS:
4922  *	1 if the port offline status is available and offline.
4923  */
4924 int ata_link_offline(struct ata_link *link)
4925 {
4926 	u32 sstatus;
4927 
4928 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4929 	    (sstatus & 0xf) != 0x3)
4930 		return 1;
4931 	return 0;
4932 }
4933 
4934 #ifdef CONFIG_PM
4935 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4936 			       unsigned int action, unsigned int ehi_flags,
4937 			       int wait)
4938 {
4939 	unsigned long flags;
4940 	int i, rc;
4941 
4942 	for (i = 0; i < host->n_ports; i++) {
4943 		struct ata_port *ap = host->ports[i];
4944 		struct ata_link *link;
4945 
4946 		/* Previous resume operation might still be in
4947 		 * progress.  Wait for PM_PENDING to clear.
4948 		 */
4949 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4950 			ata_port_wait_eh(ap);
4951 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4952 		}
4953 
4954 		/* request PM ops to EH */
4955 		spin_lock_irqsave(ap->lock, flags);
4956 
4957 		ap->pm_mesg = mesg;
4958 		if (wait) {
4959 			rc = 0;
4960 			ap->pm_result = &rc;
4961 		}
4962 
4963 		ap->pflags |= ATA_PFLAG_PM_PENDING;
4964 		__ata_port_for_each_link(link, ap) {
4965 			link->eh_info.action |= action;
4966 			link->eh_info.flags |= ehi_flags;
4967 		}
4968 
4969 		ata_port_schedule_eh(ap);
4970 
4971 		spin_unlock_irqrestore(ap->lock, flags);
4972 
4973 		/* wait and check result */
4974 		if (wait) {
4975 			ata_port_wait_eh(ap);
4976 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4977 			if (rc)
4978 				return rc;
4979 		}
4980 	}
4981 
4982 	return 0;
4983 }
4984 
4985 /**
4986  *	ata_host_suspend - suspend host
4987  *	@host: host to suspend
4988  *	@mesg: PM message
4989  *
4990  *	Suspend @host.  Actual operation is performed by EH.  This
4991  *	function requests EH to perform PM operations and waits for EH
4992  *	to finish.
4993  *
4994  *	LOCKING:
4995  *	Kernel thread context (may sleep).
4996  *
4997  *	RETURNS:
4998  *	0 on success, -errno on failure.
4999  */
5000 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5001 {
5002 	int rc;
5003 
5004 	/*
5005 	 * disable link pm on all ports before requesting
5006 	 * any pm activity
5007 	 */
5008 	ata_lpm_enable(host);
5009 
5010 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5011 	if (rc == 0)
5012 		host->dev->power.power_state = mesg;
5013 	return rc;
5014 }
5015 
5016 /**
5017  *	ata_host_resume - resume host
5018  *	@host: host to resume
5019  *
5020  *	Resume @host.  Actual operation is performed by EH.  This
5021  *	function requests EH to perform PM operations and returns.
5022  *	Note that all resume operations are performed in parallel.
5023  *
5024  *	LOCKING:
5025  *	Kernel thread context (may sleep).
5026  */
5027 void ata_host_resume(struct ata_host *host)
5028 {
5029 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5030 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5031 	host->dev->power.power_state = PMSG_ON;
5032 
5033 	/* reenable link pm */
5034 	ata_lpm_disable(host);
5035 }
5036 #endif
5037 
5038 /**
5039  *	ata_port_start - Set port up for dma.
5040  *	@ap: Port to initialize
5041  *
5042  *	Called just after data structures for each port are
5043  *	initialized.  Allocates space for PRD table.
5044  *
5045  *	May be used as the port_start() entry in ata_port_operations.
5046  *
5047  *	LOCKING:
5048  *	Inherited from caller.
5049  */
5050 int ata_port_start(struct ata_port *ap)
5051 {
5052 	struct device *dev = ap->dev;
5053 
5054 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5055 				      GFP_KERNEL);
5056 	if (!ap->prd)
5057 		return -ENOMEM;
5058 
5059 	return 0;
5060 }
5061 
5062 /**
5063  *	ata_dev_init - Initialize an ata_device structure
5064  *	@dev: Device structure to initialize
5065  *
5066  *	Initialize @dev in preparation for probing.
5067  *
5068  *	LOCKING:
5069  *	Inherited from caller.
5070  */
5071 void ata_dev_init(struct ata_device *dev)
5072 {
5073 	struct ata_link *link = dev->link;
5074 	struct ata_port *ap = link->ap;
5075 	unsigned long flags;
5076 
5077 	/* SATA spd limit is bound to the first device */
5078 	link->sata_spd_limit = link->hw_sata_spd_limit;
5079 	link->sata_spd = 0;
5080 
5081 	/* High bits of dev->flags are used to record warm plug
5082 	 * requests which occur asynchronously.  Synchronize using
5083 	 * host lock.
5084 	 */
5085 	spin_lock_irqsave(ap->lock, flags);
5086 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5087 	dev->horkage = 0;
5088 	spin_unlock_irqrestore(ap->lock, flags);
5089 
5090 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5091 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5092 	dev->pio_mask = UINT_MAX;
5093 	dev->mwdma_mask = UINT_MAX;
5094 	dev->udma_mask = UINT_MAX;
5095 }
5096 
5097 /**
5098  *	ata_link_init - Initialize an ata_link structure
5099  *	@ap: ATA port link is attached to
5100  *	@link: Link structure to initialize
5101  *	@pmp: Port multiplier port number
5102  *
5103  *	Initialize @link.
5104  *
5105  *	LOCKING:
5106  *	Kernel thread context (may sleep)
5107  */
5108 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5109 {
5110 	int i;
5111 
5112 	/* clear everything except for devices */
5113 	memset(link, 0, offsetof(struct ata_link, device[0]));
5114 
5115 	link->ap = ap;
5116 	link->pmp = pmp;
5117 	link->active_tag = ATA_TAG_POISON;
5118 	link->hw_sata_spd_limit = UINT_MAX;
5119 
5120 	/* can't use iterator, ap isn't initialized yet */
5121 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5122 		struct ata_device *dev = &link->device[i];
5123 
5124 		dev->link = link;
5125 		dev->devno = dev - link->device;
5126 		ata_dev_init(dev);
5127 	}
5128 }
5129 
5130 /**
5131  *	sata_link_init_spd - Initialize link->sata_spd_limit
5132  *	@link: Link to configure sata_spd_limit for
5133  *
5134  *	Initialize @link->[hw_]sata_spd_limit to the currently
5135  *	configured value.
5136  *
5137  *	LOCKING:
5138  *	Kernel thread context (may sleep).
5139  *
5140  *	RETURNS:
5141  *	0 on success, -errno on failure.
5142  */
5143 int sata_link_init_spd(struct ata_link *link)
5144 {
5145 	u32 scontrol;
5146 	u8 spd;
5147 	int rc;
5148 
5149 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5150 	if (rc)
5151 		return rc;
5152 
5153 	spd = (scontrol >> 4) & 0xf;
5154 	if (spd)
5155 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5156 
5157 	ata_force_spd_limit(link);
5158 
5159 	link->sata_spd_limit = link->hw_sata_spd_limit;
5160 
5161 	return 0;
5162 }
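
/*
 * Illustrative note (not in the original source): the SPD field of
 * SControl (bits 7:4) holds the configured speed limit and
 * (1 << spd) - 1 converts it into a bitmap of allowed generations.
 * For example spd == 2 (3.0Gbps limit) yields mask 0x3, permitting
 * both 1.5Gbps (bit 0) and 3.0Gbps (bit 1).
 */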
5163 
5164 /**
5165  *	ata_port_alloc - allocate and initialize basic ATA port resources
5166  *	@host: ATA host this allocated port belongs to
5167  *
5168  *	Allocate and initialize basic ATA port resources.
5169  *
5170  *	RETURNS:
5171  *	Allocated ATA port on success, NULL on failure.
5172  *
5173  *	LOCKING:
5174  *	Inherited from calling layer (may sleep).
5175  */
5176 struct ata_port *ata_port_alloc(struct ata_host *host)
5177 {
5178 	struct ata_port *ap;
5179 
5180 	DPRINTK("ENTER\n");
5181 
5182 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5183 	if (!ap)
5184 		return NULL;
5185 
5186 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5187 	ap->lock = &host->lock;
5188 	ap->flags = ATA_FLAG_DISABLED;
5189 	ap->print_id = -1;
5190 	ap->ctl = ATA_DEVCTL_OBS;
5191 	ap->host = host;
5192 	ap->dev = host->dev;
5193 	ap->last_ctl = 0xFF;
5194 
5195 #if defined(ATA_VERBOSE_DEBUG)
5196 	/* turn on all debugging levels */
5197 	ap->msg_enable = 0x00FF;
5198 #elif defined(ATA_DEBUG)
5199 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5200 #else
5201 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5202 #endif
5203 
5204 #ifdef CONFIG_ATA_SFF
5205 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5206 #endif
5207 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5208 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5209 	INIT_LIST_HEAD(&ap->eh_done_q);
5210 	init_waitqueue_head(&ap->eh_wait_q);
5211 	init_timer_deferrable(&ap->fastdrain_timer);
5212 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5213 	ap->fastdrain_timer.data = (unsigned long)ap;
5214 
5215 	ap->cbl = ATA_CBL_NONE;
5216 
5217 	ata_link_init(ap, &ap->link, 0);
5218 
5219 #ifdef ATA_IRQ_TRAP
5220 	ap->stats.unhandled_irq = 1;
5221 	ap->stats.idle_irq = 1;
5222 #endif
5223 	return ap;
5224 }
5225 
5226 static void ata_host_release(struct device *gendev, void *res)
5227 {
5228 	struct ata_host *host = dev_get_drvdata(gendev);
5229 	int i;
5230 
5231 	for (i = 0; i < host->n_ports; i++) {
5232 		struct ata_port *ap = host->ports[i];
5233 
5234 		if (!ap)
5235 			continue;
5236 
5237 		if (ap->scsi_host)
5238 			scsi_host_put(ap->scsi_host);
5239 
5240 		kfree(ap->pmp_link);
5241 		kfree(ap);
5242 		host->ports[i] = NULL;
5243 	}
5244 
5245 	dev_set_drvdata(gendev, NULL);
5246 }
5247 
5248 /**
5249  *	ata_host_alloc - allocate and init basic ATA host resources
5250  *	@dev: generic device this host is associated with
5251  *	@max_ports: maximum number of ATA ports associated with this host
5252  *
5253  *	Allocate and initialize basic ATA host resources.  An LLD calls
5254  *	this function to allocate a host, initializes it fully, and then
5255  *	attaches it using ata_host_register().
5256  *
5257  *	@max_ports ports are allocated and host->n_ports is
5258  *	initialized to @max_ports.  The caller is allowed to decrease
5259  *	host->n_ports before calling ata_host_register().  The unused
5260  *	ports will be automatically freed on registration.
5261  *
5262  *	RETURNS:
5263  *	Allocated ATA host on success, NULL on failure.
5264  *
5265  *	LOCKING:
5266  *	Inherited from calling layer (may sleep).
5267  */
5268 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5269 {
5270 	struct ata_host *host;
5271 	size_t sz;
5272 	int i;
5273 
5274 	DPRINTK("ENTER\n");
5275 
5276 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5277 		return NULL;
5278 
5279 	/* alloc a container for our list of ATA ports (buses) */
5280 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5282 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5283 	if (!host)
5284 		goto err_out;
5285 
5286 	devres_add(dev, host);
5287 	dev_set_drvdata(dev, host);
5288 
5289 	spin_lock_init(&host->lock);
5290 	host->dev = dev;
5291 	host->n_ports = max_ports;
5292 
5293 	/* allocate ports bound to this host */
5294 	for (i = 0; i < max_ports; i++) {
5295 		struct ata_port *ap;
5296 
5297 		ap = ata_port_alloc(host);
5298 		if (!ap)
5299 			goto err_out;
5300 
5301 		ap->port_no = i;
5302 		host->ports[i] = ap;
5303 	}
5304 
5305 	devres_remove_group(dev, NULL);
5306 	return host;
5307 
5308  err_out:
5309 	devres_release_group(dev, NULL);
5310 	return NULL;
5311 }
5312 
5313 /**
5314  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5315  *	@dev: generic device this host is associated with
5316  *	@ppi: array of ATA port_info to initialize host with
5317  *	@n_ports: number of ATA ports attached to this host
5318  *
5319  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5320  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5321  *	last entry will be used for the remaining ports.
5322  *
5323  *	RETURNS:
5324  *	Allocated ATA host on success, NULL on failure.
5325  *
5326  *	LOCKING:
5327  *	Inherited from calling layer (may sleep).
5328  */
5329 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5330 				      const struct ata_port_info * const * ppi,
5331 				      int n_ports)
5332 {
5333 	const struct ata_port_info *pi;
5334 	struct ata_host *host;
5335 	int i, j;
5336 
5337 	host = ata_host_alloc(dev, n_ports);
5338 	if (!host)
5339 		return NULL;
5340 
5341 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5342 		struct ata_port *ap = host->ports[i];
5343 
5344 		if (ppi[j])
5345 			pi = ppi[j++];
5346 
5347 		ap->pio_mask = pi->pio_mask;
5348 		ap->mwdma_mask = pi->mwdma_mask;
5349 		ap->udma_mask = pi->udma_mask;
5350 		ap->flags |= pi->flags;
5351 		ap->link.flags |= pi->link_flags;
5352 		ap->ops = pi->port_ops;
5353 
5354 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5355 			host->ops = pi->port_ops;
5356 	}
5357 
5358 	return host;
5359 }
5360 
5361 static void ata_host_stop(struct device *gendev, void *res)
5362 {
5363 	struct ata_host *host = dev_get_drvdata(gendev);
5364 	int i;
5365 
5366 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5367 
5368 	for (i = 0; i < host->n_ports; i++) {
5369 		struct ata_port *ap = host->ports[i];
5370 
5371 		if (ap->ops->port_stop)
5372 			ap->ops->port_stop(ap);
5373 	}
5374 
5375 	if (host->ops->host_stop)
5376 		host->ops->host_stop(host);
5377 }
5378 
5379 /**
5380  *	ata_finalize_port_ops - finalize ata_port_operations
5381  *	@ops: ata_port_operations to finalize
5382  *
5383  *	An ata_port_operations can inherit from another ops and that
5384  *	ops can again inherit from another.  This can go on as many
5385  *	times as necessary as long as there is no loop in the
5386  *	inheritance chain.
5387  *
5388  *	Ops tables are finalized when the host is started.  NULL or
5389  *	unspecified entries are inherited from the closest ancestor
5390  *	that has the method, and the entry is populated with it.
5391  *	After finalization, the ops table directly points to all the
5392  *	methods and ->inherits is no longer necessary and cleared.
5393  *
5394  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5395  *
5396  *	LOCKING:
5397  *	None.
5398  */
5399 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5400 {
5401 	static DEFINE_SPINLOCK(lock);
5402 	const struct ata_port_operations *cur;
5403 	void **begin = (void **)ops;
5404 	void **end = (void **)&ops->inherits;
5405 	void **pp;
5406 
5407 	if (!ops || !ops->inherits)
5408 		return;
5409 
5410 	spin_lock(&lock);
5411 
5412 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5413 		void **inherit = (void **)cur;
5414 
5415 		for (pp = begin; pp < end; pp++, inherit++)
5416 			if (!*pp)
5417 				*pp = *inherit;
5418 	}
5419 
5420 	for (pp = begin; pp < end; pp++)
5421 		if (IS_ERR(*pp))
5422 			*pp = NULL;
5423 
5424 	ops->inherits = NULL;
5425 
5426 	spin_unlock(&lock);
5427 }
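
/*
 * Illustrative sketch (not in the original source): an LLD declares
 * inheritance instead of copying a full ops table.  Everything here
 * except ata_base_port_ops and ATA_OP_NULL is hypothetical:
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 *
 * After finalization every slot left NULL points at the ancestor's
 * method, .hardreset keeps the override, .softreset is forced to
 * NULL and ->inherits is cleared.
 */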
5428 
5429 /**
5430  *	ata_host_start - start and freeze ports of an ATA host
5431  *	@host: ATA host to start ports for
5432  *
5433  *	Start and then freeze ports of @host.  Started status is
5434  *	recorded in host->flags, so this function can be called
5435  *	multiple times.  Ports are guaranteed to get started only
5436  *	once.  If host->ops isn't initialized yet, it's set to the
5437  *	first non-dummy port ops.
5438  *
5439  *	LOCKING:
5440  *	Inherited from calling layer (may sleep).
5441  *
5442  *	RETURNS:
5443  *	0 if all ports are started successfully, -errno otherwise.
5444  */
5445 int ata_host_start(struct ata_host *host)
5446 {
5447 	int have_stop = 0;
5448 	void *start_dr = NULL;
5449 	int i, rc;
5450 
5451 	if (host->flags & ATA_HOST_STARTED)
5452 		return 0;
5453 
5454 	ata_finalize_port_ops(host->ops);
5455 
5456 	for (i = 0; i < host->n_ports; i++) {
5457 		struct ata_port *ap = host->ports[i];
5458 
5459 		ata_finalize_port_ops(ap->ops);
5460 
5461 		if (!host->ops && !ata_port_is_dummy(ap))
5462 			host->ops = ap->ops;
5463 
5464 		if (ap->ops->port_stop)
5465 			have_stop = 1;
5466 	}
5467 
5468 	if (host->ops->host_stop)
5469 		have_stop = 1;
5470 
5471 	if (have_stop) {
5472 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5473 		if (!start_dr)
5474 			return -ENOMEM;
5475 	}
5476 
5477 	for (i = 0; i < host->n_ports; i++) {
5478 		struct ata_port *ap = host->ports[i];
5479 
5480 		if (ap->ops->port_start) {
5481 			rc = ap->ops->port_start(ap);
5482 			if (rc) {
5483 				if (rc != -ENODEV)
5484 					dev_printk(KERN_ERR, host->dev,
5485 						"failed to start port %d "
5486 						"(errno=%d)\n", i, rc);
5487 				goto err_out;
5488 			}
5489 		}
5490 		ata_eh_freeze_port(ap);
5491 	}
5492 
5493 	if (start_dr)
5494 		devres_add(host->dev, start_dr);
5495 	host->flags |= ATA_HOST_STARTED;
5496 	return 0;
5497 
5498  err_out:
5499 	while (--i >= 0) {
5500 		struct ata_port *ap = host->ports[i];
5501 
5502 		if (ap->ops->port_stop)
5503 			ap->ops->port_stop(ap);
5504 	}
5505 	devres_free(start_dr);
5506 	return rc;
5507 }
5508 
5509 /**
5510  *	ata_sas_host_init - Initialize a host struct
5511  *	@host:	host to initialize
5512  *	@dev:	device host is attached to
5513  *	@flags:	host flags
5514  *	@ops:	port_ops
5515  *
5516  *	LOCKING:
5517  *	PCI/etc. bus probe sem.
5518  *
5519  */
5520 /* KILLME - the only user left is ipr */
5521 void ata_host_init(struct ata_host *host, struct device *dev,
5522 		   unsigned long flags, struct ata_port_operations *ops)
5523 {
5524 	spin_lock_init(&host->lock);
5525 	host->dev = dev;
5526 	host->flags = flags;
5527 	host->ops = ops;
5528 }
5529 
5530 /**
5531  *	ata_host_register - register initialized ATA host
5532  *	@host: ATA host to register
5533  *	@sht: template for SCSI host
5534  *
5535  *	Register initialized ATA host.  @host is allocated using
5536  *	ata_host_alloc() and fully initialized by LLD.  This function
5537  *	starts ports, registers @host with ATA and SCSI layers and
5538  *	probes registered devices.
5539  *
5540  *	LOCKING:
5541  *	Inherited from calling layer (may sleep).
5542  *
5543  *	RETURNS:
5544  *	0 on success, -errno otherwise.
5545  */
5546 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5547 {
5548 	int i, rc;
5549 
5550 	/* host must have been started */
5551 	if (!(host->flags & ATA_HOST_STARTED)) {
5552 		dev_printk(KERN_ERR, host->dev,
5553 			   "BUG: trying to register unstarted host\n");
5554 		WARN_ON(1);
5555 		return -EINVAL;
5556 	}
5557 
5558 	/* Blow away unused ports.  This happens when the LLD can't
5559 	 * determine the exact number of ports to allocate at
5560 	 * allocation time.
5561 	 */
5562 	for (i = host->n_ports; host->ports[i]; i++)
5563 		kfree(host->ports[i]);
5564 
5565 	/* give ports names and add SCSI hosts */
5566 	for (i = 0; i < host->n_ports; i++)
5567 		host->ports[i]->print_id = ata_print_id++;
5568 
5569 	rc = ata_scsi_add_hosts(host, sht);
5570 	if (rc)
5571 		return rc;
5572 
5573 	/* associate with ACPI nodes */
5574 	ata_acpi_associate(host);
5575 
5576 	/* set cable, sata_spd_limit and report */
5577 	for (i = 0; i < host->n_ports; i++) {
5578 		struct ata_port *ap = host->ports[i];
5579 		unsigned long xfer_mask;
5580 
5581 		/* set SATA cable type if still unset */
5582 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5583 			ap->cbl = ATA_CBL_SATA;
5584 
5585 		/* init sata_spd_limit to the current value */
5586 		sata_link_init_spd(&ap->link);
5587 
5588 		/* print per-port info to dmesg */
5589 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5590 					      ap->udma_mask);
5591 
5592 		if (!ata_port_is_dummy(ap)) {
5593 			ata_port_printk(ap, KERN_INFO,
5594 					"%cATA max %s %s\n",
5595 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5596 					ata_mode_string(xfer_mask),
5597 					ap->link.eh_info.desc);
5598 			ata_ehi_clear_desc(&ap->link.eh_info);
5599 		} else
5600 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5601 	}
5602 
5603 	/* perform each probe synchronously */
5604 	DPRINTK("probe begin\n");
5605 	for (i = 0; i < host->n_ports; i++) {
5606 		struct ata_port *ap = host->ports[i];
5607 
5608 		/* probe */
5609 		if (ap->ops->error_handler) {
5610 			struct ata_eh_info *ehi = &ap->link.eh_info;
5611 			unsigned long flags;
5612 
5613 			ata_port_probe(ap);
5614 
5615 			/* kick EH for boot probing */
5616 			spin_lock_irqsave(ap->lock, flags);
5617 
5618 			ehi->probe_mask |= ATA_ALL_DEVICES;
5619 			ehi->action |= ATA_EH_RESET;
5620 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5621 
5622 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5623 			ap->pflags |= ATA_PFLAG_LOADING;
5624 			ata_port_schedule_eh(ap);
5625 
5626 			spin_unlock_irqrestore(ap->lock, flags);
5627 
5628 			/* wait for EH to finish */
5629 			ata_port_wait_eh(ap);
5630 		} else {
5631 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5632 			rc = ata_bus_probe(ap);
5633 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
5634 
5635 			if (rc) {
5636 				/* FIXME: do something useful here?
5637 				 * Current libata behavior will
5638 				 * tear down everything when
5639 				 * the module is removed
5640 				 * or the h/w is unplugged.
5641 				 */
5642 			}
5643 		}
5644 	}
5645 
5646 	/* probes are done, now scan each port's disk(s) */
5647 	DPRINTK("host probe begin\n");
5648 	for (i = 0; i < host->n_ports; i++) {
5649 		struct ata_port *ap = host->ports[i];
5650 
5651 		ata_scsi_scan_host(ap, 1);
5652 		ata_lpm_schedule(ap, ap->pm_policy);
5653 	}
5654 
5655 	return 0;
5656 }
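
/*
 * Illustrative sketch (not part of this file): an LLD that manages its
 * own IRQ wiring brings a host up with ata_host_start() followed by
 * ata_host_register().  foo_probe, foo_port_info and foo_sht are
 * hypothetical names used only for the example.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		host = ata_host_alloc_pinfo(dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		rc = ata_host_start(host);
 *		if (rc)
 *			return rc;
 *
 *		return ata_host_register(host, &foo_sht);
 *	}
 */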
5657 
5658 /**
5659  *	ata_host_activate - start host, request IRQ and register it
5660  *	@host: target ATA host
5661  *	@irq: IRQ to request
5662  *	@irq_handler: irq_handler used when requesting IRQ
5663  *	@irq_flags: irq_flags used when requesting IRQ
5664  *	@sht: scsi_host_template to use when registering the host
5665  *
5666  *	After allocating an ATA host and initializing it, most libata
5667  *	LLDs perform three steps to activate the host - start host,
5668  *	request IRQ and register it.  This helper takes the necessary
5669  *	arguments and performs the three steps in one go.
5670  *
5671  *	An invalid (zero) @irq skips IRQ registration and expects the
5672  *	ports to have been set to polling mode; in this case, @irq_handler
5673  *	should be NULL.
5674  *
5675  *	LOCKING:
5676  *	Inherited from calling layer (may sleep).
5677  *
5678  *	RETURNS:
5679  *	0 on success, -errno otherwise.
5680  */
5681 int ata_host_activate(struct ata_host *host, int irq,
5682 		      irq_handler_t irq_handler, unsigned long irq_flags,
5683 		      struct scsi_host_template *sht)
5684 {
5685 	int i, rc;
5686 
5687 	rc = ata_host_start(host);
5688 	if (rc)
5689 		return rc;
5690 
5691 	/* Special case for polling mode */
5692 	if (!irq) {
5693 		WARN_ON(irq_handler);
5694 		return ata_host_register(host, sht);
5695 	}
5696 
5697 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5698 			      dev_driver_string(host->dev), host);
5699 	if (rc)
5700 		return rc;
5701 
5702 	for (i = 0; i < host->n_ports; i++)
5703 		ata_port_desc(host->ports[i], "irq %d", irq);
5704 
5705 	rc = ata_host_register(host, sht);
5706 	/* if failed, just free the IRQ and leave ports alone */
5707 	if (rc)
5708 		devm_free_irq(host->dev, irq, host);
5709 
5710 	return rc;
5711 }
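
/*
 * Illustrative sketch (not part of this file): a typical PCI LLD probe
 * path funnels into ata_host_activate().  foo_interrupt, foo_port_info
 * and foo_sht are hypothetical.
 *
 *	static int foo_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		return ata_host_activate(host, pdev->irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */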
5712 
5713 /**
5714  *	ata_port_detach - Detach ATA port in preparation for device removal
5715  *	@ap: ATA port to be detached
5716  *
5717  *	Detach all ATA devices and the associated SCSI devices of @ap;
5718  *	then, remove the associated SCSI host.  @ap is guaranteed to
5719  *	be quiescent on return from this function.
5720  *
5721  *	LOCKING:
5722  *	Kernel thread context (may sleep).
5723  */
5724 static void ata_port_detach(struct ata_port *ap)
5725 {
5726 	unsigned long flags;
5727 	struct ata_link *link;
5728 	struct ata_device *dev;
5729 
5730 	if (!ap->ops->error_handler)
5731 		goto skip_eh;
5732 
5733 	/* tell EH we're leaving & flush EH */
5734 	spin_lock_irqsave(ap->lock, flags);
5735 	ap->pflags |= ATA_PFLAG_UNLOADING;
5736 	spin_unlock_irqrestore(ap->lock, flags);
5737 
5738 	ata_port_wait_eh(ap);
5739 
5740 	/* EH is now guaranteed to see UNLOADING - EH context belongs
5741 	 * to us.  Disable all existing devices.
5742 	 */
5743 	ata_port_for_each_link(link, ap) {
5744 		ata_link_for_each_dev(dev, link)
5745 			ata_dev_disable(dev);
5746 	}
5747 
5748 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
5749 	 * will be skipped and retries will be terminated with a bad
5750 	 * target.
5751 	 */
5752 	spin_lock_irqsave(ap->lock, flags);
5753 	ata_port_freeze(ap);	/* won't be thawed */
5754 	spin_unlock_irqrestore(ap->lock, flags);
5755 
5756 	ata_port_wait_eh(ap);
5757 	cancel_rearming_delayed_work(&ap->hotplug_task);
5758 
5759  skip_eh:
5760 	/* remove the associated SCSI host */
5761 	scsi_remove_host(ap->scsi_host);
5762 }
5763 
5764 /**
5765  *	ata_host_detach - Detach all ports of an ATA host
5766  *	@host: Host to detach
5767  *
5768  *	Detach all ports of @host.
5769  *
5770  *	LOCKING:
5771  *	Kernel thread context (may sleep).
5772  */
5773 void ata_host_detach(struct ata_host *host)
5774 {
5775 	int i;
5776 
5777 	for (i = 0; i < host->n_ports; i++)
5778 		ata_port_detach(host->ports[i]);
5779 
5780 	/* the host is dead now, dissociate ACPI */
5781 	ata_acpi_dissociate(host);
5782 }
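
/*
 * Illustrative sketch: a non-PCI (e.g. platform) LLD's remove callback
 * is usually just a wrapper around ata_host_detach().
 * foo_platform_remove is hypothetical.
 *
 *	static int foo_platform_remove(struct platform_device *pdev)
 *	{
 *		struct ata_host *host = platform_get_drvdata(pdev);
 *
 *		ata_host_detach(host);
 *		return 0;
 *	}
 */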
5783 
5784 #ifdef CONFIG_PCI
5785 
5786 /**
5787  *	ata_pci_remove_one - PCI layer callback for device removal
5788  *	@pdev: PCI device that was removed
5789  *
5790  *	The PCI layer indicates to libata via this hook that a hot-unplug
5791  *	or module unload event has occurred.  Detach all ports.  Resource
5792  *	release is handled via devres.
5793  *
5794  *	LOCKING:
5795  *	Inherited from PCI layer (may sleep).
5796  */
5797 void ata_pci_remove_one(struct pci_dev *pdev)
5798 {
5799 	struct device *dev = &pdev->dev;
5800 	struct ata_host *host = dev_get_drvdata(dev);
5801 
5802 	ata_host_detach(host);
5803 }
5804 
5805 /* move to PCI subsystem */
5806 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5807 {
5808 	unsigned long tmp = 0;
5809 
5810 	switch (bits->width) {
5811 	case 1: {
5812 		u8 tmp8 = 0;
5813 		pci_read_config_byte(pdev, bits->reg, &tmp8);
5814 		tmp = tmp8;
5815 		break;
5816 	}
5817 	case 2: {
5818 		u16 tmp16 = 0;
5819 		pci_read_config_word(pdev, bits->reg, &tmp16);
5820 		tmp = tmp16;
5821 		break;
5822 	}
5823 	case 4: {
5824 		u32 tmp32 = 0;
5825 		pci_read_config_dword(pdev, bits->reg, &tmp32);
5826 		tmp = tmp32;
5827 		break;
5828 	}
5829 
5830 	default:
5831 		return -EINVAL;
5832 	}
5833 
5834 	tmp &= bits->mask;
5835 
5836 	return (tmp == bits->val) ? 1 : 0;
5837 }
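
/*
 * Illustrative use (the config offsets and masks below are made-up
 * values for the sketch): an LLD can describe a per-channel "enabled"
 * bit in PCI config space as a struct pci_bits (width 1 means a
 * byte-wide register) and test it, e.g. from its prereset method,
 * bailing out if the BIOS left the channel disabled.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },
 *		{ 0x43, 1, 0x80, 0x80 },
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;
 */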
5838 
5839 #ifdef CONFIG_PM
5840 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5841 {
5842 	pci_save_state(pdev);
5843 	pci_disable_device(pdev);
5844 
5845 	if (mesg.event & PM_EVENT_SLEEP)
5846 		pci_set_power_state(pdev, PCI_D3hot);
5847 }
5848 
5849 int ata_pci_device_do_resume(struct pci_dev *pdev)
5850 {
5851 	int rc;
5852 
5853 	pci_set_power_state(pdev, PCI_D0);
5854 	pci_restore_state(pdev);
5855 
5856 	rc = pcim_enable_device(pdev);
5857 	if (rc) {
5858 		dev_printk(KERN_ERR, &pdev->dev,
5859 			   "failed to enable device after resume (%d)\n", rc);
5860 		return rc;
5861 	}
5862 
5863 	pci_set_master(pdev);
5864 	return 0;
5865 }
5866 
5867 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5868 {
5869 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5870 	int rc = 0;
5871 
5872 	rc = ata_host_suspend(host, mesg);
5873 	if (rc)
5874 		return rc;
5875 
5876 	ata_pci_device_do_suspend(pdev, mesg);
5877 
5878 	return 0;
5879 }
5880 
5881 int ata_pci_device_resume(struct pci_dev *pdev)
5882 {
5883 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5884 	int rc;
5885 
5886 	rc = ata_pci_device_do_resume(pdev);
5887 	if (rc == 0)
5888 		ata_host_resume(host);
5889 	return rc;
5890 }
5891 #endif /* CONFIG_PM */
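
/*
 * Illustrative sketch: the suspend/resume helpers above slot directly
 * into a struct pci_driver, which is how most libata PCI LLDs wire up
 * power management.  foo_pci_tbl and foo_pci_probe are hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_pci_probe,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */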
5892 
5893 #endif /* CONFIG_PCI */
5894 
5895 static int __init ata_parse_force_one(char **cur,
5896 				      struct ata_force_ent *force_ent,
5897 				      const char **reason)
5898 {
5899 	/* FIXME: Currently, there's no way to tag init const data and
5900 	 * using __initdata causes build failure on some versions of
5901 	 * gcc.  Once __initdataconst is implemented, add const to the
5902 	 * following structure.
5903 	 */
5904 	static struct ata_force_param force_tbl[] __initdata = {
5905 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
5906 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
5907 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
5908 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
5909 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
5910 		{ "sata",	.cbl		= ATA_CBL_SATA },
5911 		{ "1.5Gbps",	.spd_limit	= 1 },
5912 		{ "3.0Gbps",	.spd_limit	= 2 },
5913 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
5914 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
5915 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
5916 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
5917 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
5918 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
5919 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
5920 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
5921 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
5922 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
5923 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
5924 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
5925 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
5926 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
5927 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5928 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5929 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5930 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5931 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5932 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5933 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5934 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5935 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5936 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5937 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5938 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5939 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5940 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5941 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5942 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5943 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5944 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5945 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5946 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5947 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5948 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
5949 	};
5950 	char *start = *cur, *p = *cur;
5951 	char *id, *val, *endp;
5952 	const struct ata_force_param *match_fp = NULL;
5953 	int nr_matches = 0, i;
5954 
5955 	/* find where this param ends and update *cur */
5956 	while (*p != '\0' && *p != ',')
5957 		p++;
5958 
5959 	if (*p == '\0')
5960 		*cur = p;
5961 	else
5962 		*cur = p + 1;
5963 
5964 	*p = '\0';
5965 
5966 	/* parse */
5967 	p = strchr(start, ':');
5968 	if (!p) {
5969 		val = strstrip(start);
5970 		goto parse_val;
5971 	}
5972 	*p = '\0';
5973 
5974 	id = strstrip(start);
5975 	val = strstrip(p + 1);
5976 
5977 	/* parse id */
5978 	p = strchr(id, '.');
5979 	if (p) {
5980 		*p++ = '\0';
5981 		force_ent->device = simple_strtoul(p, &endp, 10);
5982 		if (p == endp || *endp != '\0') {
5983 			*reason = "invalid device";
5984 			return -EINVAL;
5985 		}
5986 	}
5987 
5988 	force_ent->port = simple_strtoul(id, &endp, 10);
5989 	if (id == endp || *endp != '\0') {
5990 		*reason = "invalid port/link";
5991 		return -EINVAL;
5992 	}
5993 
5994  parse_val:
5995 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
5996 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
5997 		const struct ata_force_param *fp = &force_tbl[i];
5998 
5999 		if (strncasecmp(val, fp->name, strlen(val)))
6000 			continue;
6001 
6002 		nr_matches++;
6003 		match_fp = fp;
6004 
6005 		if (strcasecmp(val, fp->name) == 0) {
6006 			nr_matches = 1;
6007 			break;
6008 		}
6009 	}
6010 
6011 	if (!nr_matches) {
6012 		*reason = "unknown value";
6013 		return -EINVAL;
6014 	}
6015 	if (nr_matches > 1) {
6016 		*reason = "ambiguous value";
6017 		return -EINVAL;
6018 	}
6019 
6020 	force_ent->param = *match_fp;
6021 
6022 	return 0;
6023 }
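
/*
 * Example inputs this parser accepts; the general form is
 * "[PORT[.DEVICE]:]VALUE", comma-separated:
 *
 *	libata.force=3.0Gbps		all ports
 *	libata.force=1:noncq		port 1 only
 *	libata.force=2.00:udma4		device 0 on port 2
 *
 * A value without an ID portion reuses the ID of the preceding entry
 * (or applies everywhere if no ID has been given yet).
 */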
6024 
6025 static void __init ata_parse_force_param(void)
6026 {
6027 	int idx = 0, size = 1;
6028 	int last_port = -1, last_device = -1;
6029 	char *p, *cur, *next;
6030 
6031 	/* calculate maximum number of params and allocate force_tbl */
6032 	for (p = ata_force_param_buf; *p; p++)
6033 		if (*p == ',')
6034 			size++;
6035 
6036 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6037 	if (!ata_force_tbl) {
6038 		printk(KERN_WARNING "ata: failed to allocate force table, "
6039 		       "libata.force ignored\n");
6040 		return;
6041 	}
6042 
6043 	/* parse and populate the table */
6044 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6045 		const char *reason = "";
6046 		struct ata_force_ent te = { .port = -1, .device = -1 };
6047 
6048 		next = cur;
6049 		if (ata_parse_force_one(&next, &te, &reason)) {
6050 			printk(KERN_WARNING "ata: failed to parse force "
6051 			       "parameter \"%s\" (%s)\n",
6052 			       cur, reason);
6053 			continue;
6054 		}
6055 
6056 		if (te.port == -1) {
6057 			te.port = last_port;
6058 			te.device = last_device;
6059 		}
6060 
6061 		ata_force_tbl[idx++] = te;
6062 
6063 		last_port = te.port;
6064 		last_device = te.device;
6065 	}
6066 
6067 	ata_force_tbl_size = idx;
6068 }
6069 
6070 static int __init ata_init(void)
6071 {
6072 	ata_probe_timeout *= HZ;
6073 
6074 	ata_parse_force_param();
6075 
6076 	ata_wq = create_workqueue("ata");
6077 	if (!ata_wq)
6078 		return -ENOMEM;
6079 
6080 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6081 	if (!ata_aux_wq) {
6082 		destroy_workqueue(ata_wq);
6083 		return -ENOMEM;
6084 	}
6085 
6086 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6087 	return 0;
6088 }
6089 
6090 static void __exit ata_exit(void)
6091 {
6092 	kfree(ata_force_tbl);
6093 	destroy_workqueue(ata_wq);
6094 	destroy_workqueue(ata_aux_wq);
6095 }
6096 
6097 subsys_initcall(ata_init);
6098 module_exit(ata_exit);
6099 
6100 static unsigned long ratelimit_time;
6101 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6102 
6103 int ata_ratelimit(void)
6104 {
6105 	int rc;
6106 	unsigned long flags;
6107 
6108 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6109 
6110 	if (time_after(jiffies, ratelimit_time)) {
6111 		rc = 1;
6112 		ratelimit_time = jiffies + (HZ/5);
6113 	} else
6114 		rc = 0;
6115 
6116 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6117 
6118 	return rc;
6119 }
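
/*
 * Typical use: gate diagnostics that can fire at interrupt rate,
 * e.g. (hypothetical message)
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */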
6120 
6121 /**
6122  *	ata_wait_register - wait until register value changes
6123  *	@reg: IO-mapped register
6124  *	@mask: Mask to apply to read register value
6125  *	@val: Wait condition
6126  *	@interval_msec: polling interval in milliseconds
6127  *	@timeout_msec: timeout in milliseconds
6128  *
6129  *	Waiting for some bits of register to change is a common
6130  *	operation for ATA controllers.  This function reads the 32-bit LE
6131  *	IO-mapped register @reg and tests for the following condition.
6132  *
6133  *	(*@reg & mask) != val
6134  *
6135  *	If the condition is met, it returns; otherwise, the process is
6136  *	repeated after @interval_msec until timeout.
6137  *
6138  *	LOCKING:
6139  *	Kernel thread context (may sleep)
6140  *
6141  *	RETURNS:
6142  *	The final register value.
6143  */
6144 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6145 		      unsigned long interval_msec,
6146 		      unsigned long timeout_msec)
6147 {
6148 	unsigned long timeout;
6149 	u32 tmp;
6150 
6151 	tmp = ioread32(reg);
6152 
6153 	/* Calculate timeout _after_ the first read to make sure
6154 	 * preceding writes reach the controller before starting to
6155 	 * eat away the timeout.
6156 	 */
6157 	timeout = jiffies + (timeout_msec * HZ) / 1000;
6158 
6159 	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6160 		msleep(interval_msec);
6161 		tmp = ioread32(reg);
6162 	}
6163 
6164 	return tmp;
6165 }
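
/*
 * Illustrative use (FOO_STATUS and FOO_BUSY are made-up names): wait
 * up to a second for a controller busy bit to clear, polling every
 * 10ms.  Passing the bit in both @mask and @val keeps polling while
 * the bit is still set.
 *
 *	tmp = ata_wait_register(mmio + FOO_STATUS, FOO_BUSY, FOO_BUSY,
 *				10, 1000);
 *	if (tmp & FOO_BUSY)
 *		return -EBUSY;
 */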
6166 
6167 /*
6168  * Dummy port_ops
6169  */
6170 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6171 {
6172 	return AC_ERR_SYSTEM;
6173 }
6174 
6175 static void ata_dummy_error_handler(struct ata_port *ap)
6176 {
6177 	/* truly dummy */
6178 }
6179 
6180 struct ata_port_operations ata_dummy_port_ops = {
6181 	.qc_prep		= ata_noop_qc_prep,
6182 	.qc_issue		= ata_dummy_qc_issue,
6183 	.error_handler		= ata_dummy_error_handler,
6184 };
6185 
6186 const struct ata_port_info ata_dummy_port_info = {
6187 	.port_ops		= &ata_dummy_port_ops,
6188 };
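
/*
 * Illustrative use: an LLD whose second channel is absent or broken
 * can point that slot of its ata_port_info array at
 * ata_dummy_port_info; the port still exists but every queued command
 * fails with AC_ERR_SYSTEM.  foo_port_info is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */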
6189 
6190 /*
6191  * libata is essentially a library of internal helper functions for
6192  * low-level ATA host controller drivers.  As such, the API/ABI is
6193  * likely to change as new drivers are added and updated.
6194  * Do not depend on ABI/API stability.
6195  */
6196 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6197 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6198 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6199 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6200 EXPORT_SYMBOL_GPL(sata_port_ops);
6201 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6202 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6203 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6204 EXPORT_SYMBOL_GPL(ata_host_init);
6205 EXPORT_SYMBOL_GPL(ata_host_alloc);
6206 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6207 EXPORT_SYMBOL_GPL(ata_host_start);
6208 EXPORT_SYMBOL_GPL(ata_host_register);
6209 EXPORT_SYMBOL_GPL(ata_host_activate);
6210 EXPORT_SYMBOL_GPL(ata_host_detach);
6211 EXPORT_SYMBOL_GPL(ata_sg_init);
6212 EXPORT_SYMBOL_GPL(ata_qc_complete);
6213 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6214 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6215 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6216 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6217 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6218 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6219 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6220 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6221 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6222 EXPORT_SYMBOL_GPL(ata_mode_string);
6223 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6224 EXPORT_SYMBOL_GPL(ata_port_start);
6225 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6226 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6227 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6228 EXPORT_SYMBOL_GPL(ata_port_probe);
6229 EXPORT_SYMBOL_GPL(ata_dev_disable);
6230 EXPORT_SYMBOL_GPL(sata_set_spd);
6231 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6232 EXPORT_SYMBOL_GPL(sata_link_debounce);
6233 EXPORT_SYMBOL_GPL(sata_link_resume);
6234 EXPORT_SYMBOL_GPL(ata_std_prereset);
6235 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6236 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6237 EXPORT_SYMBOL_GPL(ata_std_postreset);
6238 EXPORT_SYMBOL_GPL(ata_dev_classify);
6239 EXPORT_SYMBOL_GPL(ata_dev_pair);
6240 EXPORT_SYMBOL_GPL(ata_port_disable);
6241 EXPORT_SYMBOL_GPL(ata_ratelimit);
6242 EXPORT_SYMBOL_GPL(ata_wait_register);
6243 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6244 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6245 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6246 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6247 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6248 EXPORT_SYMBOL_GPL(sata_scr_valid);
6249 EXPORT_SYMBOL_GPL(sata_scr_read);
6250 EXPORT_SYMBOL_GPL(sata_scr_write);
6251 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6252 EXPORT_SYMBOL_GPL(ata_link_online);
6253 EXPORT_SYMBOL_GPL(ata_link_offline);
6254 #ifdef CONFIG_PM
6255 EXPORT_SYMBOL_GPL(ata_host_suspend);
6256 EXPORT_SYMBOL_GPL(ata_host_resume);
6257 #endif /* CONFIG_PM */
6258 EXPORT_SYMBOL_GPL(ata_id_string);
6259 EXPORT_SYMBOL_GPL(ata_id_c_string);
6260 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6261 
6262 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6263 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6264 EXPORT_SYMBOL_GPL(ata_timing_compute);
6265 EXPORT_SYMBOL_GPL(ata_timing_merge);
6266 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6267 
6268 #ifdef CONFIG_PCI
6269 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6270 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6271 #ifdef CONFIG_PM
6272 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6273 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6274 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6275 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6276 #endif /* CONFIG_PM */
6277 #endif /* CONFIG_PCI */
6278 
6279 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6280 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6281 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6282 EXPORT_SYMBOL_GPL(ata_port_desc);
6283 #ifdef CONFIG_PCI
6284 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6285 #endif /* CONFIG_PCI */
6286 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6287 EXPORT_SYMBOL_GPL(ata_link_abort);
6288 EXPORT_SYMBOL_GPL(ata_port_abort);
6289 EXPORT_SYMBOL_GPL(ata_port_freeze);
6290 EXPORT_SYMBOL_GPL(sata_async_notification);
6291 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6292 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6293 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6294 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6295 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6296 EXPORT_SYMBOL_GPL(ata_do_eh);
6297 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6298 
6299 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6300 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6301 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6302 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6303 EXPORT_SYMBOL_GPL(ata_cable_sata);
6304