/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
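
/*
 * Example (an illustrative sketch, not part of the driver): the tables
 * above are intended to be handed to the SATA PHY debounce helpers.
 * Picking sata_deb_timing_hotplug here is an assumption made for
 * demonstration only.
 */
#if 0
static int example_resume_hotplug_link(struct ata_link *link,
				       unsigned long deadline)
{
	/* hotplug events warrant the longer debounce window */
	return sata_link_resume(link, sata_deb_timing_hotplug, deadline);
}
#endif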

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
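
/*
 * Example (hedged; the exact token names live in
 * Documentation/kernel-parameters.txt and vary by kernel version):
 * a boot parameter such as
 *
 *	libata.force=1:40c,1.00:udma4,2:noncq
 *
 * would force a 40-wire cable on port 1, limit device 0 on port 1 to
 * UDMA/66 and turn off NCQ on port 2.
 */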

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_spd_limit - force SATA spd limit according to libata.force
 *	@link: ATA link of interest
 *
 *	Force SATA spd limit according to libata.force and whine about
 *	it.  When only the port part is specified (e.g. 1:), the limit
 *	applies to all links connected to both the host link and all
 *	fan-out ports connected via PMP.  If the device part is
 *	specified as 0 (e.g. 1.00:), it specifies the first fan-out
 *	link not the host link.  Device number 15 always points to the
 *	host link whether PMP is attached or not.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_spd_limit(struct ata_link *link)
{
	int linkno, i;

	if (ata_is_host_link(link))
		linkno = 15;
	else
		linkno = link->pmp;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		if (!fe->param.spd_limit)
			continue;

		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
		ata_link_printk(link, KERN_NOTICE,
			"FORCE: PHY spd limit set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)
		alt_devno = 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link) && devno == 0)
		alt_devno = 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
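
/*
 * Example (an illustrative sketch, not part of the driver): serializing
 * a prepared taskfile into the 20-byte H2D Register FIS that a SATA
 * controller transmits.  The buffer and PMP number are assumptions.
 */
#if 0
static void example_build_h2d_fis(const struct ata_taskfile *tf,
				  u8 fis[20])
{
	/* pmp 0, is_cmd 1: sets bit 7 of byte 1 to mark a command FIS */
	ata_tf_to_fis(tf, 0, 1, fis);
}
#endif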

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
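
/*
 * Worked example of the ata_rw_cmds[] indexing above: an LBA48 FUA DMA
 * write yields index 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23,
 * i.e. ATA_CMD_WRITE_FUA_EXT.  The zero entries make impossible
 * combinations (e.g. FUA without LBA48) fail with -1.
 */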

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
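
/*
 * Example (an illustrative sketch, not part of the driver): building a
 * taskfile for a 16-sector read at LBA 4096.  ATA_TAG_INTERNAL keeps
 * the example off the NCQ path; the numbers are arbitrary.
 */
#if 0
static int example_build_read_tf(struct ata_device *dev,
				 struct ata_taskfile *tf)
{
	ata_tf_init(dev, tf);
	return ata_build_rw_tf(tf, dev, 4096, 16, 0, ATA_TAG_INTERNAL);
}
#endif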

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
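
/*
 * Example (an illustrative sketch): pack and unpack are inverses, so a
 * device's three mode masks can travel as a single word.
 */
#if 0
static void example_xfermask_round_trip(void)
{
	unsigned long pio, mwdma, udma;
	unsigned long xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2,
						    ATA_UDMA6);

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == ATA_PIO4, mwdma == ATA_MWDMA2, udma == ATA_UDMA6 */
}
#endif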

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
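
/*
 * Worked example: ata_xfer_mask2mode(ata_pack_xfermask(0, 0, ATA_UDMA5))
 * returns XFER_UDMA_5 -- ATA_UDMA5 sets UDMA mode bits 0-5, and only
 * the highest bit of the packed mask is considered.
 */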

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
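
/*
 * Worked example: ata_mode_string(ata_xfer_mode2mask(XFER_UDMA_5))
 * evaluates to "UDMA/100" -- mode2mask sets every bit up to UDMA5 and
 * the string table above is indexed by the highest bit set.
 */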

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask = 0;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because disks are too stupid to know that
	 * if the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL; instead they just
	 * give up.  So, for medium_power to work at all,
	 * we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}

/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void; on failure @ap->pm_policy falls back to
 *	MAX_PERFORMANCE.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, and call driver specific callbacks for disabling
 *	Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures for
	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
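
/*
 * Example (an illustrative sketch, not part of the driver): extracting
 * the model string from IDENTIFY data, mirroring how libata itself
 * reports devices.
 */
#if 0
static void example_print_model(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	/* sizeof(model) is odd: 40 string bytes + terminating NUL */
	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	printk(KERN_INFO "model: %s\n", model);
}
#endif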

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds, not the modes,
		 * that are supported...  Note drivers using the timing
		 * API will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the port_task function to use
 *	@delay: delay time in msecs for the workqueue function
 *
 *	Schedule the port_task work for execution after @delay msecs.
 *	There is one port_task per port and it's the user's (low level
 *	driver's) responsibility to make sure that only one task is
 *	active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
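
/*
 * Example (an illustrative sketch, not part of the driver):
 * ATA_CMD_FLUSH is a typical register-only opcode that fits
 * ata_do_simple_cmd(); interpreting the AC_ERR_* mask is left to the
 * caller.
 */
#if 0
static int example_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);

	return err_mask ? -EIO : 0;
}
#endif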

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY.  Probably a pointless
	   check as the caller should already know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* IORDY is mandatory for PIO3 and higher */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the mask of PIO modes usable without IORDY.  Defaults
 *	to PIO0-1 unless the drive's EIDE timing data shows PIO0-2 is
 *	safe without IORDY.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
2025 		 */
2026 		if (id[2] == 0x37c8)
2027 			goto retry;
2028 	}
2029 
2030 	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2031 		/*
2032 		 * The exact sequence expected by certain pre-ATA4 drives is:
2033 		 * SRST RESET
2034 		 * IDENTIFY (optional in early ATA)
2035 		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2036 		 * anything else..
2037 		 * Some drives were very specific about that exact sequence.
2038 		 *
2039 		 * Note that ATA4 says LBA is mandatory so the second check
2040 		 * should never trigger.
2041 		 */
2042 		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2043 			err_mask = ata_dev_init_params(dev, id[3], id[6]);
2044 			if (err_mask) {
2045 				rc = -EIO;
2046 				reason = "INIT_DEV_PARAMS failed";
2047 				goto err_out;
2048 			}
2049 
2050 			/* current CHS translation info (id[53-58]) might be
2051 			 * changed. reread the identify device info.
2052 			 */
2053 			flags &= ~ATA_READID_POSTRESET;
2054 			goto retry;
2055 		}
2056 	}
2057 
2058 	*p_class = class;
2059 
2060 	return 0;
2061 
2062  err_out:
2063 	if (ata_msg_warn(ap))
2064 		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2065 			       "(%s, err_mask=0x%x)\n", reason, err_mask);
2066 	return rc;
2067 }
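
/*
 * A minimal calling sketch (illustrative): probe a device whose class
 * is believed to be ATA.  Note that ata_dev_read_id() may rewrite the
 * class if the device only answers the other flavor of IDENTIFY.
 */
static int example_read_id(struct ata_device *dev, u16 *id)
{
	unsigned int class = ATA_DEV_ATA;	/* assumed starting class */

	return ata_dev_read_id(dev, &class, 0, id);
}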
2068 
2069 static inline u8 ata_dev_knobble(struct ata_device *dev)
2070 {
2071 	struct ata_port *ap = dev->link->ap;
2072 	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2073 }
2074 
2075 static void ata_dev_config_ncq(struct ata_device *dev,
2076 			       char *desc, size_t desc_sz)
2077 {
2078 	struct ata_port *ap = dev->link->ap;
2079 	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2080 
2081 	if (!ata_id_has_ncq(dev->id)) {
2082 		desc[0] = '\0';
2083 		return;
2084 	}
2085 	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2086 		snprintf(desc, desc_sz, "NCQ (not used)");
2087 		return;
2088 	}
2089 	if (ap->flags & ATA_FLAG_NCQ) {
2090 		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2091 		dev->flags |= ATA_DFLAG_NCQ;
2092 	}
2093 
2094 	if (hdepth >= ddepth)
2095 		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2096 	else
2097 		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2098 }
2099 
2100 /**
2101  *	ata_dev_configure - Configure the specified ATA/ATAPI device
2102  *	@dev: Target device to configure
2103  *
2104  *	Configure @dev according to @dev->id.  Generic and low-level
2105  *	driver specific fixups are also applied.
2106  *
2107  *	LOCKING:
2108  *	Kernel thread context (may sleep)
2109  *
2110  *	RETURNS:
2111  *	0 on success, -errno otherwise
2112  */
2113 int ata_dev_configure(struct ata_device *dev)
2114 {
2115 	struct ata_port *ap = dev->link->ap;
2116 	struct ata_eh_context *ehc = &dev->link->eh_context;
2117 	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2118 	const u16 *id = dev->id;
2119 	unsigned long xfer_mask;
2120 	char revbuf[7];		/* XYZ-99\0 */
2121 	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2122 	char modelbuf[ATA_ID_PROD_LEN+1];
2123 	int rc;
2124 
2125 	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2126 		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2127 			       __func__);
2128 		return 0;
2129 	}
2130 
2131 	if (ata_msg_probe(ap))
2132 		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2133 
2134 	/* set horkage */
2135 	dev->horkage |= ata_dev_blacklisted(dev);
2136 	ata_force_horkage(dev);
2137 
2138 	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2139 		ata_dev_printk(dev, KERN_INFO,
2140 			       "unsupported device, disabling\n");
2141 		ata_dev_disable(dev);
2142 		return 0;
2143 	}
2144 
2145 	/* let ACPI work its magic */
2146 	rc = ata_acpi_on_devcfg(dev);
2147 	if (rc)
2148 		return rc;
2149 
2150 	/* massage HPA, do it early as it might change IDENTIFY data */
2151 	rc = ata_hpa_resize(dev);
2152 	if (rc)
2153 		return rc;
2154 
2155 	/* print device capabilities */
2156 	if (ata_msg_probe(ap))
2157 		ata_dev_printk(dev, KERN_DEBUG,
2158 			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2159 			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
2160 			       __func__,
2161 			       id[49], id[82], id[83], id[84],
2162 			       id[85], id[86], id[87], id[88]);
2163 
2164 	/* initialize to-be-configured parameters */
2165 	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2166 	dev->max_sectors = 0;
2167 	dev->cdb_len = 0;
2168 	dev->n_sectors = 0;
2169 	dev->cylinders = 0;
2170 	dev->heads = 0;
2171 	dev->sectors = 0;
2172 
2173 	/*
2174 	 * common ATA, ATAPI feature tests
2175 	 */
2176 
2177 	/* find max transfer mode; for printk only */
2178 	xfer_mask = ata_id_xfermask(id);
2179 
2180 	if (ata_msg_probe(ap))
2181 		ata_dump_id(id);
2182 
2183 	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2184 	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2185 			sizeof(fwrevbuf));
2186 
2187 	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2188 			sizeof(modelbuf));
2189 
2190 	/* ATA-specific feature tests */
2191 	if (dev->class == ATA_DEV_ATA) {
2192 		if (ata_id_is_cfa(id)) {
2193 			if (id[162] & 1) /* CPRM may make this media unusable */
2194 				ata_dev_printk(dev, KERN_WARNING,
2195 					       "supports DRM functions and may "
2196 					       "not be fully accessible.\n");
2197 			snprintf(revbuf, 7, "CFA");
2198 		} else {
2199 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2200 			/* Warn the user if the device has TPM extensions */
2201 			if (ata_id_has_tpm(id))
2202 				ata_dev_printk(dev, KERN_WARNING,
2203 					       "supports DRM functions and may "
2204 					       "not be fully accessible.\n");
2205 		}
2206 
2207 		dev->n_sectors = ata_id_n_sectors(id);
2208 
2209 		if (dev->id[59] & 0x100)
2210 			dev->multi_count = dev->id[59] & 0xff;
2211 
2212 		if (ata_id_has_lba(id)) {
2213 			const char *lba_desc;
2214 			char ncq_desc[20];
2215 
2216 			lba_desc = "LBA";
2217 			dev->flags |= ATA_DFLAG_LBA;
2218 			if (ata_id_has_lba48(id)) {
2219 				dev->flags |= ATA_DFLAG_LBA48;
2220 				lba_desc = "LBA48";
2221 
2222 				if (dev->n_sectors >= (1UL << 28) &&
2223 				    ata_id_has_flush_ext(id))
2224 					dev->flags |= ATA_DFLAG_FLUSH_EXT;
2225 			}
2226 
2227 			/* config NCQ */
2228 			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2229 
2230 			/* print device info to dmesg */
2231 			if (ata_msg_drv(ap) && print_info) {
2232 				ata_dev_printk(dev, KERN_INFO,
2233 					"%s: %s, %s, max %s\n",
2234 					revbuf, modelbuf, fwrevbuf,
2235 					ata_mode_string(xfer_mask));
2236 				ata_dev_printk(dev, KERN_INFO,
2237 					"%Lu sectors, multi %u: %s %s\n",
2238 					(unsigned long long)dev->n_sectors,
2239 					dev->multi_count, lba_desc, ncq_desc);
2240 			}
2241 		} else {
2242 			/* CHS */
2243 
2244 			/* Default translation */
2245 			dev->cylinders	= id[1];
2246 			dev->heads	= id[3];
2247 			dev->sectors	= id[6];
2248 
2249 			if (ata_id_current_chs_valid(id)) {
2250 				/* Current CHS translation is valid. */
2251 				dev->cylinders = id[54];
2252 				dev->heads     = id[55];
2253 				dev->sectors   = id[56];
2254 			}
2255 
2256 			/* print device info to dmesg */
2257 			if (ata_msg_drv(ap) && print_info) {
2258 				ata_dev_printk(dev, KERN_INFO,
2259 					"%s: %s, %s, max %s\n",
2260 					revbuf,	modelbuf, fwrevbuf,
2261 					ata_mode_string(xfer_mask));
2262 				ata_dev_printk(dev, KERN_INFO,
2263 					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
2264 					(unsigned long long)dev->n_sectors,
2265 					dev->multi_count, dev->cylinders,
2266 					dev->heads, dev->sectors);
2267 			}
2268 		}
2269 
2270 		dev->cdb_len = 16;
2271 	}
2272 
2273 	/* ATAPI-specific feature tests */
2274 	else if (dev->class == ATA_DEV_ATAPI) {
2275 		const char *cdb_intr_string = "";
2276 		const char *atapi_an_string = "";
2277 		const char *dma_dir_string = "";
2278 		u32 sntf;
2279 
2280 		rc = atapi_cdb_len(id);
2281 		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2282 			if (ata_msg_warn(ap))
2283 				ata_dev_printk(dev, KERN_WARNING,
2284 					       "unsupported CDB len\n");
2285 			rc = -EINVAL;
2286 			goto err_out_nosup;
2287 		}
2288 		dev->cdb_len = (unsigned int) rc;
2289 
2290 		/* Enable ATAPI AN if both the host and device have
2291 		 * the support.  If PMP is attached, SNTF is required
2292 		 * to enable ATAPI AN to discern between PHY status
2293 		 * changed notifications and ATAPI ANs.
2294 		 */
2295 		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2296 		    (!sata_pmp_attached(ap) ||
2297 		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2298 			unsigned int err_mask;
2299 
2300 			/* issue SET feature command to turn this on */
2301 			err_mask = ata_dev_set_feature(dev,
2302 					SETFEATURES_SATA_ENABLE, SATA_AN);
2303 			if (err_mask)
2304 				ata_dev_printk(dev, KERN_ERR,
2305 					"failed to enable ATAPI AN "
2306 					"(err_mask=0x%x)\n", err_mask);
2307 			else {
2308 				dev->flags |= ATA_DFLAG_AN;
2309 				atapi_an_string = ", ATAPI AN";
2310 			}
2311 		}
2312 
2313 		if (ata_id_cdb_intr(dev->id)) {
2314 			dev->flags |= ATA_DFLAG_CDB_INTR;
2315 			cdb_intr_string = ", CDB intr";
2316 		}
2317 
2318 		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2319 			dev->flags |= ATA_DFLAG_DMADIR;
2320 			dma_dir_string = ", DMADIR";
2321 		}
2322 
2323 		/* print device info to dmesg */
2324 		if (ata_msg_drv(ap) && print_info)
2325 			ata_dev_printk(dev, KERN_INFO,
2326 				       "ATAPI: %s, %s, max %s%s%s%s\n",
2327 				       modelbuf, fwrevbuf,
2328 				       ata_mode_string(xfer_mask),
2329 				       cdb_intr_string, atapi_an_string,
2330 				       dma_dir_string);
2331 	}
2332 
2333 	/* determine max_sectors */
2334 	dev->max_sectors = ATA_MAX_SECTORS;
2335 	if (dev->flags & ATA_DFLAG_LBA48)
2336 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2337 
2338 	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2339 		if (ata_id_has_hipm(dev->id))
2340 			dev->flags |= ATA_DFLAG_HIPM;
2341 		if (ata_id_has_dipm(dev->id))
2342 			dev->flags |= ATA_DFLAG_DIPM;
2343 	}
2344 
2345 	/* Limit PATA drive on SATA cable bridge transfers to udma5,
2346 	   200 sectors */
2347 	if (ata_dev_knobble(dev)) {
2348 		if (ata_msg_drv(ap) && print_info)
2349 			ata_dev_printk(dev, KERN_INFO,
2350 				       "applying bridge limits\n");
2351 		dev->udma_mask &= ATA_UDMA5;
2352 		dev->max_sectors = ATA_MAX_SECTORS;
2353 	}
2354 
2355 	if ((dev->class == ATA_DEV_ATAPI) &&
2356 	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
2357 		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2358 		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2359 	}
2360 
2361 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2362 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2363 					 dev->max_sectors);
2364 
2365 	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2366 		dev->horkage |= ATA_HORKAGE_IPM;
2367 
2368 		/* reset link pm_policy for this port to no pm */
2369 		ap->pm_policy = MAX_PERFORMANCE;
2370 	}
2371 
2372 	if (ap->ops->dev_config)
2373 		ap->ops->dev_config(dev);
2374 
2375 	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2376 		/* Let the user know. We don't want to disallow opens for
2377 		   rescue purposes, or in case the vendor is just a blithering
2378 		   idiot. Do this after the dev_config call as some controllers
2379 		   with buggy firmware may want to avoid reporting false device
2380 		   bugs */
2381 
2382 		if (print_info) {
2383 			ata_dev_printk(dev, KERN_WARNING,
2384 "Drive reports diagnostics failure. This may indicate a drive\n");
2385 			ata_dev_printk(dev, KERN_WARNING,
2386 "fault or invalid emulation. Contact drive vendor for information.\n");
2387 		}
2388 	}
2389 
2390 	return 0;
2391 
2392 err_out_nosup:
2393 	if (ata_msg_probe(ap))
2394 		ata_dev_printk(dev, KERN_DEBUG,
2395 			       "%s: EXIT, err\n", __func__);
2396 	return rc;
2397 }
2398 
2399 /**
2400  *	ata_cable_40wire	-	return 40 wire cable type
2401  *	@ap: port
2402  *
2403  *	Helper method for drivers which want to hardwire 40 wire cable
2404  *	detection.
2405  */
2406 
2407 int ata_cable_40wire(struct ata_port *ap)
2408 {
2409 	return ATA_CBL_PATA40;
2410 }
2411 
2412 /**
2413  *	ata_cable_80wire	-	return 80 wire cable type
2414  *	@ap: port
2415  *
2416  *	Helper method for drivers which want to hardwire 80 wire cable
2417  *	detection.
2418  */
2419 
2420 int ata_cable_80wire(struct ata_port *ap)
2421 {
2422 	return ATA_CBL_PATA80;
2423 }
2424 
2425 /**
2426  *	ata_cable_unknown	-	return unknown PATA cable.
2427  *	@ap: port
2428  *
2429  *	Helper method for drivers which have no PATA cable detection.
2430  */
2431 
2432 int ata_cable_unknown(struct ata_port *ap)
2433 {
2434 	return ATA_CBL_PATA_UNK;
2435 }
2436 
2437 /**
2438  *	ata_cable_ignore	-	return ignored PATA cable.
2439  *	@ap: port
2440  *
2441  *	Helper method for drivers which don't use cable type to limit
2442  *	transfer mode.
2443  */
2444 int ata_cable_ignore(struct ata_port *ap)
2445 {
2446 	return ATA_CBL_PATA_IGN;
2447 }
2448 
2449 /**
2450  *	ata_cable_sata	-	return SATA cable type
2451  *	@ap: port
2452  *
2453  *	Helper method for drivers which have SATA cables
2454  */
2455 
2456 int ata_cable_sata(struct ata_port *ap)
2457 {
2458 	return ATA_CBL_SATA;
2459 }
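
/*
 * Illustrative sketch (hypothetical driver code): the helpers above
 * are meant to be plugged directly into an LLD's port operations as
 * the ->cable_detect() callback when no real detection is possible.
 */
static struct ata_port_operations example_pata_ops = {
	.cable_detect	= ata_cable_40wire,	/* hardwire a 40-wire cable */
};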
2460 
2461 /**
2462  *	ata_bus_probe - Reset and probe ATA bus
2463  *	@ap: Bus to probe
2464  *
2465  *	Master ATA bus probing function.  Initiates a hardware-dependent
2466  *	bus reset, then attempts to identify any devices found on
2467  *	the bus.
2468  *
2469  *	LOCKING:
2470  *	PCI/etc. bus probe sem.
2471  *
2472  *	RETURNS:
2473  *	Zero on success, negative errno otherwise.
2474  */
2475 
2476 int ata_bus_probe(struct ata_port *ap)
2477 {
2478 	unsigned int classes[ATA_MAX_DEVICES];
2479 	int tries[ATA_MAX_DEVICES];
2480 	int rc;
2481 	struct ata_device *dev;
2482 
2483 	ata_port_probe(ap);
2484 
2485 	ata_link_for_each_dev(dev, &ap->link)
2486 		tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2487 
2488  retry:
2489 	ata_link_for_each_dev(dev, &ap->link) {
2490 		/* If we issue an SRST then an ATA drive (not ATAPI)
2491 		 * may change configuration and be in PIO0 timing. If
2492 		 * we do a hard reset (or are coming from power on)
2493 		 * this is true for ATA or ATAPI. Until we've set a
2494 		 * suitable controller mode we should not touch the
2495 		 * bus as we may be talking too fast.
2496 		 */
2497 		dev->pio_mode = XFER_PIO_0;
2498 
2499 		/* If the controller has a pio mode setup function
2500 		 * then use it to set the chipset to rights. Don't
2501 		 * touch the DMA setup as that will be dealt with when
2502 		 * configuring devices.
2503 		 */
2504 		if (ap->ops->set_piomode)
2505 			ap->ops->set_piomode(ap, dev);
2506 	}
2507 
2508 	/* reset and determine device classes */
2509 	ap->ops->phy_reset(ap);
2510 
2511 	ata_link_for_each_dev(dev, &ap->link) {
2512 		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2513 		    dev->class != ATA_DEV_UNKNOWN)
2514 			classes[dev->devno] = dev->class;
2515 		else
2516 			classes[dev->devno] = ATA_DEV_NONE;
2517 
2518 		dev->class = ATA_DEV_UNKNOWN;
2519 	}
2520 
2521 	ata_port_probe(ap);
2522 
2523 	/* read IDENTIFY page and configure devices. We have to do the identify
2524 	   specific sequence bass-ackwards so that PDIAG- is released by
2525 	   the slave device */
2526 
2527 	ata_link_for_each_dev_reverse(dev, &ap->link) {
2528 		if (tries[dev->devno])
2529 			dev->class = classes[dev->devno];
2530 
2531 		if (!ata_dev_enabled(dev))
2532 			continue;
2533 
2534 		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2535 				     dev->id);
2536 		if (rc)
2537 			goto fail;
2538 	}
2539 
2540 	/* Now ask for the cable type as PDIAG- should have been released */
2541 	if (ap->ops->cable_detect)
2542 		ap->cbl = ap->ops->cable_detect(ap);
2543 
2544 	/* We may have SATA bridge glue hiding here irrespective of the
2545 	   reported cable types and sensed types */
2546 	ata_link_for_each_dev(dev, &ap->link) {
2547 		if (!ata_dev_enabled(dev))
2548 			continue;
2549 		/* SATA drives indicate we have a bridge. We don't know which
2550 		   end of the link the bridge is on, which is a problem */
2551 		if (ata_id_is_sata(dev->id))
2552 			ap->cbl = ATA_CBL_SATA;
2553 	}
2554 
2555 	/* After the identify sequence we can now set up the devices. We do
2556 	   this in the normal order so that the user doesn't get confused */
2557 
2558 	ata_link_for_each_dev(dev, &ap->link) {
2559 		if (!ata_dev_enabled(dev))
2560 			continue;
2561 
2562 		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2563 		rc = ata_dev_configure(dev);
2564 		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2565 		if (rc)
2566 			goto fail;
2567 	}
2568 
2569 	/* configure transfer mode */
2570 	rc = ata_set_mode(&ap->link, &dev);
2571 	if (rc)
2572 		goto fail;
2573 
2574 	ata_link_for_each_dev(dev, &ap->link)
2575 		if (ata_dev_enabled(dev))
2576 			return 0;
2577 
2578 	/* no device present, disable port */
2579 	ata_port_disable(ap);
2580 	return -ENODEV;
2581 
2582  fail:
2583 	tries[dev->devno]--;
2584 
2585 	switch (rc) {
2586 	case -EINVAL:
2587 		/* eeek, something went very wrong, give up */
2588 		tries[dev->devno] = 0;
2589 		break;
2590 
2591 	case -ENODEV:
2592 		/* give it just one more chance */
2593 		tries[dev->devno] = min(tries[dev->devno], 1);
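		/* fall through */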
2594 	case -EIO:
2595 		if (tries[dev->devno] == 1) {
2596 			/* This is the last chance, better to slow
2597 			 * down than lose it.
2598 			 */
2599 			sata_down_spd_limit(&ap->link);
2600 			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2601 		}
2602 	}
2603 
2604 	if (!tries[dev->devno])
2605 		ata_dev_disable(dev);
2606 
2607 	goto retry;
2608 }
2609 
2610 /**
2611  *	ata_port_probe - Mark port as enabled
2612  *	@ap: Port for which we indicate enablement
2613  *
2614  *	Modify @ap data structure such that the system
2615  *	thinks that the entire port is enabled.
2616  *
2617  *	LOCKING: host lock, or some other form of
2618  *	serialization.
2619  */
2620 
2621 void ata_port_probe(struct ata_port *ap)
2622 {
2623 	ap->flags &= ~ATA_FLAG_DISABLED;
2624 }
2625 
2626 /**
2627  *	sata_print_link_status - Print SATA link status
2628  *	@link: SATA link to printk link status about
2629  *
2630  *	This function prints link speed and status of a SATA link.
2631  *
2632  *	LOCKING:
2633  *	None.
2634  */
2635 static void sata_print_link_status(struct ata_link *link)
2636 {
2637 	u32 sstatus, scontrol, tmp;
2638 
2639 	if (sata_scr_read(link, SCR_STATUS, &sstatus))
2640 		return;
2641 	sata_scr_read(link, SCR_CONTROL, &scontrol);
2642 
2643 	if (ata_link_online(link)) {
2644 		tmp = (sstatus >> 4) & 0xf;
2645 		ata_link_printk(link, KERN_INFO,
2646 				"SATA link up %s (SStatus %X SControl %X)\n",
2647 				sata_spd_string(tmp), sstatus, scontrol);
2648 	} else {
2649 		ata_link_printk(link, KERN_INFO,
2650 				"SATA link down (SStatus %X SControl %X)\n",
2651 				sstatus, scontrol);
2652 	}
2653 }
2654 
2655 /**
2656  *	ata_dev_pair		-	return other device on cable
2657  *	@adev: device
2658  *
2659  *	Obtain the other device on the same cable, or NULL if none
2660  *	is present.
2661  */
2662 
2663 struct ata_device *ata_dev_pair(struct ata_device *adev)
2664 {
2665 	struct ata_link *link = adev->link;
2666 	struct ata_device *pair = &link->device[1 - adev->devno];
2667 	if (!ata_dev_enabled(pair))
2668 		return NULL;
2669 	return pair;
2670 }
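
/*
 * Illustrative sketch (hypothetical driver code): PATA timing setup
 * often needs to consider the companion device on the same cable,
 * e.g. when master and slave share one set of timing registers.
 */
static int example_pair_is_atapi(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	return pair && pair->class == ATA_DEV_ATAPI;
}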
2671 
2672 /**
2673  *	ata_port_disable - Disable port.
2674  *	@ap: Port to be disabled.
2675  *
2676  *	Modify @ap data structure such that the system
2677  *	thinks that the entire port is disabled, and should
2678  *	never attempt to probe or communicate with devices
2679  *	on this port.
2680  *
2681  *	LOCKING: host lock, or some other form of
2682  *	serialization.
2683  */
2684 
2685 void ata_port_disable(struct ata_port *ap)
2686 {
2687 	ap->link.device[0].class = ATA_DEV_NONE;
2688 	ap->link.device[1].class = ATA_DEV_NONE;
2689 	ap->flags |= ATA_FLAG_DISABLED;
2690 }
2691 
2692 /**
2693  *	sata_down_spd_limit - adjust SATA spd limit downward
2694  *	@link: Link to adjust SATA spd limit for
2695  *
2696  *	Adjust SATA spd limit of @link downward.  Note that this
2697  *	function only adjusts the limit.  The change must be applied
2698  *	using sata_set_spd().
2699  *
2700  *	LOCKING:
2701  *	Inherited from caller.
2702  *
2703  *	RETURNS:
2704  *	0 on success, negative errno on failure
2705  */
2706 int sata_down_spd_limit(struct ata_link *link)
2707 {
2708 	u32 sstatus, spd, mask;
2709 	int rc, highbit;
2710 
2711 	if (!sata_scr_valid(link))
2712 		return -EOPNOTSUPP;
2713 
2714 	/* If SCR can be read, use it to determine the current SPD.
2715 	 * If not, use cached value in link->sata_spd.
2716 	 */
2717 	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2718 	if (rc == 0)
2719 		spd = (sstatus >> 4) & 0xf;
2720 	else
2721 		spd = link->sata_spd;
2722 
2723 	mask = link->sata_spd_limit;
2724 	if (mask <= 1)
2725 		return -EINVAL;
2726 
2727 	/* unconditionally mask off the highest bit */
2728 	highbit = fls(mask) - 1;
2729 	mask &= ~(1 << highbit);
2730 
2731 	/* Mask off all speeds higher than or equal to the current
2732 	 * one.  Force 1.5Gbps if current SPD is not available.
2733 	 */
2734 	if (spd > 1)
2735 		mask &= (1 << (spd - 1)) - 1;
2736 	else
2737 		mask &= 1;
2738 
2739 	/* were we already at the bottom? */
2740 	if (!mask)
2741 		return -EINVAL;
2742 
2743 	link->sata_spd_limit = mask;
2744 
2745 	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2746 			sata_spd_string(fls(mask)));
2747 
2748 	return 0;
2749 }
2750 
2751 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2752 {
2753 	struct ata_link *host_link = &link->ap->link;
2754 	u32 limit, target, spd;
2755 
2756 	limit = link->sata_spd_limit;
2757 
2758 	/* Don't configure downstream link faster than upstream link.
2759 	 * It doesn't speed up anything and some PMPs choke on such
2760 	 * configuration.
2761 	 */
2762 	if (!ata_is_host_link(link) && host_link->sata_spd)
2763 		limit &= (1 << host_link->sata_spd) - 1;
2764 
2765 	if (limit == UINT_MAX)
2766 		target = 0;
2767 	else
2768 		target = fls(limit);
2769 
2770 	spd = (*scontrol >> 4) & 0xf;
2771 	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2772 
2773 	return spd != target;
2774 }
2775 
2776 /**
2777  *	sata_set_spd_needed - is SATA spd configuration needed
2778  *	@link: Link in question
2779  *
2780  *	Test whether the spd limit in SControl matches
2781  *	@link->sata_spd_limit.  This function is used to determine
2782  *	whether hardreset is necessary to apply SATA spd
2783  *	configuration.
2784  *
2785  *	LOCKING:
2786  *	Inherited from caller.
2787  *
2788  *	RETURNS:
2789  *	1 if SATA spd configuration is needed, 0 otherwise.
2790  */
2791 static int sata_set_spd_needed(struct ata_link *link)
2792 {
2793 	u32 scontrol;
2794 
2795 	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2796 		return 1;
2797 
2798 	return __sata_set_spd_needed(link, &scontrol);
2799 }
2800 
2801 /**
2802  *	sata_set_spd - set SATA spd according to spd limit
2803  *	@link: Link to set SATA spd for
2804  *
2805  *	Set SATA spd of @link according to sata_spd_limit.
2806  *
2807  *	LOCKING:
2808  *	Inherited from caller.
2809  *
2810  *	RETURNS:
2811  *	0 if spd doesn't need to be changed, 1 if spd has been
2812  *	changed.  Negative errno if SCR registers are inaccessible.
2813  */
2814 int sata_set_spd(struct ata_link *link)
2815 {
2816 	u32 scontrol;
2817 	int rc;
2818 
2819 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2820 		return rc;
2821 
2822 	if (!__sata_set_spd_needed(link, &scontrol))
2823 		return 0;
2824 
2825 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2826 		return rc;
2827 
2828 	return 1;
2829 }
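
/*
 * A minimal sketch of the intended calling pattern, assuming EH
 * context: apply the limit and, only if SControl actually changed,
 * follow up with a COMRESET so the new speed is renegotiated.
 */
static int example_apply_spd_limit(struct ata_link *link,
				   unsigned long deadline)
{
	int rc = sata_set_spd(link);

	if (rc <= 0)
		return rc;	/* error, or nothing needed changing */

	/* spd was rewritten; renegotiate the link speed */
	return sata_link_hardreset(link,
				   sata_ehc_deb_timing(&link->eh_context),
				   deadline, NULL, NULL);
}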
2830 
2831 /*
2832  * This mode timing computation functionality is ported over from
2833  * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2834  */
2835 /*
2836  * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2837  * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2838  * for UDMA6, which is currently supported only by Maxtor drives.
2839  *
2840  * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
2841  */
2842 
2843 static const struct ata_timing ata_timing[] = {
2844 /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2845 	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2846 	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2847 	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2848 	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2849 	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2850 	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2851 	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2852 
2853 	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2854 	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2855 	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2856 
2857 	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2858 	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2859 	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2860 	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2861 	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2862 
2863 /*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2864 	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2865 	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2866 	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2867 	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2868 	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2869 	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2870 	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2871 
2872 	{ 0xFF }
2873 };
2874 
2875 #define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
2876 #define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
2877 
2878 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2879 {
2880 	q->setup   = EZ(t->setup   * 1000,  T);
2881 	q->act8b   = EZ(t->act8b   * 1000,  T);
2882 	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2883 	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2884 	q->active  = EZ(t->active  * 1000,  T);
2885 	q->recover = EZ(t->recover * 1000,  T);
2886 	q->cycle   = EZ(t->cycle   * 1000,  T);
2887 	q->udma    = EZ(t->udma    * 1000, UT);
2888 }
2889 
2890 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2891 		      struct ata_timing *m, unsigned int what)
2892 {
2893 	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2894 	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2895 	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2896 	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2897 	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2898 	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2899 	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2900 	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2901 }
2902 
2903 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
2904 {
2905 	const struct ata_timing *t = ata_timing;
2906 
2907 	while (xfer_mode > t->mode)
2908 		t++;
2909 
2910 	if (xfer_mode == t->mode)
2911 		return t;
2912 	return NULL;
2913 }
2914 
2915 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2916 		       struct ata_timing *t, int T, int UT)
2917 {
2918 	const struct ata_timing *s;
2919 	struct ata_timing p;
2920 
2921 	/*
2922 	 * Find the mode.
2923 	 */
2924 
2925 	if (!(s = ata_timing_find_mode(speed)))
2926 		return -EINVAL;
2927 
2928 	memcpy(t, s, sizeof(*s));
2929 
2930 	/*
2931 	 * If the drive is an EIDE drive, it can tell us it needs extended
2932 	 * PIO/MW_DMA cycle timing.
2933 	 */
2934 
2935 	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2936 		memset(&p, 0, sizeof(p));
2937 		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2938 			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2939 					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2940 		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2941 			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2942 		}
2943 		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2944 	}
2945 
2946 	/*
2947 	 * Convert the timing to bus clock counts.
2948 	 */
2949 
2950 	ata_timing_quantize(t, t, T, UT);
2951 
2952 	/*
2953 	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2954 	 * S.M.A.R.T. and some other commands. We have to ensure that the
2955 	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2956 	 */
2957 
2958 	if (speed > XFER_PIO_6) {
2959 		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2960 		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2961 	}
2962 
2963 	/*
2964 	 * Lengthen active & recovery time so that cycle time is correct.
2965 	 */
2966 
2967 	if (t->act8b + t->rec8b < t->cyc8b) {
2968 		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2969 		t->rec8b = t->cyc8b - t->act8b;
2970 	}
2971 
2972 	if (t->active + t->recover < t->cycle) {
2973 		t->active += (t->cycle - (t->active + t->recover)) / 2;
2974 		t->recover = t->cycle - t->active;
2975 	}
2976 
2977 	/* In a few cases quantisation may produce enough errors to
2978 	   leave t->cycle too low for the sum of active and recovery;
2979 	   if so, we must correct this */
2980 	if (t->active + t->recover > t->cycle)
2981 		t->cycle = t->active + t->recover;
2982 
2983 	return 0;
2984 }
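
/*
 * Illustrative sketch (hypothetical 33 MHz controller): callers pass
 * T and UT as clock periods consistent with the ns->ps scaling noted
 * above, then translate the resulting clock counts into their own
 * timing registers.
 */
static int example_compute_pio_timing(struct ata_device *adev,
				      struct ata_timing *t)
{
	int T = 1000000000 / 33333;	/* ~30000, assumed 33 MHz clock */

	return ata_timing_compute(adev, adev->pio_mode, t, T, T);
}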
2985 
2986 /**
2987  *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
2988  *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
2989  *	@cycle: cycle duration in ns
2990  *
2991  *	Return matching xfer mode for @cycle.  The returned mode is of
2992  *	the transfer type specified by @xfer_shift.  If @cycle is too
2993  *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
2994  *	than the fastest known mode, the fastest mode is returned.
2995  *
2996  *	LOCKING:
2997  *	None.
2998  *
2999  *	RETURNS:
3000  *	Matching xfer_mode, 0xff if no match found.
3001  */
3002 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3003 {
3004 	u8 base_mode = 0xff, last_mode = 0xff;
3005 	const struct ata_xfer_ent *ent;
3006 	const struct ata_timing *t;
3007 
3008 	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3009 		if (ent->shift == xfer_shift)
3010 			base_mode = ent->base;
3011 
3012 	for (t = ata_timing_find_mode(base_mode);
3013 	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3014 		unsigned short this_cycle;
3015 
3016 		switch (xfer_shift) {
3017 		case ATA_SHIFT_PIO:
3018 		case ATA_SHIFT_MWDMA:
3019 			this_cycle = t->cycle;
3020 			break;
3021 		case ATA_SHIFT_UDMA:
3022 			this_cycle = t->udma;
3023 			break;
3024 		default:
3025 			return 0xff;
3026 		}
3027 
3028 		if (cycle > this_cycle)
3029 			break;
3030 
3031 		last_mode = t->mode;
3032 	}
3033 
3034 	return last_mode;
3035 }
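
/*
 * Worked example using the ata_timing[] table above: a request for a
 * UDMA mode with @cycle = 60 accepts XFER_UDMA_0 (120 ns), XFER_UDMA_1
 * (80 ns) and XFER_UDMA_2 (60 ns), then stops at XFER_UDMA_3 (45 ns)
 * because 60 > 45, so XFER_UDMA_2 is returned.
 */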
3036 
3037 /**
3038  *	ata_down_xfermask_limit - adjust dev xfer masks downward
3039  *	@dev: Device to adjust xfer masks
3040  *	@sel: ATA_DNXFER_* selector
3041  *
3042  *	Adjust xfer masks of @dev downward.  Note that this function
3043  *	does not apply the change.  Invoking ata_set_mode() afterwards
3044  *	will apply the limit.
3045  *
3046  *	LOCKING:
3047  *	Inherited from caller.
3048  *
3049  *	RETURNS:
3050  *	0 on success, negative errno on failure
3051  */
3052 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3053 {
3054 	char buf[32];
3055 	unsigned long orig_mask, xfer_mask;
3056 	unsigned long pio_mask, mwdma_mask, udma_mask;
3057 	int quiet, highbit;
3058 
3059 	quiet = !!(sel & ATA_DNXFER_QUIET);
3060 	sel &= ~ATA_DNXFER_QUIET;
3061 
3062 	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3063 						  dev->mwdma_mask,
3064 						  dev->udma_mask);
3065 	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3066 
3067 	switch (sel) {
3068 	case ATA_DNXFER_PIO:
3069 		highbit = fls(pio_mask) - 1;
3070 		pio_mask &= ~(1 << highbit);
3071 		break;
3072 
3073 	case ATA_DNXFER_DMA:
3074 		if (udma_mask) {
3075 			highbit = fls(udma_mask) - 1;
3076 			udma_mask &= ~(1 << highbit);
3077 			if (!udma_mask)
3078 				return -ENOENT;
3079 		} else if (mwdma_mask) {
3080 			highbit = fls(mwdma_mask) - 1;
3081 			mwdma_mask &= ~(1 << highbit);
3082 			if (!mwdma_mask)
3083 				return -ENOENT;
3084 		}
3085 		break;
3086 
3087 	case ATA_DNXFER_40C:
3088 		udma_mask &= ATA_UDMA_MASK_40C;
3089 		break;
3090 
3091 	case ATA_DNXFER_FORCE_PIO0:
3092 		pio_mask &= 1;
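		/* fall through */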
3093 	case ATA_DNXFER_FORCE_PIO:
3094 		mwdma_mask = 0;
3095 		udma_mask = 0;
3096 		break;
3097 
3098 	default:
3099 		BUG();
3100 	}
3101 
3102 	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3103 
3104 	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3105 		return -ENOENT;
3106 
3107 	if (!quiet) {
3108 		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3109 			snprintf(buf, sizeof(buf), "%s:%s",
3110 				 ata_mode_string(xfer_mask),
3111 				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3112 		else
3113 			snprintf(buf, sizeof(buf), "%s",
3114 				 ata_mode_string(xfer_mask));
3115 
3116 		ata_dev_printk(dev, KERN_WARNING,
3117 			       "limiting speed to %s\n", buf);
3118 	}
3119 
3120 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3121 			    &dev->udma_mask);
3122 
3123 	return 0;
3124 }
3125 
3126 static int ata_dev_set_mode(struct ata_device *dev)
3127 {
3128 	struct ata_eh_context *ehc = &dev->link->eh_context;
3129 	const char *dev_err_whine = "";
3130 	int ign_dev_err = 0;
3131 	unsigned int err_mask;
3132 	int rc;
3133 
3134 	dev->flags &= ~ATA_DFLAG_PIO;
3135 	if (dev->xfer_shift == ATA_SHIFT_PIO)
3136 		dev->flags |= ATA_DFLAG_PIO;
3137 
3138 	err_mask = ata_dev_set_xfermode(dev);
3139 
3140 	if (err_mask & ~AC_ERR_DEV)
3141 		goto fail;
3142 
3143 	/* revalidate */
3144 	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3145 	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3146 	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3147 	if (rc)
3148 		return rc;
3149 
3150 	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3151 		/* Old CFA may refuse this command, which is just fine */
3152 		if (ata_id_is_cfa(dev->id))
3153 			ign_dev_err = 1;
3154 		/* Catch several broken garbage emulations plus some pre-ATA
3155 		   devices */
3156 		if (ata_id_major_version(dev->id) == 0 &&
3157 					dev->pio_mode <= XFER_PIO_2)
3158 			ign_dev_err = 1;
3159 		/* Some very old devices and some bad newer ones fail
3160 		   any kind of SET_XFERMODE request but support PIO0-2
3161 		   timings and no IORDY */
3162 		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3163 			ign_dev_err = 1;
3164 	}
3165 	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3166 	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3167 	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3168 	    dev->dma_mode == XFER_MW_DMA_0 &&
3169 	    (dev->id[63] >> 8) & 1)
3170 		ign_dev_err = 1;
3171 
3172 	/* if the device is actually configured correctly, ignore dev err */
3173 	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3174 		ign_dev_err = 1;
3175 
3176 	if (err_mask & AC_ERR_DEV) {
3177 		if (!ign_dev_err)
3178 			goto fail;
3179 		else
3180 			dev_err_whine = " (device error ignored)";
3181 	}
3182 
3183 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3184 		dev->xfer_shift, (int)dev->xfer_mode);
3185 
3186 	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3187 		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3188 		       dev_err_whine);
3189 
3190 	return 0;
3191 
3192  fail:
3193 	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3194 		       "(err_mask=0x%x)\n", err_mask);
3195 	return -EIO;
3196 }
3197 
3198 /**
3199  *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3200  *	@link: link on which timings will be programmed
3201  *	@r_failed_dev: out parameter for failed device
3202  *
3203  *	Standard implementation of the function used to tune and set
3204  *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3205  *	ata_dev_set_mode() fails, pointer to the failing device is
3206  *	returned in @r_failed_dev.
3207  *
3208  *	LOCKING:
3209  *	PCI/etc. bus probe sem.
3210  *
3211  *	RETURNS:
3212  *	0 on success, negative errno otherwise
3213  */
3214 
3215 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3216 {
3217 	struct ata_port *ap = link->ap;
3218 	struct ata_device *dev;
3219 	int rc = 0, used_dma = 0, found = 0;
3220 
3221 	/* step 1: calculate xfer_mask */
3222 	ata_link_for_each_dev(dev, link) {
3223 		unsigned long pio_mask, dma_mask;
3224 		unsigned int mode_mask;
3225 
3226 		if (!ata_dev_enabled(dev))
3227 			continue;
3228 
3229 		mode_mask = ATA_DMA_MASK_ATA;
3230 		if (dev->class == ATA_DEV_ATAPI)
3231 			mode_mask = ATA_DMA_MASK_ATAPI;
3232 		else if (ata_id_is_cfa(dev->id))
3233 			mode_mask = ATA_DMA_MASK_CFA;
3234 
3235 		ata_dev_xfermask(dev);
3236 		ata_force_xfermask(dev);
3237 
3238 		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3240 
3241 		if (libata_dma_mask & mode_mask)
3242 			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3243 		else
3244 			dma_mask = 0;
3245 
3246 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3247 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3248 
3249 		found = 1;
3250 		if (dev->dma_mode != 0xff)
3251 			used_dma = 1;
3252 	}
3253 	if (!found)
3254 		goto out;
3255 
3256 	/* step 2: always set host PIO timings */
3257 	ata_link_for_each_dev(dev, link) {
3258 		if (!ata_dev_enabled(dev))
3259 			continue;
3260 
3261 		if (dev->pio_mode == 0xff) {
3262 			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3263 			rc = -EINVAL;
3264 			goto out;
3265 		}
3266 
3267 		dev->xfer_mode = dev->pio_mode;
3268 		dev->xfer_shift = ATA_SHIFT_PIO;
3269 		if (ap->ops->set_piomode)
3270 			ap->ops->set_piomode(ap, dev);
3271 	}
3272 
3273 	/* step 3: set host DMA timings */
3274 	ata_link_for_each_dev(dev, link) {
3275 		if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3276 			continue;
3277 
3278 		dev->xfer_mode = dev->dma_mode;
3279 		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3280 		if (ap->ops->set_dmamode)
3281 			ap->ops->set_dmamode(ap, dev);
3282 	}
3283 
3284 	/* step 4: update devices' xfer mode */
3285 	ata_link_for_each_dev(dev, link) {
3286 		/* don't update suspended devices' xfer mode */
3287 		if (!ata_dev_enabled(dev))
3288 			continue;
3289 
3290 		rc = ata_dev_set_mode(dev);
3291 		if (rc)
3292 			goto out;
3293 	}
3294 
3295 	/* Record simplex status. If we selected DMA then the other
3296 	 * host channels are not permitted to do so.
3297 	 */
3298 	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3299 		ap->host->simplex_claimed = ap;
3300 
3301  out:
3302 	if (rc)
3303 		*r_failed_dev = dev;
3304 	return rc;
3305 }
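
/*
 * Illustrative sketch (hypothetical driver code): an LLD needing
 * extra restrictions typically wraps ata_do_set_mode() from its own
 * ->set_mode() hook rather than reimplementing the four steps above.
 */
static int example_set_mode(struct ata_link *link,
			    struct ata_device **r_failed_dev)
{
	/* hypothetical: clamp masks or poke chipset registers here */

	return ata_do_set_mode(link, r_failed_dev);
}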
3306 
3307 /**
3308  *	ata_wait_ready - wait for link to become ready
3309  *	@link: link to be waited on
3310  *	@deadline: deadline jiffies for the operation
3311  *	@check_ready: callback to check link readiness
3312  *
3313  *	Wait for @link to become ready.  @check_ready should return
3314  *	a positive number if @link is ready, 0 if it isn't, -ENODEV if
3315  *	link doesn't seem to be occupied, other errno for other error
3316  *	conditions.
3317  *
3318  *	Transient -ENODEV conditions are allowed for
3319  *	ATA_TMOUT_FF_WAIT.
3320  *
3321  *	LOCKING:
3322  *	EH context.
3323  *
3324  *	RETURNS:
3325  *	0 if @link is ready before @deadline; otherwise, -errno.
3326  */
3327 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3328 		   int (*check_ready)(struct ata_link *link))
3329 {
3330 	unsigned long start = jiffies;
3331 	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3332 	int warned = 0;
3333 
3334 	if (time_after(nodev_deadline, deadline))
3335 		nodev_deadline = deadline;
3336 
3337 	while (1) {
3338 		unsigned long now = jiffies;
3339 		int ready, tmp;
3340 
3341 		ready = tmp = check_ready(link);
3342 		if (ready > 0)
3343 			return 0;
3344 
3345 		/* -ENODEV could be transient.  Ignore -ENODEV if link
3346 		 * is online.  Also, some SATA devices take a long
3347 		 * time to clear 0xff after reset.  For example,
3348 		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
3349 		 * GoVault needs even more than that.  Wait for
3350 		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
3351 		 *
3352 		 * Note that some PATA controllers (pata_ali) explode
3353 		 * if status register is read more than once when
3354 		 * there's no device attached.
3355 		 */
3356 		if (ready == -ENODEV) {
3357 			if (ata_link_online(link))
3358 				ready = 0;
3359 			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3360 				 !ata_link_offline(link) &&
3361 				 time_before(now, nodev_deadline))
3362 				ready = 0;
3363 		}
3364 
3365 		if (ready)
3366 			return ready;
3367 		if (time_after(now, deadline))
3368 			return -EBUSY;
3369 
3370 		if (!warned && time_after(now, start + 5 * HZ) &&
3371 		    (deadline - now > 3 * HZ)) {
3372 			ata_link_printk(link, KERN_WARNING,
3373 				"link is slow to respond, please be patient "
3374 				"(ready=%d)\n", tmp);
3375 			warned = 1;
3376 		}
3377 
3378 		msleep(50);
3379 	}
3380 }
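
/*
 * A minimal sketch of a @check_ready callback, assuming a hypothetical
 * example_read_status() that returns the taskfile status register.
 * Per the contract above: positive means ready, 0 not ready, -ENODEV
 * when nothing seems to be attached.
 */
static u8 example_read_status(struct ata_port *ap);	/* hypothetical */

static int example_check_ready(struct ata_link *link)
{
	u8 status = example_read_status(link->ap);

	if (status == 0xff)	/* a floating bus usually reads 0xff */
		return -ENODEV;

	return !(status & ATA_BUSY);
}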
3381 
3382 /**
3383  *	ata_wait_after_reset - wait for link to become ready after reset
3384  *	@link: link to be waited on
3385  *	@deadline: deadline jiffies for the operation
3386  *	@check_ready: callback to check link readiness
3387  *
3388  *	Wait for @link to become ready after reset.
3389  *
3390  *	LOCKING:
3391  *	EH context.
3392  *
3393  *	RETURNS:
3394  *	0 if @link is ready before @deadline; otherwise, -errno.
3395  */
3396 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3397 				int (*check_ready)(struct ata_link *link))
3398 {
3399 	msleep(ATA_WAIT_AFTER_RESET);
3400 
3401 	return ata_wait_ready(link, deadline, check_ready);
3402 }
3403 
3404 /**
3405  *	sata_link_debounce - debounce SATA phy status
3406  *	@link: ATA link to debounce SATA phy status for
3407  *	@params: timing parameters { interval, duration, timeout } in msec
3408  *	@deadline: deadline jiffies for the operation
3409  *
3410  *	Make sure SStatus of @link reaches stable state, determined by
3411  *	holding the same value where DET is not 1 for @duration polled
3412  *	every @interval, before @timeout.  Timeout constrains the
3413  *	beginning of the stable state.  Because DET gets stuck at 1 on
3414  *	some controllers after hot unplugging, this function waits
3415  *	until timeout then returns 0 if DET is stable at 1.
3416  *
3417  *	@timeout is further limited by @deadline.  The sooner of the
3418  *	two is used.
3419  *
3420  *	LOCKING:
3421  *	Kernel thread context (may sleep)
3422  *
3423  *	RETURNS:
3424  *	0 on success, -errno on failure.
3425  */
3426 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3427 		       unsigned long deadline)
3428 {
3429 	unsigned long interval = params[0];
3430 	unsigned long duration = params[1];
3431 	unsigned long last_jiffies, t;
3432 	u32 last, cur;
3433 	int rc;
3434 
3435 	t = ata_deadline(jiffies, params[2]);
3436 	if (time_before(t, deadline))
3437 		deadline = t;
3438 
3439 	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3440 		return rc;
3441 	cur &= 0xf;
3442 
3443 	last = cur;
3444 	last_jiffies = jiffies;
3445 
3446 	while (1) {
3447 		msleep(interval);
3448 		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3449 			return rc;
3450 		cur &= 0xf;
3451 
3452 		/* DET stable? */
3453 		if (cur == last) {
3454 			if (cur == 1 && time_before(jiffies, deadline))
3455 				continue;
3456 			if (time_after(jiffies,
3457 				       ata_deadline(last_jiffies, duration)))
3458 				return 0;
3459 			continue;
3460 		}
3461 
3462 		/* unstable, start over */
3463 		last = cur;
3464 		last_jiffies = jiffies;
3465 
3466 		/* Check deadline.  If debouncing failed, return
3467 		 * -EPIPE to tell upper layer to lower link speed.
3468 		 */
3469 		if (time_after(jiffies, deadline))
3470 			return -EPIPE;
3471 	}
3472 }
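
/*
 * A minimal usage sketch, assuming EH context: debounce with the
 * parameters attached to the current EH context, bounded by @deadline,
 * exactly as sata_link_resume() below does internally.
 */
static int example_debounce_link(struct ata_link *link, unsigned long deadline)
{
	const unsigned long *params = sata_ehc_deb_timing(&link->eh_context);

	return sata_link_debounce(link, params, deadline);
}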
3473 
3474 /**
3475  *	sata_link_resume - resume SATA link
3476  *	@link: ATA link to resume SATA
3477  *	@params: timing parameters { interval, duration, timeout } in msec
3478  *	@deadline: deadline jiffies for the operation
3479  *
3480  *	Resume SATA phy @link and debounce it.
3481  *
3482  *	LOCKING:
3483  *	Kernel thread context (may sleep)
3484  *
3485  *	RETURNS:
3486  *	0 on success, -errno on failure.
3487  */
3488 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3489 		     unsigned long deadline)
3490 {
3491 	u32 scontrol, serror;
3492 	int rc;
3493 
3494 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3495 		return rc;
3496 
3497 	scontrol = (scontrol & 0x0f0) | 0x300;
3498 
3499 	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3500 		return rc;
3501 
3502 	/* Some PHYs react badly if SStatus is pounded immediately
3503 	 * after resuming.  Delay 200ms before debouncing.
3504 	 */
3505 	msleep(200);
3506 
3507 	if ((rc = sata_link_debounce(link, params, deadline)))
3508 		return rc;
3509 
3510 	/* clear SError, some PHYs require this even for SRST to work */
3511 	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
3512 		rc = sata_scr_write(link, SCR_ERROR, serror);
3513 
3514 	return rc != -EINVAL ? rc : 0;
3515 }
3516 
3517 /**
3518  *	ata_std_prereset - prepare for reset
3519  *	@link: ATA link to be reset
3520  *	@deadline: deadline jiffies for the operation
3521  *
3522  *	@link is about to be reset.  Initialize it.  Failure from
3523  *	prereset makes libata abort whole reset sequence and give up
3524  *	that port, so prereset should be best-effort.  It does its
3525  *	best to prepare for reset sequence but if things go wrong, it
3526  *	should just whine, not fail.
3527  *
3528  *	LOCKING:
3529  *	Kernel thread context (may sleep)
3530  *
3531  *	RETURNS:
3532  *	0 on success, -errno otherwise.
3533  */
3534 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3535 {
3536 	struct ata_port *ap = link->ap;
3537 	struct ata_eh_context *ehc = &link->eh_context;
3538 	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3539 	int rc;
3540 
3541 	/* if we're about to do hardreset, nothing more to do */
3542 	if (ehc->i.action & ATA_EH_HARDRESET)
3543 		return 0;
3544 
3545 	/* if SATA, resume link */
3546 	if (ap->flags & ATA_FLAG_SATA) {
3547 		rc = sata_link_resume(link, timing, deadline);
3548 		/* whine about phy resume failure but proceed */
3549 		if (rc && rc != -EOPNOTSUPP)
3550 			ata_link_printk(link, KERN_WARNING, "failed to resume "
3551 					"link for reset (errno=%d)\n", rc);
3552 	}
3553 
3554 	/* no point in trying softreset on offline link */
3555 	if (ata_link_offline(link))
3556 		ehc->i.action &= ~ATA_EH_SOFTRESET;
3557 
3558 	return 0;
3559 }
3560 
3561 /**
3562  *	sata_link_hardreset - reset link via SATA phy reset
3563  *	@link: link to reset
3564  *	@timing: timing parameters { interval, duration, timeout } in msec
3565  *	@deadline: deadline jiffies for the operation
3566  *	@online: optional out parameter indicating link onlineness
3567  *	@check_ready: optional callback to check link readiness
3568  *
3569  *	SATA phy-reset @link using DET bits of SControl register.
3570  *	After hardreset, link readiness is waited upon using
3571  *	ata_wait_ready() if @check_ready is specified.  LLDs are
3572  *	allowed to not specify @check_ready and wait itself after this
3573  *	function returns.  Device classification is LLD's
3574  *	responsibility.
3575  *
3576  *	*@online is set to one iff reset succeeded and @link is online
3577  *	after reset.
3578  *
3579  *	LOCKING:
3580  *	Kernel thread context (may sleep)
3581  *
3582  *	RETURNS:
3583  *	0 on success, -errno otherwise.
3584  */
3585 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3586 			unsigned long deadline,
3587 			bool *online, int (*check_ready)(struct ata_link *))
3588 {
3589 	u32 scontrol;
3590 	int rc;
3591 
3592 	DPRINTK("ENTER\n");
3593 
3594 	if (online)
3595 		*online = false;
3596 
3597 	if (sata_set_spd_needed(link)) {
3598 		/* SATA spec says nothing about how to reconfigure
3599 		 * spd.  To be on the safe side, turn off phy during
3600 		 * reconfiguration.  This works for at least ICH7 AHCI
3601 		 * and Sil3124.
3602 		 */
3603 		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3604 			goto out;
3605 
3606 		scontrol = (scontrol & 0x0f0) | 0x304;
3607 
3608 		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3609 			goto out;
3610 
3611 		sata_set_spd(link);
3612 	}
3613 
3614 	/* issue phy wake/reset */
3615 	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3616 		goto out;
3617 
3618 	scontrol = (scontrol & 0x0f0) | 0x301;
3619 
3620 	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3621 		goto out;
3622 
3623 	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3624 	 * 10.4.2 says at least 1 ms.
3625 	 */
3626 	msleep(1);
3627 
3628 	/* bring link back */
3629 	rc = sata_link_resume(link, timing, deadline);
3630 	if (rc)
3631 		goto out;
3632 	/* if link is offline nothing more to do */
3633 	if (ata_link_offline(link))
3634 		goto out;
3635 
3636 	/* Link is online.  From this point, -ENODEV too is an error. */
3637 	if (online)
3638 		*online = true;
3639 
3640 	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
3641 		/* If PMP is supported, we have to do follow-up SRST.
3642 		 * Some PMPs don't send D2H Reg FIS after hardreset if
3643 		 * the first port is empty.  Wait only for
3644 		 * ATA_TMOUT_PMP_SRST_WAIT.
3645 		 */
3646 		if (check_ready) {
3647 			unsigned long pmp_deadline;
3648 
3649 			pmp_deadline = ata_deadline(jiffies,
3650 						    ATA_TMOUT_PMP_SRST_WAIT);
3651 			if (time_after(pmp_deadline, deadline))
3652 				pmp_deadline = deadline;
3653 			ata_wait_ready(link, pmp_deadline, check_ready);
3654 		}
3655 		rc = -EAGAIN;
3656 		goto out;
3657 	}
3658 
3659 	rc = 0;
3660 	if (check_ready)
3661 		rc = ata_wait_ready(link, deadline, check_ready);
3662  out:
3663 	if (rc && rc != -EAGAIN) {
3664 		/* online is set iff link is online && reset succeeded */
3665 		if (online)
3666 			*online = false;
3667 		ata_link_printk(link, KERN_ERR,
3668 				"COMRESET failed (errno=%d)\n", rc);
3669 	}
3670 	DPRINTK("EXIT, rc=%d\n", rc);
3671 	return rc;
3672 }
3673 
3674 /**
3675  *	sata_std_hardreset - COMRESET w/o waiting or classification
3676  *	@link: link to reset
3677  *	@class: resulting class of attached device
3678  *	@deadline: deadline jiffies for the operation
3679  *
3680  *	Standard SATA COMRESET w/o waiting or classification.
3681  *
3682  *	LOCKING:
3683  *	Kernel thread context (may sleep)
3684  *
3685  *	RETURNS:
3686  *	0 if link offline, -EAGAIN if link online, -errno on errors.
3687  */
3688 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3689 		       unsigned long deadline)
3690 {
3691 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3692 	bool online;
3693 	int rc;
3694 
3695 	/* do hardreset */
3696 	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3697 	return online ? -EAGAIN : rc;
3698 }
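
/*
 * Illustrative sketch (hypothetical driver code): a custom ->hardreset
 * can delegate the COMRESET itself to sata_std_hardreset() and then
 * perform controller-specific recovery before returning.
 */
static int example_hardreset(struct ata_link *link, unsigned int *class,
			     unsigned long deadline)
{
	int rc = sata_std_hardreset(link, class, deadline);

	/* hypothetical: re-enable controller interrupts, etc. */

	return rc;
}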
3699 
3700 /**
3701  *	ata_std_postreset - standard postreset callback
3702  *	@link: the target ata_link
3703  *	@classes: classes of attached devices
3704  *
3705  *	This function is invoked after a successful reset.  Note that
3706  *	the device might have been reset more than once using
3707  *	different reset methods before postreset is invoked.
3708  *
3709  *	LOCKING:
3710  *	Kernel thread context (may sleep)
3711  */
3712 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3713 {
3714 	u32 serror;
3715 
3716 	DPRINTK("ENTER\n");
3717 
3718 	/* reset complete, clear SError */
3719 	if (!sata_scr_read(link, SCR_ERROR, &serror))
3720 		sata_scr_write(link, SCR_ERROR, serror);
3721 
3722 	/* print link status */
3723 	sata_print_link_status(link);
3724 
3725 	DPRINTK("EXIT\n");
3726 }
3727 
3728 /**
3729  *	ata_dev_same_device - Determine whether new ID matches configured device
3730  *	@dev: device to compare against
3731  *	@new_class: class of the new device
3732  *	@new_id: IDENTIFY page of the new device
3733  *
3734  *	Compare @new_class and @new_id against @dev and determine
3735  *	whether @dev is the device indicated by @new_class and
3736  *	@new_id.
3737  *
3738  *	LOCKING:
3739  *	None.
3740  *
3741  *	RETURNS:
3742  *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3743  */
3744 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3745 			       const u16 *new_id)
3746 {
3747 	const u16 *old_id = dev->id;
3748 	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3749 	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3750 
3751 	if (dev->class != new_class) {
3752 		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3753 			       dev->class, new_class);
3754 		return 0;
3755 	}
3756 
3757 	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3758 	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3759 	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3760 	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3761 
3762 	if (strcmp(model[0], model[1])) {
3763 		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3764 			       "'%s' != '%s'\n", model[0], model[1]);
3765 		return 0;
3766 	}
3767 
3768 	if (strcmp(serial[0], serial[1])) {
3769 		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3770 			       "'%s' != '%s'\n", serial[0], serial[1]);
3771 		return 0;
3772 	}
3773 
3774 	return 1;
3775 }
3776 
3777 /**
3778  *	ata_dev_reread_id - Re-read IDENTIFY data
3779  *	@dev: target ATA device
3780  *	@readid_flags: read ID flags
3781  *
3782  *	Re-read IDENTIFY page and make sure @dev is still attached to
3783  *	the port.
3784  *
3785  *	LOCKING:
3786  *	Kernel thread context (may sleep)
3787  *
3788  *	RETURNS:
3789  *	0 on success, negative errno otherwise
3790  */
3791 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3792 {
3793 	unsigned int class = dev->class;
3794 	u16 *id = (void *)dev->link->ap->sector_buf;
3795 	int rc;
3796 
3797 	/* read ID data */
3798 	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3799 	if (rc)
3800 		return rc;
3801 
3802 	/* is the device still there? */
3803 	if (!ata_dev_same_device(dev, class, id))
3804 		return -ENODEV;
3805 
3806 	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3807 	return 0;
3808 }
3809 
3810 /**
3811  *	ata_dev_revalidate - Revalidate ATA device
3812  *	@dev: device to revalidate
3813  *	@new_class: new class code
3814  *	@readid_flags: read ID flags
3815  *
3816  *	Re-read IDENTIFY page, make sure @dev is still attached to the
3817  *	port and reconfigure it according to the new IDENTIFY page.
3818  *
3819  *	LOCKING:
3820  *	Kernel thread context (may sleep)
3821  *
3822  *	RETURNS:
3823  *	0 on success, negative errno otherwise
3824  */
3825 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3826 		       unsigned int readid_flags)
3827 {
3828 	u64 n_sectors = dev->n_sectors;
3829 	int rc;
3830 
3831 	if (!ata_dev_enabled(dev))
3832 		return -ENODEV;
3833 
3834 	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3835 	if (ata_class_enabled(new_class) &&
3836 	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3837 		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3838 			       dev->class, new_class);
3839 		rc = -ENODEV;
3840 		goto fail;
3841 	}
3842 
3843 	/* re-read ID */
3844 	rc = ata_dev_reread_id(dev, readid_flags);
3845 	if (rc)
3846 		goto fail;
3847 
3848 	/* configure device according to the new ID */
3849 	rc = ata_dev_configure(dev);
3850 	if (rc)
3851 		goto fail;
3852 
3853 	/* verify n_sectors hasn't changed */
3854 	if (dev->class == ATA_DEV_ATA && n_sectors &&
3855 	    dev->n_sectors != n_sectors) {
3856 		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3857 			       "%llu != %llu\n",
3858 			       (unsigned long long)n_sectors,
3859 			       (unsigned long long)dev->n_sectors);
3860 
3861 		/* restore original n_sectors */
3862 		dev->n_sectors = n_sectors;
3863 
3864 		rc = -ENODEV;
3865 		goto fail;
3866 	}
3867 
3868 	return 0;
3869 
3870  fail:
3871 	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3872 	return rc;
3873 }
3874 
3875 struct ata_blacklist_entry {
3876 	const char *model_num;
3877 	const char *model_rev;
3878 	unsigned long horkage;
3879 };
3880 
3881 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3882 	/* Devices with DMA related problems under Linux */
3883 	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
3884 	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
3885 	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
3886 	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
3887 	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
3888 	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
3889 	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
3890 	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
3891 	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
3892 	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
3893 	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
3894 	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
3895 	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
3896 	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
3897 	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
3898 	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
3899 	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
3900 	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
3901 	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
3902 	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
3903 	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
3904 	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
3905 	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
3906 	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
3907 	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
3908 	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
3909 	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3910 	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
3911 	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
3912 	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
3913 	/* Odd clown on sil3726/4726 PMPs */
3914 	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
3915 
3916 	/* Weird ATAPI devices */
3917 	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
3918 
3919 	/* Devices we expect to fail diagnostics */
3920 
3921 	/* Devices where NCQ should be avoided */
3922 	/* NCQ is slow */
3923 	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
3924 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3925 	/* http://thread.gmane.org/gmane.linux.ide/14907 */
3926 	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
3927 	/* NCQ is broken */
3928 	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
3929 	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
3930 	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
3931 	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
3932 
3933 	/* Blacklist entries taken from Silicon Image 3124/3132
3934 	   Windows driver .inf file - also several Linux problem reports */
3935 	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
3936 	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
3937 	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3938 
3939 	/* devices which puke on READ_NATIVE_MAX */
3940 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
3941 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3942 	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3943 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
3944 
3945 	/* Devices which report 1 sector over size HPA */
3946 	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3947 	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3948 	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
3949 
3950 	/* Devices which get the IVB wrong */
3951 	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3952 	/* Maybe we should just blacklist TSSTcorp... */
3953 	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
3954 	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
3955 	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
3956 	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
3957 	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
3958 	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },
3959 
3960 	/* End Marker */
3961 	{ }
3962 };
3963 
3964 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3965 {
3966 	const char *p;
3967 	int len;
3968 
3969 	/*
3970 	 * check for trailing wildcard: *\0
3971 	 */
3972 	p = strchr(patt, wildchar);
3973 	if (p && ((*(p + 1)) == 0))
3974 		len = p - patt;
3975 	else {
3976 		len = strlen(name);
3977 		if (!len) {
3978 			if (!*patt)
3979 				return 0;
3980 			return -1;
3981 		}
3982 	}
3983 
3984 	return strncmp(patt, name, len);
3985 }
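
/*
 * Example: how the blacklist patterns above are matched.  A trailing
 * '*' truncates the comparison to the pattern prefix; without one the
 * whole name must match.  A few illustrative cases:
 *
 *	strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*') == 0
 *	strn_pattern_cmp("BANC*", "BANC1G10", '*') == 0
 *	strn_pattern_cmp("ST380817AS", "ST380817AS", '*') == 0
 *	strn_pattern_cmp("ST380817AS", "ST3808110AS", '*') != 0
 */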
3986 
3987 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3988 {
3989 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
3990 	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3991 	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3992 
3993 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3994 	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3995 
3996 	while (ad->model_num) {
3997 		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3998 			if (ad->model_rev == NULL)
3999 				return ad->horkage;
4000 			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4001 				return ad->horkage;
4002 		}
4003 		ad++;
4004 	}
4005 	return 0;
4006 }
4007 
4008 static int ata_dma_blacklisted(const struct ata_device *dev)
4009 {
4010 	/* We don't support polling DMA.
4011 	 * Blacklist DMA (and fall back to PIO) for ATAPI devices with CDB-intr
4012 	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4013 	 */
4014 	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4015 	    (dev->flags & ATA_DFLAG_CDB_INTR))
4016 		return 1;
4017 	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4018 }
4019 
4020 /**
4021  *	ata_is_40wire		-	check drive side detection
4022  *	@dev: device
4023  *
4024  *	Perform drive side detection decoding, allowing for device vendors
4025  *	who can't follow the documentation.
4026  */
4027 
4028 static int ata_is_40wire(struct ata_device *dev)
4029 {
4030 	if (dev->horkage & ATA_HORKAGE_IVB)
4031 		return ata_drive_40wire_relaxed(dev->id);
4032 	return ata_drive_40wire(dev->id);
4033 }
4034 
4035 /**
4036  *	cable_is_40wire		-	40/80/SATA decider
4037  *	@ap: port to consider
4038  *
4039  *	This function encapsulates the policy for speed management
4040  *	in one place. At the moment we don't cache the result but
4041  *	there is a good case for setting ap->cbl to the result when
4042  *	we are called with unknown cables (and figuring out if it
4043  *	impacts hotplug at all).
4044  *
4045  *	Return 1 if the cable appears to be 40 wire.
4046  */
4047 
4048 static int cable_is_40wire(struct ata_port *ap)
4049 {
4050 	struct ata_link *link;
4051 	struct ata_device *dev;
4052 
4053 	/* If the controller thinks we are 40 wire, we are */
4054 	if (ap->cbl == ATA_CBL_PATA40)
4055 		return 1;
4056 	/* If the controller thinks we are 80 wire, we are */
4057 	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4058 		return 0;
4059 	/* If the system is known to be 40 wire short cable (eg laptop),
4060 	   then we allow 80 wire modes even if the drive isn't sure */
4061 	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4062 		return 0;
4063 	/* If the controller doesn't know, we scan.
4064 
4065 	   Note: we look for all 40 wire detects at this point.  Any
4066 	   80 wire detect is taken to be 80 wire cable because
4067 	   - in many setups only the one drive (slave if present)
4068 	     will give a valid detect
4069 	   - if you have a non detect capable drive you don't want
4070 	     it to colour the choice
4071 	*/
4073 	ata_port_for_each_link(link, ap) {
4074 		ata_link_for_each_dev(dev, link) {
4075 			if (!ata_is_40wire(dev))
4076 				return 0;
4077 		}
4078 	}
4079 	return 1;
4080 }
4081 
4082 /**
4083  *	ata_dev_xfermask - Compute supported xfermask of the given device
4084  *	@dev: Device to compute xfermask for
4085  *
4086  *	Compute supported xfermask of @dev and store it in
4087  *	dev->*_mask.  This function is responsible for applying all
4088  *	known limits including host controller limits, device
4089  *	blacklist, etc...
4090  *
4091  *	LOCKING:
4092  *	None.
4093  */
4094 static void ata_dev_xfermask(struct ata_device *dev)
4095 {
4096 	struct ata_link *link = dev->link;
4097 	struct ata_port *ap = link->ap;
4098 	struct ata_host *host = ap->host;
4099 	unsigned long xfer_mask;
4100 
4101 	/* controller modes available */
4102 	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4103 				      ap->mwdma_mask, ap->udma_mask);
4104 
4105 	/* drive modes available */
4106 	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4107 				       dev->mwdma_mask, dev->udma_mask);
4108 	xfer_mask &= ata_id_xfermask(dev->id);
4109 
4110 	/*
4111 	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4112 	 *	cable
4113 	 */
4114 	if (ata_dev_pair(dev)) {
4115 		/* No PIO5 or PIO6 */
4116 		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4117 		/* No MWDMA3 or MWDMA4 */
4118 		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4119 	}
4120 
4121 	if (ata_dma_blacklisted(dev)) {
4122 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4123 		ata_dev_printk(dev, KERN_WARNING,
4124 			       "device is on DMA blacklist, disabling DMA\n");
4125 	}
4126 
4127 	if ((host->flags & ATA_HOST_SIMPLEX) &&
4128 	    host->simplex_claimed && host->simplex_claimed != ap) {
4129 		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4130 		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4131 			       "other device, disabling DMA\n");
4132 	}
4133 
4134 	if (ap->flags & ATA_FLAG_NO_IORDY)
4135 		xfer_mask &= ata_pio_mask_no_iordy(dev);
4136 
4137 	if (ap->ops->mode_filter)
4138 		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4139 
4140 	/* Apply cable rule here.  Don't apply it early because when
4141 	 * we handle hot plug the cable type can itself change.
4142 	 * Check this last so that we know if the transfer rate was
4143 	 * solely limited by the cable.
4144 	 * Unknown or 80 wire cables reported host side are checked
4145 	 * drive side as well. Cases where we know a 40wire cable
4146 	 * is used safely for 80 are not checked here.
4147 	 */
4148 	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4149 		/* UDMA/44 or higher would be available */
4150 		if (cable_is_40wire(ap)) {
4151 			ata_dev_printk(dev, KERN_WARNING,
4152 				 "limited to UDMA/33 due to 40-wire cable\n");
4153 			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4154 		}
4155 
4156 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4157 			    &dev->mwdma_mask, &dev->udma_mask);
4158 }
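
/*
 * Example: the cable rule applied at the end of ata_dev_xfermask()
 * above.  UDMA modes occupy eight bits starting at ATA_SHIFT_UDMA in
 * the packed mask, so 0xF8 << ATA_SHIFT_UDMA covers UDMA3 (44MB/s)
 * and up.  A worked case, assuming a drive good for UDMA0-5 behind a
 * 40-wire cable:
 *
 *	xfer_mask & (0xF8 << ATA_SHIFT_UDMA)	// UDMA3-5 bits set
 *	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);	// UDMA0-2 remain,
 *						// i.e. capped at UDMA/33
 */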
4159 
4160 /**
4161  *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4162  *	@dev: Device to which command will be sent
4163  *
4164  *	Issue SET FEATURES - XFER MODE command to device @dev.
4166  *
4167  *	LOCKING:
4168  *	PCI/etc. bus probe sem.
4169  *
4170  *	RETURNS:
4171  *	0 on success, AC_ERR_* mask otherwise.
4172  */
4173 
4174 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4175 {
4176 	struct ata_taskfile tf;
4177 	unsigned int err_mask;
4178 
4179 	/* set up set-features taskfile */
4180 	DPRINTK("set features - xfer mode\n");
4181 
4182 	/* Some controllers and ATAPI devices show flaky interrupt
4183 	 * behavior after setting xfer mode.  Use polling instead.
4184 	 */
4185 	ata_tf_init(dev, &tf);
4186 	tf.command = ATA_CMD_SET_FEATURES;
4187 	tf.feature = SETFEATURES_XFER;
4188 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4189 	tf.protocol = ATA_PROT_NODATA;
4190 	/* If we are using IORDY we must send the mode setting command */
4191 	if (ata_pio_need_iordy(dev))
4192 		tf.nsect = dev->xfer_mode;
4193 	/* If the device has IORDY and the controller does not - turn it off */
4194 	else if (ata_id_has_iordy(dev->id))
4195 		tf.nsect = 0x01;
4196 	else /* In the ancient relic department - skip all of this */
4197 		return 0;
4198 
4199 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4200 
4201 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4202 	return err_mask;
4203 }

4204 /**
4205  *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4206  *	@dev: Device to which command will be sent
4207  *	@enable: Whether to enable or disable the feature
4208  *	@feature: The feature to set, passed in the sector count field
4209  *
4210  *	Issue SET FEATURES - SATA FEATURES command to device @dev,
4211  *	with @feature written to the sector count register.
4212  *
4213  *	LOCKING:
4214  *	PCI/etc. bus probe sem.
4215  *
4216  *	RETURNS:
4217  *	0 on success, AC_ERR_* mask otherwise.
4218  */
4219 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4220 					u8 feature)
4221 {
4222 	struct ata_taskfile tf;
4223 	unsigned int err_mask;
4224 
4225 	/* set up set-features taskfile */
4226 	DPRINTK("set features - SATA features\n");
4227 
4228 	ata_tf_init(dev, &tf);
4229 	tf.command = ATA_CMD_SET_FEATURES;
4230 	tf.feature = enable;
4231 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4232 	tf.protocol = ATA_PROT_NODATA;
4233 	tf.nsect = feature;
4234 
4235 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4236 
4237 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4238 	return err_mask;
4239 }
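
/*
 * Example: a minimal sketch of a typical caller, enabling SATA
 * asynchronous notification on an ATAPI device (assumes the
 * SETFEATURES_SATA_ENABLE and SATA_AN constants from <linux/ata.h>):
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
 *				       SATA_AN);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR, "failed to enable AN "
 *			       "(err_mask=0x%x)\n", err_mask);
 */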
4240 
4241 /**
4242  *	ata_dev_init_params - Issue INIT DEV PARAMS command
4243  *	@dev: Device to which command will be sent
4244  *	@heads: Number of heads (taskfile parameter)
4245  *	@sectors: Number of sectors (taskfile parameter)
4246  *
4247  *	LOCKING:
4248  *	Kernel thread context (may sleep)
4249  *
4250  *	RETURNS:
4251  *	0 on success, AC_ERR_* mask otherwise.
4252  */
4253 static unsigned int ata_dev_init_params(struct ata_device *dev,
4254 					u16 heads, u16 sectors)
4255 {
4256 	struct ata_taskfile tf;
4257 	unsigned int err_mask;
4258 
4259 	/* Number of sectors per track 1-255. Number of heads 1-16 */
4260 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4261 		return AC_ERR_INVALID;
4262 
4263 	/* set up init dev params taskfile */
4264 	DPRINTK("init dev params\n");
4265 
4266 	ata_tf_init(dev, &tf);
4267 	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4268 	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4269 	tf.protocol = ATA_PROT_NODATA;
4270 	tf.nsect = sectors;
4271 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4272 
4273 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4274 	/* A clean abort indicates an original or just-out-of-spec drive
4275 	   and we should continue, as we issue the setup based on the
4276 	   drive's reported working geometry */
4277 	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4278 		err_mask = 0;
4279 
4280 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
4281 	return err_mask;
4282 }
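
/*
 * Example: for the classic 16-head, 63-sector translation the
 * taskfile built above ends up with
 *
 *	tf.nsect = 63;			// sectors per track
 *	(tf.device & 0x0f) == 15	// max head = 16 - 1
 */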
4283 
4284 /**
4285  *	ata_sg_clean - Unmap DMA memory associated with command
4286  *	@qc: Command containing DMA memory to be released
4287  *
4288  *	Unmap all mapped DMA memory associated with this command.
4289  *
4290  *	LOCKING:
4291  *	spin_lock_irqsave(host lock)
4292  */
4293 void ata_sg_clean(struct ata_queued_cmd *qc)
4294 {
4295 	struct ata_port *ap = qc->ap;
4296 	struct scatterlist *sg = qc->sg;
4297 	int dir = qc->dma_dir;
4298 
4299 	WARN_ON(sg == NULL);
4300 
4301 	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4302 
4303 	if (qc->n_elem)
4304 		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4305 
4306 	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4307 	qc->sg = NULL;
4308 }
4309 
4310 /**
4311  *	atapi_check_dma - Check whether ATAPI DMA can be supported
4312  *	@qc: Metadata associated with taskfile to check
4313  *
4314  *	Allow low-level driver to filter ATA PACKET commands, returning
4315  *	a status indicating whether or not it is OK to use DMA for the
4316  *	supplied PACKET command.
4317  *
4318  *	LOCKING:
4319  *	spin_lock_irqsave(host lock)
4320  *
4321  *	RETURNS: 0 when ATAPI DMA can be used
4322  *               nonzero otherwise
4323  */
4324 int atapi_check_dma(struct ata_queued_cmd *qc)
4325 {
4326 	struct ata_port *ap = qc->ap;
4327 
4328 	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4329 	 * few ATAPI devices choke on such DMA requests.
4330 	 */
4331 	if (unlikely(qc->nbytes & 15))
4332 		return 1;
4333 
4334 	if (ap->ops->check_atapi_dma)
4335 		return ap->ops->check_atapi_dma(qc);
4336 
4337 	return 0;
4338 }
4339 
4340 /**
4341  *	ata_std_qc_defer - Check whether a qc needs to be deferred
4342  *	@qc: ATA command in question
4343  *
4344  *	Non-NCQ commands cannot run with any other command, NCQ or
4345  *	not.  As the upper layer only knows the queue depth, we are
4346  *	responsible for maintaining exclusion.  This function checks
4347  *	whether a new command @qc can be issued.
4348  *
4349  *	LOCKING:
4350  *	spin_lock_irqsave(host lock)
4351  *
4352  *	RETURNS:
4353  *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4354  */
4355 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4356 {
4357 	struct ata_link *link = qc->dev->link;
4358 
4359 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4360 		if (!ata_tag_valid(link->active_tag))
4361 			return 0;
4362 	} else {
4363 		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4364 			return 0;
4365 	}
4366 
4367 	return ATA_DEFER_LINK;
4368 }
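
/*
 * Example: the exclusion rules ata_std_qc_defer() implements, in
 * table form (a valid active_tag means a non-NCQ command is in
 * flight; a non-zero sactive means NCQ commands are in flight):
 *
 *	new qc		active_tag	sactive		result
 *	NCQ		invalid		any		issue (0)
 *	NCQ		valid		-		ATA_DEFER_LINK
 *	non-NCQ		invalid		0		issue (0)
 *	non-NCQ		valid		any		ATA_DEFER_LINK
 *	non-NCQ		invalid		non-zero	ATA_DEFER_LINK
 */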
4369 
4370 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4371 
4372 /**
4373  *	ata_sg_init - Associate command with scatter-gather table.
4374  *	@qc: Command to be associated
4375  *	@sg: Scatter-gather table.
4376  *	@n_elem: Number of elements in s/g table.
4377  *
4378  *	Initialize the data-related elements of queued_cmd @qc
4379  *	to point to a scatter-gather table @sg, containing @n_elem
4380  *	elements.
4381  *
4382  *	LOCKING:
4383  *	spin_lock_irqsave(host lock)
4384  */
4385 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4386 		 unsigned int n_elem)
4387 {
4388 	qc->sg = sg;
4389 	qc->n_elem = n_elem;
4390 	qc->cursg = qc->sg;
4391 }
4392 
4393 /**
4394  *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4395  *	@qc: Command with scatter-gather table to be mapped.
4396  *
4397  *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4398  *
4399  *	LOCKING:
4400  *	spin_lock_irqsave(host lock)
4401  *
4402  *	RETURNS:
4403  *	Zero on success, negative on error.
4404  *
4405  */
4406 static int ata_sg_setup(struct ata_queued_cmd *qc)
4407 {
4408 	struct ata_port *ap = qc->ap;
4409 	unsigned int n_elem;
4410 
4411 	VPRINTK("ENTER, ata%u\n", ap->print_id);
4412 
4413 	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4414 	if (n_elem < 1)
4415 		return -1;
4416 
4417 	DPRINTK("%d sg elements mapped\n", n_elem);
4418 
4419 	qc->n_elem = n_elem;
4420 	qc->flags |= ATA_QCFLAG_DMAMAP;
4421 
4422 	return 0;
4423 }
4424 
4425 /**
4426  *	swap_buf_le16 - swap halves of 16-bit words in place
4427  *	@buf:  Buffer to swap
4428  *	@buf_words:  Number of 16-bit words in buffer.
4429  *
4430  *	Swap halves of 16-bit words if needed to convert from
4431  *	little-endian byte order to native cpu byte order, or
4432  *	vice-versa.
4433  *
4434  *	LOCKING:
4435  *	Inherited from caller.
4436  */
4437 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4438 {
4439 #ifdef __BIG_ENDIAN
4440 	unsigned int i;
4441 
4442 	for (i = 0; i < buf_words; i++)
4443 		buf[i] = le16_to_cpu(buf[i]);
4444 #endif /* __BIG_ENDIAN */
4445 }
4446 
4447 /**
4448  *	ata_qc_new - Request an available ATA command for queueing
4449  *	@ap: Port to allocate the command for
4451  *
4452  *	LOCKING:
4453  *	None.
4454  */
4455 
4456 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4457 {
4458 	struct ata_queued_cmd *qc = NULL;
4459 	unsigned int i;
4460 
4461 	/* no command while frozen */
4462 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4463 		return NULL;
4464 
4465 	/* the last tag is reserved for the internal command. */
4466 	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4467 		if (!test_and_set_bit(i, &ap->qc_allocated)) {
4468 			qc = __ata_qc_from_tag(ap, i);
4469 			break;
4470 		}
4471 
4472 	if (qc)
4473 		qc->tag = i;
4474 
4475 	return qc;
4476 }
4477 
4478 /**
4479  *	ata_qc_new_init - Request an available ATA command, and initialize it
4480  *	@dev: Device from whom we request an available command structure
4481  *
4482  *	LOCKING:
4483  *	None.
4484  */
4485 
4486 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4487 {
4488 	struct ata_port *ap = dev->link->ap;
4489 	struct ata_queued_cmd *qc;
4490 
4491 	qc = ata_qc_new(ap);
4492 	if (qc) {
4493 		qc->scsicmd = NULL;
4494 		qc->ap = ap;
4495 		qc->dev = dev;
4496 
4497 		ata_qc_reinit(qc);
4498 	}
4499 
4500 	return qc;
4501 }
4502 
4503 /**
4504  *	ata_qc_free - free unused ata_queued_cmd
4505  *	@qc: Command to complete
4506  *
4507  *	Designed to free unused ata_queued_cmd object
4508  *	in case something prevents using it.
4509  *
4510  *	LOCKING:
4511  *	spin_lock_irqsave(host lock)
4512  */
4513 void ata_qc_free(struct ata_queued_cmd *qc)
4514 {
4515 	struct ata_port *ap = qc->ap;
4516 	unsigned int tag;
4517 
4518 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4519 
4520 	qc->flags = 0;
4521 	tag = qc->tag;
4522 	if (likely(ata_tag_valid(tag))) {
4523 		qc->tag = ATA_TAG_POISON;
4524 		clear_bit(tag, &ap->qc_allocated);
4525 	}
4526 }
4527 
4528 void __ata_qc_complete(struct ata_queued_cmd *qc)
4529 {
4530 	struct ata_port *ap = qc->ap;
4531 	struct ata_link *link = qc->dev->link;
4532 
4533 	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
4534 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4535 
4536 	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4537 		ata_sg_clean(qc);
4538 
4539 	/* command should be marked inactive atomically with qc completion */
4540 	if (qc->tf.protocol == ATA_PROT_NCQ) {
4541 		link->sactive &= ~(1 << qc->tag);
4542 		if (!link->sactive)
4543 			ap->nr_active_links--;
4544 	} else {
4545 		link->active_tag = ATA_TAG_POISON;
4546 		ap->nr_active_links--;
4547 	}
4548 
4549 	/* clear exclusive status */
4550 	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4551 		     ap->excl_link == link))
4552 		ap->excl_link = NULL;
4553 
4554 	/* atapi: mark qc as inactive to prevent the interrupt handler
4555 	 * from completing the command twice later, before the error handler
4556 	 * is called. (when rc != 0 and atapi request sense is needed)
4557 	 */
4558 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4559 	ap->qc_active &= ~(1 << qc->tag);
4560 
4561 	/* call completion callback */
4562 	qc->complete_fn(qc);
4563 }
4564 
4565 static void fill_result_tf(struct ata_queued_cmd *qc)
4566 {
4567 	struct ata_port *ap = qc->ap;
4568 
4569 	qc->result_tf.flags = qc->tf.flags;
4570 	ap->ops->qc_fill_rtf(qc);
4571 }
4572 
4573 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4574 {
4575 	struct ata_device *dev = qc->dev;
4576 
4577 	if (ata_tag_internal(qc->tag))
4578 		return;
4579 
4580 	if (ata_is_nodata(qc->tf.protocol))
4581 		return;
4582 
4583 	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4584 		return;
4585 
4586 	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4587 }
4588 
4589 /**
4590  *	ata_qc_complete - Complete an active ATA command
4591  *	@qc: Command to complete
4593  *
4594  *	Indicate to the mid and upper layers that an ATA
4595  *	command has completed, with either an ok or not-ok status.
4596  *
4597  *	LOCKING:
4598  *	spin_lock_irqsave(host lock)
4599  */
4600 void ata_qc_complete(struct ata_queued_cmd *qc)
4601 {
4602 	struct ata_port *ap = qc->ap;
4603 
4604 	/* XXX: New EH and old EH use different mechanisms to
4605 	 * synchronize EH with regular execution path.
4606 	 *
4607 	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4608 	 * Normal execution path is responsible for not accessing a
4609 	 * failed qc.  libata core enforces the rule by returning NULL
4610 	 * from ata_qc_from_tag() for failed qcs.
4611 	 *
4612 	 * Old EH depends on ata_qc_complete() nullifying completion
4613 	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
4614 	 * not synchronize with interrupt handler.  Only PIO task is
4615 	 * taken care of.
4616 	 */
4617 	if (ap->ops->error_handler) {
4618 		struct ata_device *dev = qc->dev;
4619 		struct ata_eh_info *ehi = &dev->link->eh_info;
4620 
4621 		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4622 
4623 		if (unlikely(qc->err_mask))
4624 			qc->flags |= ATA_QCFLAG_FAILED;
4625 
4626 		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4627 			if (!ata_tag_internal(qc->tag)) {
4628 				/* always fill result TF for failed qc */
4629 				fill_result_tf(qc);
4630 				ata_qc_schedule_eh(qc);
4631 				return;
4632 			}
4633 		}
4634 
4635 		/* read result TF if requested */
4636 		if (qc->flags & ATA_QCFLAG_RESULT_TF)
4637 			fill_result_tf(qc);
4638 
4639 		/* Some commands need post-processing after successful
4640 		 * completion.
4641 		 */
4642 		switch (qc->tf.command) {
4643 		case ATA_CMD_SET_FEATURES:
4644 			if (qc->tf.feature != SETFEATURES_WC_ON &&
4645 			    qc->tf.feature != SETFEATURES_WC_OFF)
4646 				break;
4647 			/* fall through */
4648 		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4649 		case ATA_CMD_SET_MULTI: /* multi_count changed */
4650 			/* revalidate device */
4651 			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4652 			ata_port_schedule_eh(ap);
4653 			break;
4654 
4655 		case ATA_CMD_SLEEP:
4656 			dev->flags |= ATA_DFLAG_SLEEPING;
4657 			break;
4658 		}
4659 
4660 		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4661 			ata_verify_xfer(qc);
4662 
4663 		__ata_qc_complete(qc);
4664 	} else {
4665 		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4666 			return;
4667 
4668 		/* read result TF if failed or requested */
4669 		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4670 			fill_result_tf(qc);
4671 
4672 		__ata_qc_complete(qc);
4673 	}
4674 }
4675 
4676 /**
4677  *	ata_qc_complete_multiple - Complete multiple qcs successfully
4678  *	@ap: port in question
4679  *	@qc_active: new qc_active mask
4680  *
4681  *	Complete in-flight commands.  This function is meant to be
4682  *	called from the low-level driver's interrupt routine to complete
4683  *	requests normally.  ap->qc_active and @qc_active are compared
4684  *	and commands are completed accordingly.
4685  *
4686  *	LOCKING:
4687  *	spin_lock_irqsave(host lock)
4688  *
4689  *	RETURNS:
4690  *	Number of completed commands on success, -errno otherwise.
4691  */
4692 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
4693 {
4694 	int nr_done = 0;
4695 	u32 done_mask;
4696 	int i;
4697 
4698 	done_mask = ap->qc_active ^ qc_active;
4699 
4700 	if (unlikely(done_mask & qc_active)) {
4701 		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4702 				"(%08x->%08x)\n", ap->qc_active, qc_active);
4703 		return -EINVAL;
4704 	}
4705 
4706 	for (i = 0; i < ATA_MAX_QUEUE; i++) {
4707 		struct ata_queued_cmd *qc;
4708 
4709 		if (!(done_mask & (1 << i)))
4710 			continue;
4711 
4712 		if ((qc = ata_qc_from_tag(ap, i))) {
4713 			ata_qc_complete(qc);
4714 			nr_done++;
4715 		}
4716 	}
4717 
4718 	return nr_done;
4719 }
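
/*
 * Example: a worked done_mask computation for the function above.
 * Suppose tags 0, 1 and 2 are in flight (ap->qc_active == 0x7) and
 * the controller now reports only tag 0 as active (@qc_active == 0x1):
 *
 *	done_mask = 0x7 ^ 0x1;		// == 0x6, tags 1 and 2 done
 *	done_mask & qc_active		// == 0, transition is legal
 *
 * Tags 1 and 2 are completed and 2 is returned.  Had @qc_active set
 * a bit that ap->qc_active didn't (a command "appearing" in flight),
 * done_mask & qc_active would be non-zero and -EINVAL returned.
 */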
4720 
4721 /**
4722  *	ata_qc_issue - issue taskfile to device
4723  *	@qc: command to issue to device
4724  *
4725  *	Prepare an ATA command for submission to a device.
4726  *	This includes mapping the data into a DMA-able
4727  *	area, filling in the S/G table, and finally
4728  *	writing the taskfile to hardware, starting the command.
4729  *
4730  *	LOCKING:
4731  *	spin_lock_irqsave(host lock)
4732  */
4733 void ata_qc_issue(struct ata_queued_cmd *qc)
4734 {
4735 	struct ata_port *ap = qc->ap;
4736 	struct ata_link *link = qc->dev->link;
4737 	u8 prot = qc->tf.protocol;
4738 
4739 	/* Make sure only one non-NCQ command is outstanding.  The
4740 	 * check is skipped for old EH because it reuses active qc to
4741 	 * request ATAPI sense.
4742 	 */
4743 	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4744 
4745 	if (ata_is_ncq(prot)) {
4746 		WARN_ON(link->sactive & (1 << qc->tag));
4747 
4748 		if (!link->sactive)
4749 			ap->nr_active_links++;
4750 		link->sactive |= 1 << qc->tag;
4751 	} else {
4752 		WARN_ON(link->sactive);
4753 
4754 		ap->nr_active_links++;
4755 		link->active_tag = qc->tag;
4756 	}
4757 
4758 	qc->flags |= ATA_QCFLAG_ACTIVE;
4759 	ap->qc_active |= 1 << qc->tag;
4760 
4761 	/* We guarantee to LLDs that they will have at least one
4762 	 * non-zero sg if the command is a data command.
4763 	 */
4764 	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
4765 
4766 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4767 				 (ap->flags & ATA_FLAG_PIO_DMA)))
4768 		if (ata_sg_setup(qc))
4769 			goto sg_err;
4770 
4771 	/* if device is sleeping, schedule reset and abort the link */
4772 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4773 		link->eh_info.action |= ATA_EH_RESET;
4774 		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4775 		ata_link_abort(link);
4776 		return;
4777 	}
4778 
4779 	ap->ops->qc_prep(qc);
4780 
4781 	qc->err_mask |= ap->ops->qc_issue(qc);
4782 	if (unlikely(qc->err_mask))
4783 		goto err;
4784 	return;
4785 
4786 sg_err:
4787 	qc->err_mask |= AC_ERR_SYSTEM;
4788 err:
4789 	ata_qc_complete(qc);
4790 }
4791 
4792 /**
4793  *	sata_scr_valid - test whether SCRs are accessible
4794  *	@link: ATA link to test SCR accessibility for
4795  *
4796  *	Test whether SCRs are accessible for @link.
4797  *
4798  *	LOCKING:
4799  *	None.
4800  *
4801  *	RETURNS:
4802  *	1 if SCRs are accessible, 0 otherwise.
4803  */
4804 int sata_scr_valid(struct ata_link *link)
4805 {
4806 	struct ata_port *ap = link->ap;
4807 
4808 	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
4809 }
4810 
4811 /**
4812  *	sata_scr_read - read SCR register of the specified port
4813  *	@link: ATA link to read SCR for
4814  *	@reg: SCR to read
4815  *	@val: Place to store read value
4816  *
4817  *	Read SCR register @reg of @link into *@val.  This function is
4818  *	guaranteed to succeed if @link is ap->link, the cable type of
4819  *	the port is SATA and the port implements ->scr_read.
4820  *
4821  *	LOCKING:
4822  *	None if @link is ap->link.  Kernel thread context otherwise.
4823  *
4824  *	RETURNS:
4825  *	0 on success, negative errno on failure.
4826  */
4827 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
4828 {
4829 	if (ata_is_host_link(link)) {
4830 		struct ata_port *ap = link->ap;
4831 
4832 		if (sata_scr_valid(link))
4833 			return ap->ops->scr_read(ap, reg, val);
4834 		return -EOPNOTSUPP;
4835 	}
4836 
4837 	return sata_pmp_scr_read(link, reg, val);
4838 }
4839 
4840 /**
4841  *	sata_scr_write - write SCR register of the specified port
4842  *	@link: ATA link to write SCR for
4843  *	@reg: SCR to write
4844  *	@val: value to write
4845  *
4846  *	Write @val to SCR register @reg of @link.  This function is
4847  *	guaranteed to succeed if @link is ap->link, the cable type of
4848  *	the port is SATA and the port implements ->scr_write.
4849  *
4850  *	LOCKING:
4851  *	None if @link is ap->link.  Kernel thread context otherwise.
4852  *
4853  *	RETURNS:
4854  *	0 on success, negative errno on failure.
4855  */
4856 int sata_scr_write(struct ata_link *link, int reg, u32 val)
4857 {
4858 	if (ata_is_host_link(link)) {
4859 		struct ata_port *ap = link->ap;
4860 
4861 		if (sata_scr_valid(link))
4862 			return ap->ops->scr_write(ap, reg, val);
4863 		return -EOPNOTSUPP;
4864 	}
4865 
4866 	return sata_pmp_scr_write(link, reg, val);
4867 }
4868 
4869 /**
4870  *	sata_scr_write_flush - write SCR register of the specified port and flush
4871  *	@link: ATA link to write SCR for
4872  *	@reg: SCR to write
4873  *	@val: value to write
4874  *
4875  *	This function is identical to sata_scr_write() except that this
4876  *	function performs a flush after writing to the register.
4877  *
4878  *	LOCKING:
4879  *	None if @link is ap->link.  Kernel thread context otherwise.
4880  *
4881  *	RETURNS:
4882  *	0 on success, negative errno on failure.
4883  */
4884 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
4885 {
4886 	if (ata_is_host_link(link)) {
4887 		struct ata_port *ap = link->ap;
4888 		int rc;
4889 
4890 		if (sata_scr_valid(link)) {
4891 			rc = ap->ops->scr_write(ap, reg, val);
4892 			if (rc == 0)
4893 				rc = ap->ops->scr_read(ap, reg, &val);
4894 			return rc;
4895 		}
4896 		return -EOPNOTSUPP;
4897 	}
4898 
4899 	return sata_pmp_scr_write(link, reg, val);
4900 }
4901 
4902 /**
4903  *	ata_link_online - test whether the given link is online
4904  *	@link: ATA link to test
4905  *
4906  *	Test whether @link is online.  Note that this function returns
4907  *	0 if online status of @link cannot be obtained, so
4908  *	ata_link_online(link) != !ata_link_offline(link).
4909  *
4910  *	LOCKING:
4911  *	None.
4912  *
4913  *	RETURNS:
4914  *	1 if the port online status is available and online.
4915  */
4916 int ata_link_online(struct ata_link *link)
4917 {
4918 	u32 sstatus;
4919 
4920 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4921 	    (sstatus & 0xf) == 0x3)
4922 		return 1;
4923 	return 0;
4924 }
4925 
4926 /**
4927  *	ata_link_offline - test whether the given link is offline
4928  *	@link: ATA link to test
4929  *
4930  *	Test whether @link is offline.  Note that this function
4931  *	returns 0 if offline status of @link cannot be obtained, so
4932  *	ata_link_online(link) != !ata_link_offline(link).
4933  *
4934  *	LOCKING:
4935  *	None.
4936  *
4937  *	RETURNS:
4938  *	1 if the port offline status is available and offline.
4939  */
4940 int ata_link_offline(struct ata_link *link)
4941 {
4942 	u32 sstatus;
4943 
4944 	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4945 	    (sstatus & 0xf) != 0x3)
4946 		return 1;
4947 	return 0;
4948 }
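
/*
 * Example: the SStatus DET field (sstatus & 0xf) as decoded by the
 * two helpers above:
 *
 *	0x0 - no device detected		-> offline
 *	0x1 - device detected, no PHY comm	-> offline
 *	0x3 - device detected, PHY comm up	-> online
 *	0x4 - PHY in offline mode		-> offline
 *
 * When SCR_STATUS can't be read at all, both helpers return 0, which
 * is why ata_link_online(link) != !ata_link_offline(link).
 */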
4949 
4950 #ifdef CONFIG_PM
4951 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4952 			       unsigned int action, unsigned int ehi_flags,
4953 			       int wait)
4954 {
4955 	unsigned long flags;
4956 	int i, rc;
4957 
4958 	for (i = 0; i < host->n_ports; i++) {
4959 		struct ata_port *ap = host->ports[i];
4960 		struct ata_link *link;
4961 
4962 		/* Previous resume operation might still be in
4963 		 * progress.  Wait for PM_PENDING to clear.
4964 		 */
4965 		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4966 			ata_port_wait_eh(ap);
4967 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4968 		}
4969 
4970 		/* request PM ops to EH */
4971 		spin_lock_irqsave(ap->lock, flags);
4972 
4973 		ap->pm_mesg = mesg;
4974 		if (wait) {
4975 			rc = 0;
4976 			ap->pm_result = &rc;
4977 		}
4978 
4979 		ap->pflags |= ATA_PFLAG_PM_PENDING;
4980 		__ata_port_for_each_link(link, ap) {
4981 			link->eh_info.action |= action;
4982 			link->eh_info.flags |= ehi_flags;
4983 		}
4984 
4985 		ata_port_schedule_eh(ap);
4986 
4987 		spin_unlock_irqrestore(ap->lock, flags);
4988 
4989 		/* wait and check result */
4990 		if (wait) {
4991 			ata_port_wait_eh(ap);
4992 			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4993 			if (rc)
4994 				return rc;
4995 		}
4996 	}
4997 
4998 	return 0;
4999 }
5000 
5001 /**
5002  *	ata_host_suspend - suspend host
5003  *	@host: host to suspend
5004  *	@mesg: PM message
5005  *
5006  *	Suspend @host.  Actual operation is performed by EH.  This
5007  *	function requests EH to perform PM operations and waits for EH
5008  *	to finish.
5009  *
5010  *	LOCKING:
5011  *	Kernel thread context (may sleep).
5012  *
5013  *	RETURNS:
5014  *	0 on success, -errno on failure.
5015  */
5016 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5017 {
5018 	int rc;
5019 
5020 	/*
5021 	 * disable link pm on all ports before requesting
5022 	 * any pm activity
5023 	 */
5024 	ata_lpm_enable(host);
5025 
5026 	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5027 	if (rc == 0)
5028 		host->dev->power.power_state = mesg;
5029 	return rc;
5030 }
5031 
5032 /**
5033  *	ata_host_resume - resume host
5034  *	@host: host to resume
5035  *
5036  *	Resume @host.  Actual operation is performed by EH.  This
5037  *	function requests EH to perform PM operations and returns.
5038  *	Note that all resume operations are performed in parallel.
5039  *
5040  *	LOCKING:
5041  *	Kernel thread context (may sleep).
5042  */
5043 void ata_host_resume(struct ata_host *host)
5044 {
5045 	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
5046 			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5047 	host->dev->power.power_state = PMSG_ON;
5048 
5049 	/* reenable link pm */
5050 	ata_lpm_disable(host);
5051 }
5052 #endif
5053 
5054 /**
5055  *	ata_port_start - Set port up for dma.
5056  *	@ap: Port to initialize
5057  *
5058  *	Called just after data structures for each port are
5059  *	initialized.  Allocates space for PRD table.
5060  *
5061  *	May be used as the port_start() entry in ata_port_operations.
5062  *
5063  *	LOCKING:
5064  *	Inherited from caller.
5065  */
5066 int ata_port_start(struct ata_port *ap)
5067 {
5068 	struct device *dev = ap->dev;
5069 
5070 	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5071 				      GFP_KERNEL);
5072 	if (!ap->prd)
5073 		return -ENOMEM;
5074 
5075 	return 0;
5076 }
5077 
5078 /**
5079  *	ata_dev_init - Initialize an ata_device structure
5080  *	@dev: Device structure to initialize
5081  *
5082  *	Initialize @dev in preparation for probing.
5083  *
5084  *	LOCKING:
5085  *	Inherited from caller.
5086  */
5087 void ata_dev_init(struct ata_device *dev)
5088 {
5089 	struct ata_link *link = dev->link;
5090 	struct ata_port *ap = link->ap;
5091 	unsigned long flags;
5092 
5093 	/* SATA spd limit is bound to the first device */
5094 	link->sata_spd_limit = link->hw_sata_spd_limit;
5095 	link->sata_spd = 0;
5096 
5097 	/* High bits of dev->flags are used to record warm plug
5098 	 * requests which occur asynchronously.  Synchronize using
5099 	 * host lock.
5100 	 */
5101 	spin_lock_irqsave(ap->lock, flags);
5102 	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5103 	dev->horkage = 0;
5104 	spin_unlock_irqrestore(ap->lock, flags);
5105 
5106 	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5107 	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5108 	dev->pio_mask = UINT_MAX;
5109 	dev->mwdma_mask = UINT_MAX;
5110 	dev->udma_mask = UINT_MAX;
5111 }
5112 
5113 /**
5114  *	ata_link_init - Initialize an ata_link structure
5115  *	@ap: ATA port link is attached to
5116  *	@link: Link structure to initialize
5117  *	@pmp: Port multiplier port number
5118  *
5119  *	Initialize @link.
5120  *
5121  *	LOCKING:
5122  *	Kernel thread context (may sleep)
5123  */
5124 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5125 {
5126 	int i;
5127 
5128 	/* clear everything except for devices */
5129 	memset(link, 0, offsetof(struct ata_link, device[0]));
5130 
5131 	link->ap = ap;
5132 	link->pmp = pmp;
5133 	link->active_tag = ATA_TAG_POISON;
5134 	link->hw_sata_spd_limit = UINT_MAX;
5135 
5136 	/* can't use iterator, ap isn't initialized yet */
5137 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5138 		struct ata_device *dev = &link->device[i];
5139 
5140 		dev->link = link;
5141 		dev->devno = dev - link->device;
5142 		ata_dev_init(dev);
5143 	}
5144 }
5145 
5146 /**
5147  *	sata_link_init_spd - Initialize link->sata_spd_limit
5148  *	@link: Link to configure sata_spd_limit for
5149  *
5150  *	Initialize @link->[hw_]sata_spd_limit to the currently
5151  *	configured value.
5152  *
5153  *	LOCKING:
5154  *	Kernel thread context (may sleep).
5155  *
5156  *	RETURNS:
5157  *	0 on success, -errno on failure.
5158  */
5159 int sata_link_init_spd(struct ata_link *link)
5160 {
5161 	u32 scontrol;
5162 	u8 spd;
5163 	int rc;
5164 
5165 	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5166 	if (rc)
5167 		return rc;
5168 
5169 	spd = (scontrol >> 4) & 0xf;
5170 	if (spd)
5171 		link->hw_sata_spd_limit &= (1 << spd) - 1;
5172 
5173 	ata_force_spd_limit(link);
5174 
5175 	link->sata_spd_limit = link->hw_sata_spd_limit;
5176 
5177 	return 0;
5178 }
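
/*
 * Example: a worked SControl decode for the function above.  With
 * scontrol == 0x0020 the SPD field ((scontrol >> 4) & 0xf) is 2,
 * i.e. the link is administratively limited to 3.0Gbps, so
 *
 *	link->hw_sata_spd_limit &= (1 << 2) - 1;	// == 0x3
 *
 * keeps only the 1.5Gbps and 3.0Gbps bits.  SPD == 0 means "no
 * restriction" and the limit mask is left untouched.
 */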
5179 
5180 /**
5181  *	ata_port_alloc - allocate and initialize basic ATA port resources
5182  *	@host: ATA host this allocated port belongs to
5183  *
5184  *	Allocate and initialize basic ATA port resources.
5185  *
5186  *	RETURNS:
5187  *	Allocated ATA port on success, NULL on failure.
5188  *
5189  *	LOCKING:
5190  *	Inherited from calling layer (may sleep).
5191  */
5192 struct ata_port *ata_port_alloc(struct ata_host *host)
5193 {
5194 	struct ata_port *ap;
5195 
5196 	DPRINTK("ENTER\n");
5197 
5198 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5199 	if (!ap)
5200 		return NULL;
5201 
5202 	ap->pflags |= ATA_PFLAG_INITIALIZING;
5203 	ap->lock = &host->lock;
5204 	ap->flags = ATA_FLAG_DISABLED;
5205 	ap->print_id = -1;
5206 	ap->ctl = ATA_DEVCTL_OBS;
5207 	ap->host = host;
5208 	ap->dev = host->dev;
5209 	ap->last_ctl = 0xFF;
5210 
5211 #if defined(ATA_VERBOSE_DEBUG)
5212 	/* turn on all debugging levels */
5213 	ap->msg_enable = 0x00FF;
5214 #elif defined(ATA_DEBUG)
5215 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5216 #else
5217 	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5218 #endif
5219 
5220 #ifdef CONFIG_ATA_SFF
5221 	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
5222 #endif
5223 	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5224 	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5225 	INIT_LIST_HEAD(&ap->eh_done_q);
5226 	init_waitqueue_head(&ap->eh_wait_q);
5227 	init_timer_deferrable(&ap->fastdrain_timer);
5228 	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5229 	ap->fastdrain_timer.data = (unsigned long)ap;
5230 
5231 	ap->cbl = ATA_CBL_NONE;
5232 
5233 	ata_link_init(ap, &ap->link, 0);
5234 
5235 #ifdef ATA_IRQ_TRAP
5236 	ap->stats.unhandled_irq = 1;
5237 	ap->stats.idle_irq = 1;
5238 #endif
5239 	return ap;
5240 }
5241 
5242 static void ata_host_release(struct device *gendev, void *res)
5243 {
5244 	struct ata_host *host = dev_get_drvdata(gendev);
5245 	int i;
5246 
5247 	for (i = 0; i < host->n_ports; i++) {
5248 		struct ata_port *ap = host->ports[i];
5249 
5250 		if (!ap)
5251 			continue;
5252 
5253 		if (ap->scsi_host)
5254 			scsi_host_put(ap->scsi_host);
5255 
5256 		kfree(ap->pmp_link);
5257 		kfree(ap);
5258 		host->ports[i] = NULL;
5259 	}
5260 
5261 	dev_set_drvdata(gendev, NULL);
5262 }
5263 
5264 /**
5265  *	ata_host_alloc - allocate and init basic ATA host resources
5266  *	@dev: generic device this host is associated with
5267  *	@max_ports: maximum number of ATA ports associated with this host
5268  *
5269  *	Allocate and initialize basic ATA host resources.  An LLD calls
5270  *	this function to allocate a host, initializes it fully, and
5271  *	attaches it using ata_host_register().
5272  *
5273  *	@max_ports ports are allocated and host->n_ports is
5274  *	initialized to @max_ports.  The caller is allowed to decrease
5275  *	host->n_ports before calling ata_host_register().  The unused
5276  *	ports will be automatically freed on registration.
5277  *
5278  *	RETURNS:
5279  *	Allocated ATA host on success, NULL on failure.
5280  *
5281  *	LOCKING:
5282  *	Inherited from calling layer (may sleep).
5283  */
5284 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5285 {
5286 	struct ata_host *host;
5287 	size_t sz;
5288 	int i;
5289 
5290 	DPRINTK("ENTER\n");
5291 
5292 	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5293 		return NULL;
5294 
5295 	/* alloc a container for our list of ATA ports (buses) */
5296 	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5298 	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5299 	if (!host)
5300 		goto err_out;
5301 
5302 	devres_add(dev, host);
5303 	dev_set_drvdata(dev, host);
5304 
5305 	spin_lock_init(&host->lock);
5306 	host->dev = dev;
5307 	host->n_ports = max_ports;
5308 
5309 	/* allocate ports bound to this host */
5310 	for (i = 0; i < max_ports; i++) {
5311 		struct ata_port *ap;
5312 
5313 		ap = ata_port_alloc(host);
5314 		if (!ap)
5315 			goto err_out;
5316 
5317 		ap->port_no = i;
5318 		host->ports[i] = ap;
5319 	}
5320 
5321 	devres_remove_group(dev, NULL);
5322 	return host;
5323 
5324  err_out:
5325 	devres_release_group(dev, NULL);
5326 	return NULL;
5327 }
5328 
5329 /**
5330  *	ata_host_alloc_pinfo - alloc host and init with port_info array
5331  *	@dev: generic device this host is associated with
5332  *	@ppi: array of ATA port_info to initialize host with
5333  *	@n_ports: number of ATA ports attached to this host
5334  *
5335  *	Allocate ATA host and initialize with info from @ppi.  If NULL
5336  *	terminated, @ppi may contain fewer entries than @n_ports.  The
5337  *	last entry will be used for the remaining ports.
5338  *
5339  *	RETURNS:
5340  *	Allocated ATA host on success, NULL on failure.
5341  *
5342  *	LOCKING:
5343  *	Inherited from calling layer (may sleep).
5344  */
5345 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5346 				      const struct ata_port_info * const * ppi,
5347 				      int n_ports)
5348 {
5349 	const struct ata_port_info *pi;
5350 	struct ata_host *host;
5351 	int i, j;
5352 
5353 	host = ata_host_alloc(dev, n_ports);
5354 	if (!host)
5355 		return NULL;
5356 
5357 	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5358 		struct ata_port *ap = host->ports[i];
5359 
5360 		if (ppi[j])
5361 			pi = ppi[j++];
5362 
5363 		ap->pio_mask = pi->pio_mask;
5364 		ap->mwdma_mask = pi->mwdma_mask;
5365 		ap->udma_mask = pi->udma_mask;
5366 		ap->flags |= pi->flags;
5367 		ap->link.flags |= pi->link_flags;
5368 		ap->ops = pi->port_ops;
5369 
5370 		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5371 			host->ops = pi->port_ops;
5372 	}
5373 
5374 	return host;
5375 }
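
/*
 * Example: a minimal sketch of how an LLD typically feeds this
 * function.  "foo_port_info" and "foo_port_ops" are hypothetical
 * placeholders:
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,		// pio0-4
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 *
 * Since @ppi is NULL terminated after one entry, both ports are
 * initialized from foo_port_info.
 */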
5376 
5377 static void ata_host_stop(struct device *gendev, void *res)
5378 {
5379 	struct ata_host *host = dev_get_drvdata(gendev);
5380 	int i;
5381 
5382 	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5383 
5384 	for (i = 0; i < host->n_ports; i++) {
5385 		struct ata_port *ap = host->ports[i];
5386 
5387 		if (ap->ops->port_stop)
5388 			ap->ops->port_stop(ap);
5389 	}
5390 
5391 	if (host->ops->host_stop)
5392 		host->ops->host_stop(host);
5393 }
5394 
5395 /**
5396  *	ata_finalize_port_ops - finalize ata_port_operations
5397  *	@ops: ata_port_operations to finalize
5398  *
5399  *	An ata_port_operations can inherit from another ops and that
5400  *	ops can again inherit from another.  This can go on as many
5401  *	times as necessary as long as there is no loop in the
5402  *	inheritance chain.
5403  *
5404  *	Ops tables are finalized when the host is started.  NULL or
5405  *	unspecified entries are inherited from the closest ancestor
5406  *	which has the method and the entry is populated with it.
5407  *	After finalization, the ops table directly points to all the
5408  *	methods and ->inherits is no longer necessary and cleared.
5409  *
5410  *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5411  *
5412  *	LOCKING:
5413  *	None.
5414  */
5415 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5416 {
5417 	static DEFINE_SPINLOCK(lock);
5418 	const struct ata_port_operations *cur;
5419 	void **begin = (void **)ops;
5420 	void **end = (void **)&ops->inherits;
5421 	void **pp;
5422 
5423 	if (!ops || !ops->inherits)
5424 		return;
5425 
5426 	spin_lock(&lock);
5427 
5428 	for (cur = ops->inherits; cur; cur = cur->inherits) {
5429 		void **inherit = (void **)cur;
5430 
5431 		for (pp = begin; pp < end; pp++, inherit++)
5432 			if (!*pp)
5433 				*pp = *inherit;
5434 	}
5435 
5436 	for (pp = begin; pp < end; pp++)
5437 		if (IS_ERR(*pp))
5438 			*pp = NULL;
5439 
5440 	ops->inherits = NULL;
5441 
5442 	spin_unlock(&lock);
5443 }
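
/*
 * Example: what finalization does to an inheriting ops table.  Given
 * a hypothetical LLD ops
 *
 *	static struct ata_port_operations foo_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.hardreset	= foo_hardreset,
 *		.postreset	= ATA_OP_NULL,
 *	};
 *
 * after ata_finalize_port_ops(&foo_ops) every method foo_ops left
 * NULL is copied from its ancestor chain, .hardreset keeps
 * foo_hardreset, .postreset (forced off with the ATA_OP_NULL IS_ERR
 * cookie) ends up NULL, and .inherits itself is cleared.
 */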
5444 
5445 /**
5446  *	ata_host_start - start and freeze ports of an ATA host
5447  *	@host: ATA host to start ports for
5448  *
5449  *	Start and then freeze ports of @host.  Started status is
5450  *	recorded in host->flags, so this function can be called
5451  *	multiple times.  Ports are guaranteed to get started only
5452  *	once.  If host->ops isn't initialized yet, it's set to the
5453  *	first non-dummy port ops.
5454  *
5455  *	LOCKING:
5456  *	Inherited from calling layer (may sleep).
5457  *
5458  *	RETURNS:
5459  *	0 if all ports are started successfully, -errno otherwise.
5460  */
5461 int ata_host_start(struct ata_host *host)
5462 {
5463 	int have_stop = 0;
5464 	void *start_dr = NULL;
5465 	int i, rc;
5466 
5467 	if (host->flags & ATA_HOST_STARTED)
5468 		return 0;
5469 
5470 	ata_finalize_port_ops(host->ops);
5471 
5472 	for (i = 0; i < host->n_ports; i++) {
5473 		struct ata_port *ap = host->ports[i];
5474 
5475 		ata_finalize_port_ops(ap->ops);
5476 
5477 		if (!host->ops && !ata_port_is_dummy(ap))
5478 			host->ops = ap->ops;
5479 
5480 		if (ap->ops->port_stop)
5481 			have_stop = 1;
5482 	}
5483 
5484 	if (host->ops->host_stop)
5485 		have_stop = 1;
5486 
5487 	if (have_stop) {
5488 		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5489 		if (!start_dr)
5490 			return -ENOMEM;
5491 	}
5492 
5493 	for (i = 0; i < host->n_ports; i++) {
5494 		struct ata_port *ap = host->ports[i];
5495 
5496 		if (ap->ops->port_start) {
5497 			rc = ap->ops->port_start(ap);
5498 			if (rc) {
5499 				if (rc != -ENODEV)
5500 					dev_printk(KERN_ERR, host->dev,
5501 						"failed to start port %d "
5502 						"(errno=%d)\n", i, rc);
5503 				goto err_out;
5504 			}
5505 		}
5506 		ata_eh_freeze_port(ap);
5507 	}
5508 
5509 	if (start_dr)
5510 		devres_add(host->dev, start_dr);
5511 	host->flags |= ATA_HOST_STARTED;
5512 	return 0;
5513 
5514  err_out:
5515 	while (--i >= 0) {
5516 		struct ata_port *ap = host->ports[i];
5517 
5518 		if (ap->ops->port_stop)
5519 			ap->ops->port_stop(ap);
5520 	}
5521 	devres_free(start_dr);
5522 	return rc;
5523 }
5524 
5525 /**
5526  *	ata_sas_host_init - Initialize a host struct
5527  *	@host:	host to initialize
5528  *	@dev:	device host is attached to
5529  *	@flags:	host flags
5530  *	@ops:	port_ops
5531  *
5532  *	LOCKING:
5533  *	PCI/etc. bus probe sem.
5534  *
5535  */
5536 /* KILLME - the only user left is ipr */
5537 void ata_host_init(struct ata_host *host, struct device *dev,
5538 		   unsigned long flags, struct ata_port_operations *ops)
5539 {
5540 	spin_lock_init(&host->lock);
5541 	host->dev = dev;
5542 	host->flags = flags;
5543 	host->ops = ops;
5544 }
5545 
5546 /**
5547  *	ata_host_register - register initialized ATA host
5548  *	@host: ATA host to register
5549  *	@sht: template for SCSI host
5550  *
5551  *	Register initialized ATA host.  @host is allocated using
5552  *	ata_host_alloc() and fully initialized by LLD.  This function
5553  *	starts ports, registers @host with the ATA and SCSI layers and
5554  *	probes registered devices.
5555  *
5556  *	LOCKING:
5557  *	Inherited from calling layer (may sleep).
5558  *
5559  *	RETURNS:
5560  *	0 on success, -errno otherwise.
5561  */
5562 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5563 {
5564 	int i, rc;
5565 
5566 	/* host must have been started */
5567 	if (!(host->flags & ATA_HOST_STARTED)) {
5568 		dev_printk(KERN_ERR, host->dev,
5569 			   "BUG: trying to register unstarted host\n");
5570 		WARN_ON(1);
5571 		return -EINVAL;
5572 	}
5573 
5574 	/* Blow away unused ports.  This happens when LLD can't
5575 	 * determine the exact number of ports to allocate at
5576 	 * allocation time.
5577 	 */
5578 	for (i = host->n_ports; host->ports[i]; i++)
5579 		kfree(host->ports[i]);
5580 
5581 	/* give ports names and add SCSI hosts */
5582 	for (i = 0; i < host->n_ports; i++)
5583 		host->ports[i]->print_id = ata_print_id++;
5584 
5585 	rc = ata_scsi_add_hosts(host, sht);
5586 	if (rc)
5587 		return rc;
5588 
5589 	/* associate with ACPI nodes */
5590 	ata_acpi_associate(host);
5591 
5592 	/* set cable, sata_spd_limit and report */
5593 	for (i = 0; i < host->n_ports; i++) {
5594 		struct ata_port *ap = host->ports[i];
5595 		unsigned long xfer_mask;
5596 
5597 		/* set SATA cable type if still unset */
5598 		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5599 			ap->cbl = ATA_CBL_SATA;
5600 
5601 		/* init sata_spd_limit to the current value */
5602 		sata_link_init_spd(&ap->link);
5603 
5604 		/* print per-port info to dmesg */
5605 		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5606 					      ap->udma_mask);
5607 
5608 		if (!ata_port_is_dummy(ap)) {
5609 			ata_port_printk(ap, KERN_INFO,
5610 					"%cATA max %s %s\n",
5611 					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5612 					ata_mode_string(xfer_mask),
5613 					ap->link.eh_info.desc);
5614 			ata_ehi_clear_desc(&ap->link.eh_info);
5615 		} else
5616 			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5617 	}
5618 
5619 	/* perform each probe synchronously */
5620 	DPRINTK("probe begin\n");
5621 	for (i = 0; i < host->n_ports; i++) {
5622 		struct ata_port *ap = host->ports[i];
5623 
5624 		/* probe */
5625 		if (ap->ops->error_handler) {
5626 			struct ata_eh_info *ehi = &ap->link.eh_info;
5627 			unsigned long flags;
5628 
5629 			ata_port_probe(ap);
5630 
5631 			/* kick EH for boot probing */
5632 			spin_lock_irqsave(ap->lock, flags);
5633 
5634 			ehi->probe_mask |= ATA_ALL_DEVICES;
5635 			ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
5636 			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5637 
5638 			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5639 			ap->pflags |= ATA_PFLAG_LOADING;
5640 			ata_port_schedule_eh(ap);
5641 
5642 			spin_unlock_irqrestore(ap->lock, flags);
5643 
5644 			/* wait for EH to finish */
5645 			ata_port_wait_eh(ap);
5646 		} else {
5647 			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5648 			rc = ata_bus_probe(ap);
5649 			DPRINTK("ata%u: bus probe end\n", ap->print_id);
5650 
5651 			if (rc) {
5652 				/* FIXME: do something useful here?
5653 				 * Current libata behavior will
5654 				 * tear down everything when
5655 				 * the module is removed
5656 				 * or the h/w is unplugged.
5657 				 */
5658 			}
5659 		}
5660 	}
5661 
5662 	/* probes are done, now scan each port's disk(s) */
5663 	DPRINTK("host probe begin\n");
5664 	for (i = 0; i < host->n_ports; i++) {
5665 		struct ata_port *ap = host->ports[i];
5666 
5667 		ata_scsi_scan_host(ap, 1);
5668 	}
5669 
5670 	return 0;
5671 }
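
/*
 * Example (illustrative sketch, not part of libata): the usual LLD
 * sequence that ends in ata_host_register().  my_dev, my_port_info
 * and my_sht are hypothetical LLD-side names.
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *	int rc;
 *
 *	host = ata_host_alloc_pinfo(my_dev, ppi, 1);
 *	if (!host)
 *		return -ENOMEM;
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	return ata_host_register(host, &my_sht);
 */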
5672 
5673 /**
5674  *	ata_host_activate - start host, request IRQ and register it
5675  *	@host: target ATA host
5676  *	@irq: IRQ to request
5677  *	@irq_handler: irq_handler used when requesting IRQ
5678  *	@irq_flags: irq_flags used when requesting IRQ
5679  *	@sht: scsi_host_template to use when registering the host
5680  *
5681  *	After allocating an ATA host and initializing it, most libata
5682  *	LLDs perform three steps to activate the host - start host,
5683  *	request IRQ and register it.  This helper takes necessary
5684  *	arguments and performs the three steps in one go.
5685  *
5686  *	An invalid (zero) @irq skips the IRQ registration and expects
5687  *	the host to have set polling mode on the ports.  In this case,
5688  *	@irq_handler should be NULL.
5689  *
5690  *	LOCKING:
5691  *	Inherited from calling layer (may sleep).
5692  *
5693  *	RETURNS:
5694  *	0 on success, -errno otherwise.
5695  */
5696 int ata_host_activate(struct ata_host *host, int irq,
5697 		      irq_handler_t irq_handler, unsigned long irq_flags,
5698 		      struct scsi_host_template *sht)
5699 {
5700 	int i, rc;
5701 
5702 	rc = ata_host_start(host);
5703 	if (rc)
5704 		return rc;
5705 
5706 	/* Special case for polling mode */
5707 	if (!irq) {
5708 		WARN_ON(irq_handler);
5709 		return ata_host_register(host, sht);
5710 	}
5711 
5712 	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5713 			      dev_driver_string(host->dev), host);
5714 	if (rc)
5715 		return rc;
5716 
5717 	for (i = 0; i < host->n_ports; i++)
5718 		ata_port_desc(host->ports[i], "irq %d", irq);
5719 
5720 	rc = ata_host_register(host, sht);
5721 	/* if failed, just free the IRQ and leave ports alone */
5722 	if (rc)
5723 		devm_free_irq(host->dev, irq, host);
5724 
5725 	return rc;
5726 }
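
/*
 * Example (sketch): how a PCI LLD's ->probe() might finish by calling
 * ata_host_activate().  my_interrupt and my_sht are hypothetical.
 *
 *	return ata_host_activate(host, pdev->irq, my_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */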
5727 
5728 /**
5729  *	ata_port_detach - Detach ATA port in preparation for device removal
5730  *	@ap: ATA port to be detached
5731  *
5732  *	Detach all ATA devices and the associated SCSI devices of @ap;
5733  *	then, remove the associated SCSI host.  @ap is guaranteed to
5734  *	be quiescent on return from this function.
5735  *
5736  *	LOCKING:
5737  *	Kernel thread context (may sleep).
5738  */
5739 static void ata_port_detach(struct ata_port *ap)
5740 {
5741 	unsigned long flags;
5742 	struct ata_link *link;
5743 	struct ata_device *dev;
5744 
5745 	if (!ap->ops->error_handler)
5746 		goto skip_eh;
5747 
5748 	/* tell EH we're leaving & flush EH */
5749 	spin_lock_irqsave(ap->lock, flags);
5750 	ap->pflags |= ATA_PFLAG_UNLOADING;
5751 	spin_unlock_irqrestore(ap->lock, flags);
5752 
5753 	ata_port_wait_eh(ap);
5754 
5755 	/* EH is now guaranteed to see UNLOADING - EH context belongs
5756 	 * to us.  Disable all existing devices.
5757 	 */
5758 	ata_port_for_each_link(link, ap) {
5759 		ata_link_for_each_dev(dev, link)
5760 			ata_dev_disable(dev);
5761 	}
5762 
5763 	/* Final freeze & EH.  All in-flight commands are aborted.  EH
5764 	 * will be skipped and retries will be terminated with bad
5765 	 * target.
5766 	 */
5767 	spin_lock_irqsave(ap->lock, flags);
5768 	ata_port_freeze(ap);	/* won't be thawed */
5769 	spin_unlock_irqrestore(ap->lock, flags);
5770 
5771 	ata_port_wait_eh(ap);
5772 	cancel_rearming_delayed_work(&ap->hotplug_task);
5773 
5774  skip_eh:
5775 	/* remove the associated SCSI host */
5776 	scsi_remove_host(ap->scsi_host);
5777 }
5778 
5779 /**
5780  *	ata_host_detach - Detach all ports of an ATA host
5781  *	@host: Host to detach
5782  *
5783  *	Detach all ports of @host.
5784  *
5785  *	LOCKING:
5786  *	Kernel thread context (may sleep).
5787  */
5788 void ata_host_detach(struct ata_host *host)
5789 {
5790 	int i;
5791 
5792 	for (i = 0; i < host->n_ports; i++)
5793 		ata_port_detach(host->ports[i]);
5794 
5795 	/* the host is dead now, dissociate ACPI */
5796 	ata_acpi_dissociate(host);
5797 }
5798 
5799 #ifdef CONFIG_PCI
5800 
5801 /**
5802  *	ata_pci_remove_one - PCI layer callback for device removal
5803  *	@pdev: PCI device that was removed
5804  *
5805  *	PCI layer indicates to libata via this hook that hot-unplug or
5806  *	module unload event has occurred.  Detach all ports.  Resource
5807  *	release is handled via devres.
5808  *
5809  *	LOCKING:
5810  *	Inherited from PCI layer (may sleep).
5811  */
5812 void ata_pci_remove_one(struct pci_dev *pdev)
5813 {
5814 	struct device *dev = &pdev->dev;
5815 	struct ata_host *host = dev_get_drvdata(dev);
5816 
5817 	ata_host_detach(host);
5818 }
5819 
5820 /* move to PCI subsystem */
5821 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5822 {
5823 	unsigned long tmp = 0;
5824 
5825 	switch (bits->width) {
5826 	case 1: {
5827 		u8 tmp8 = 0;
5828 		pci_read_config_byte(pdev, bits->reg, &tmp8);
5829 		tmp = tmp8;
5830 		break;
5831 	}
5832 	case 2: {
5833 		u16 tmp16 = 0;
5834 		pci_read_config_word(pdev, bits->reg, &tmp16);
5835 		tmp = tmp16;
5836 		break;
5837 	}
5838 	case 4: {
5839 		u32 tmp32 = 0;
5840 		pci_read_config_dword(pdev, bits->reg, &tmp32);
5841 		tmp = tmp32;
5842 		break;
5843 	}
5844 
5845 	default:
5846 		return -EINVAL;
5847 	}
5848 
5849 	tmp &= bits->mask;
5850 
5851 	return (tmp == bits->val) ? 1 : 0;
5852 }
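
/*
 * Example (sketch): checking a port-enable bit in PCI config space
 * with pci_test_config_bits().  The register offset, mask and value
 * below are hypothetical.
 *
 *	static const struct pci_bits my_port_enable = {
 *		.reg	= 0x41,
 *		.width	= 1,
 *		.mask	= 0x80,
 *		.val	= 0x80,
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_port_enable))
 *		return -ENOENT;
 */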
5853 
5854 #ifdef CONFIG_PM
5855 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
5856 {
5857 	pci_save_state(pdev);
5858 	pci_disable_device(pdev);
5859 
5860 	if (mesg.event & PM_EVENT_SLEEP)
5861 		pci_set_power_state(pdev, PCI_D3hot);
5862 }
5863 
5864 int ata_pci_device_do_resume(struct pci_dev *pdev)
5865 {
5866 	int rc;
5867 
5868 	pci_set_power_state(pdev, PCI_D0);
5869 	pci_restore_state(pdev);
5870 
5871 	rc = pcim_enable_device(pdev);
5872 	if (rc) {
5873 		dev_printk(KERN_ERR, &pdev->dev,
5874 			   "failed to enable device after resume (%d)\n", rc);
5875 		return rc;
5876 	}
5877 
5878 	pci_set_master(pdev);
5879 	return 0;
5880 }
5881 
5882 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
5883 {
5884 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5885 	int rc = 0;
5886 
5887 	rc = ata_host_suspend(host, mesg);
5888 	if (rc)
5889 		return rc;
5890 
5891 	ata_pci_device_do_suspend(pdev, mesg);
5892 
5893 	return 0;
5894 }
5895 
5896 int ata_pci_device_resume(struct pci_dev *pdev)
5897 {
5898 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
5899 	int rc;
5900 
5901 	rc = ata_pci_device_do_resume(pdev);
5902 	if (rc == 0)
5903 		ata_host_resume(host);
5904 	return rc;
5905 }
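
/*
 * Example (sketch): an LLD with no special PM needs can plug the
 * helpers above straight into its pci_driver.  my_pci_driver,
 * my_pci_tbl and my_init_one are hypothetical.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_ata",
 *		.id_table	= my_pci_tbl,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */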
5906 #endif /* CONFIG_PM */
5907 
5908 #endif /* CONFIG_PCI */
5909 
5910 static int __init ata_parse_force_one(char **cur,
5911 				      struct ata_force_ent *force_ent,
5912 				      const char **reason)
5913 {
5914 	/* FIXME: Currently, there's no way to tag init const data and
5915 	 * using __initdata causes build failure on some versions of
5916 	 * gcc.  Once __initdataconst is implemented, add const to the
5917 	 * following structure.
5918 	 */
5919 	static struct ata_force_param force_tbl[] __initdata = {
5920 		{ "40c",	.cbl		= ATA_CBL_PATA40 },
5921 		{ "80c",	.cbl		= ATA_CBL_PATA80 },
5922 		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
5923 		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
5924 		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
5925 		{ "sata",	.cbl		= ATA_CBL_SATA },
5926 		{ "1.5Gbps",	.spd_limit	= 1 },
5927 		{ "3.0Gbps",	.spd_limit	= 2 },
5928 		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
5929 		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
5930 		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
5931 		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
5932 		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
5933 		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
5934 		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
5935 		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
5936 		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
5937 		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
5938 		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
5939 		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
5940 		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
5941 		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
5942 		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5943 		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5944 		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
5945 		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5946 		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5947 		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
5948 		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5949 		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5950 		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
5951 		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5952 		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5953 		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
5954 		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5955 		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5956 		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
5957 		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5958 		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5959 		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
5960 		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5961 		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5962 		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
5963 		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
5964 	};
5965 	char *start = *cur, *p = *cur;
5966 	char *id, *val, *endp;
5967 	const struct ata_force_param *match_fp = NULL;
5968 	int nr_matches = 0, i;
5969 
5970 	/* find where this param ends and update *cur */
5971 	while (*p != '\0' && *p != ',')
5972 		p++;
5973 
5974 	if (*p == '\0')
5975 		*cur = p;
5976 	else
5977 		*cur = p + 1;
5978 
5979 	*p = '\0';
5980 
5981 	/* parse */
5982 	p = strchr(start, ':');
5983 	if (!p) {
5984 		val = strstrip(start);
5985 		goto parse_val;
5986 	}
5987 	*p = '\0';
5988 
5989 	id = strstrip(start);
5990 	val = strstrip(p + 1);
5991 
5992 	/* parse id */
5993 	p = strchr(id, '.');
5994 	if (p) {
5995 		*p++ = '\0';
5996 		force_ent->device = simple_strtoul(p, &endp, 10);
5997 		if (p == endp || *endp != '\0') {
5998 			*reason = "invalid device";
5999 			return -EINVAL;
6000 		}
6001 	}
6002 
6003 	force_ent->port = simple_strtoul(id, &endp, 10);
6004 	if (id == endp || *endp != '\0') {
6005 		*reason = "invalid port/link";
6006 		return -EINVAL;
6007 	}
6008 
6009  parse_val:
6010 	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6011 	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6012 		const struct ata_force_param *fp = &force_tbl[i];
6013 
6014 		if (strncasecmp(val, fp->name, strlen(val)))
6015 			continue;
6016 
6017 		nr_matches++;
6018 		match_fp = fp;
6019 
6020 		if (strcasecmp(val, fp->name) == 0) {
6021 			nr_matches = 1;
6022 			break;
6023 		}
6024 	}
6025 
6026 	if (!nr_matches) {
6027 		*reason = "unknown value";
6028 		return -EINVAL;
6029 	}
6030 	if (nr_matches > 1) {
6031 		*reason = "ambiguous value";
6032 		return -EINVAL;
6033 	}
6034 
6035 	force_ent->param = *match_fp;
6036 
6037 	return 0;
6038 }
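
/*
 * Example (for illustration): force parameters the function above
 * accepts.  "3.0Gbps" given alone applies to all ports (a bare value
 * otherwise inherits the preceding entry's ID), "1:noncq" applies to
 * port 1, and "2.1:udma4" applies to device 1 on port 2.  Shortcut
 * matching also lets unambiguous prefixes such as "1.5" stand for
 * "1.5Gbps".
 *
 *	libata.force=3.0Gbps
 *	libata.force=1:noncq,2.1:udma4
 */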
6039 
6040 static void __init ata_parse_force_param(void)
6041 {
6042 	int idx = 0, size = 1;
6043 	int last_port = -1, last_device = -1;
6044 	char *p, *cur, *next;
6045 
6046 	/* calculate maximum number of params and allocate force_tbl */
6047 	for (p = ata_force_param_buf; *p; p++)
6048 		if (*p == ',')
6049 			size++;
6050 
6051 	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6052 	if (!ata_force_tbl) {
6053 		printk(KERN_WARNING "ata: failed to allocate force table, "
6054 		       "libata.force ignored\n");
6055 		return;
6056 	}
6057 
6058 	/* parse and populate the table */
6059 	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6060 		const char *reason = "";
6061 		struct ata_force_ent te = { .port = -1, .device = -1 };
6062 
6063 		next = cur;
6064 		if (ata_parse_force_one(&next, &te, &reason)) {
6065 			printk(KERN_WARNING "ata: failed to parse force "
6066 			       "parameter \"%s\" (%s)\n",
6067 			       cur, reason);
6068 			continue;
6069 		}
6070 
6071 		if (te.port == -1) {
6072 			te.port = last_port;
6073 			te.device = last_device;
6074 		}
6075 
6076 		ata_force_tbl[idx++] = te;
6077 
6078 		last_port = te.port;
6079 		last_device = te.device;
6080 	}
6081 
6082 	ata_force_tbl_size = idx;
6083 }
6084 
6085 static int __init ata_init(void)
6086 {
6087 	ata_parse_force_param();
6088 
6089 	ata_wq = create_workqueue("ata");
6090 	if (!ata_wq)
6091 		return -ENOMEM;
6092 
6093 	ata_aux_wq = create_singlethread_workqueue("ata_aux");
6094 	if (!ata_aux_wq) {
6095 		destroy_workqueue(ata_wq);
6096 		return -ENOMEM;
6097 	}
6098 
6099 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6100 	return 0;
6101 }
6102 
6103 static void __exit ata_exit(void)
6104 {
6105 	kfree(ata_force_tbl);
6106 	destroy_workqueue(ata_wq);
6107 	destroy_workqueue(ata_aux_wq);
6108 }
6109 
6110 subsys_initcall(ata_init);
6111 module_exit(ata_exit);
6112 
6113 static unsigned long ratelimit_time;
6114 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6115 
6116 int ata_ratelimit(void)
6117 {
6118 	int rc;
6119 	unsigned long flags;
6120 
6121 	spin_lock_irqsave(&ata_ratelimit_lock, flags);
6122 
6123 	if (time_after(jiffies, ratelimit_time)) {
6124 		rc = 1;
6125 		ratelimit_time = jiffies + (HZ/5);
6126 	} else
6127 		rc = 0;
6128 
6129 	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6130 
6131 	return rc;
6132 }
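
/*
 * Example (sketch): throttling a message that may fire on every
 * interrupt to at most ~5 per second:
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */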
6133 
6134 /**
6135  *	ata_wait_register - wait until register value changes
6136  *	@reg: IO-mapped register
6137  *	@mask: Mask to apply to read register value
6138  *	@val: Wait condition
6139  *	@interval: polling interval in milliseconds
6140  *	@timeout: timeout in milliseconds
6141  *
6142  *	Waiting for some bits of a register to change is a common
6143  *	operation for ATA controllers.  This function reads the 32bit LE
6144  *	IO-mapped register @reg and tests for the following condition:
6145  *
6146  *	(*@reg & @mask) != @val
6147  *
6148  *	If the condition is met, it returns; otherwise, the read is
6149  *	repeated every @interval milliseconds until @timeout expires.
6150  *
6151  *	LOCKING:
6152  *	Kernel thread context (may sleep)
6153  *
6154  *	RETURNS:
6155  *	The final register value.
6156  */
6157 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6158 		      unsigned long interval, unsigned long timeout)
6159 {
6160 	unsigned long deadline;
6161 	u32 tmp;
6162 
6163 	tmp = ioread32(reg);
6164 
6165 	/* Calculate timeout _after_ the first read to make sure
6166 	 * preceding writes reach the controller before starting to
6167 	 * eat away the timeout.
6168 	 */
6169 	deadline = ata_deadline(jiffies, timeout);
6170 
6171 	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6172 		msleep(interval);
6173 		tmp = ioread32(reg);
6174 	}
6175 
6176 	return tmp;
6177 }
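
/*
 * Example (sketch): waiting up to a second, polling every 10ms, for a
 * hypothetical BUSY bit in an MMIO status register to clear.  Because
 * the wait continues while (reg & mask) == val, passing the bit for
 * both @mask and @val waits for it to go low:
 *
 *	status = ata_wait_register(mmio + MY_STATUS, MY_BUSY, MY_BUSY,
 *				   10, 1000);
 *	if (status & MY_BUSY)
 *		return -EBUSY;
 */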
6178 
6179 /*
6180  * Dummy port_ops
6181  */
6182 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6183 {
6184 	return AC_ERR_SYSTEM;
6185 }
6186 
6187 static void ata_dummy_error_handler(struct ata_port *ap)
6188 {
6189 	/* truly dummy */
6190 }
6191 
6192 struct ata_port_operations ata_dummy_port_ops = {
6193 	.qc_prep		= ata_noop_qc_prep,
6194 	.qc_issue		= ata_dummy_qc_issue,
6195 	.error_handler		= ata_dummy_error_handler,
6196 };
6197 
6198 const struct ata_port_info ata_dummy_port_info = {
6199 	.port_ops		= &ata_dummy_port_ops,
6200 };
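
/*
 * Example (sketch): an LLD whose controller exposes a slot that isn't
 * actually usable can pass ata_dummy_port_info for it; libata then
 * reports the port as DUMMY and skips probing.  my_port_info is
 * hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &my_port_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, 2);
 */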
6201 
6202 /*
6203  * libata is essentially a library of internal helper functions for
6204  * low-level ATA host controller drivers.  As such, the API/ABI is
6205  * likely to change as new drivers are added and updated.
6206  * Do not depend on ABI/API stability.
6207  */
6208 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6209 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6210 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6211 EXPORT_SYMBOL_GPL(ata_base_port_ops);
6212 EXPORT_SYMBOL_GPL(sata_port_ops);
6213 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6214 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6215 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6216 EXPORT_SYMBOL_GPL(ata_host_init);
6217 EXPORT_SYMBOL_GPL(ata_host_alloc);
6218 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6219 EXPORT_SYMBOL_GPL(ata_host_start);
6220 EXPORT_SYMBOL_GPL(ata_host_register);
6221 EXPORT_SYMBOL_GPL(ata_host_activate);
6222 EXPORT_SYMBOL_GPL(ata_host_detach);
6223 EXPORT_SYMBOL_GPL(ata_sg_init);
6224 EXPORT_SYMBOL_GPL(ata_qc_complete);
6225 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6226 EXPORT_SYMBOL_GPL(atapi_cmd_type);
6227 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6228 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6229 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6230 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6231 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6232 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6233 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6234 EXPORT_SYMBOL_GPL(ata_mode_string);
6235 EXPORT_SYMBOL_GPL(ata_id_xfermask);
6236 EXPORT_SYMBOL_GPL(ata_port_start);
6237 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6238 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
6239 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6240 EXPORT_SYMBOL_GPL(ata_port_probe);
6241 EXPORT_SYMBOL_GPL(ata_dev_disable);
6242 EXPORT_SYMBOL_GPL(sata_set_spd);
6243 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
6244 EXPORT_SYMBOL_GPL(sata_link_debounce);
6245 EXPORT_SYMBOL_GPL(sata_link_resume);
6246 EXPORT_SYMBOL_GPL(ata_std_prereset);
6247 EXPORT_SYMBOL_GPL(sata_link_hardreset);
6248 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6249 EXPORT_SYMBOL_GPL(ata_std_postreset);
6250 EXPORT_SYMBOL_GPL(ata_dev_classify);
6251 EXPORT_SYMBOL_GPL(ata_dev_pair);
6252 EXPORT_SYMBOL_GPL(ata_port_disable);
6253 EXPORT_SYMBOL_GPL(ata_ratelimit);
6254 EXPORT_SYMBOL_GPL(ata_wait_register);
6255 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6256 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6257 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6258 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6259 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6260 EXPORT_SYMBOL_GPL(sata_scr_valid);
6261 EXPORT_SYMBOL_GPL(sata_scr_read);
6262 EXPORT_SYMBOL_GPL(sata_scr_write);
6263 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6264 EXPORT_SYMBOL_GPL(ata_link_online);
6265 EXPORT_SYMBOL_GPL(ata_link_offline);
6266 #ifdef CONFIG_PM
6267 EXPORT_SYMBOL_GPL(ata_host_suspend);
6268 EXPORT_SYMBOL_GPL(ata_host_resume);
6269 #endif /* CONFIG_PM */
6270 EXPORT_SYMBOL_GPL(ata_id_string);
6271 EXPORT_SYMBOL_GPL(ata_id_c_string);
6272 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6273 
6274 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6275 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
6276 EXPORT_SYMBOL_GPL(ata_timing_compute);
6277 EXPORT_SYMBOL_GPL(ata_timing_merge);
6278 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
6279 
6280 #ifdef CONFIG_PCI
6281 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6282 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6283 #ifdef CONFIG_PM
6284 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6285 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6286 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6287 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6288 #endif /* CONFIG_PM */
6289 #endif /* CONFIG_PCI */
6290 
6291 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6292 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6293 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
6294 EXPORT_SYMBOL_GPL(ata_port_desc);
6295 #ifdef CONFIG_PCI
6296 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6297 #endif /* CONFIG_PCI */
6298 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6299 EXPORT_SYMBOL_GPL(ata_link_abort);
6300 EXPORT_SYMBOL_GPL(ata_port_abort);
6301 EXPORT_SYMBOL_GPL(ata_port_freeze);
6302 EXPORT_SYMBOL_GPL(sata_async_notification);
6303 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6304 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6305 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6306 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6307 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
6308 EXPORT_SYMBOL_GPL(ata_do_eh);
6309 EXPORT_SYMBOL_GPL(ata_std_error_handler);
6310 
6311 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6312 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6313 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6314 EXPORT_SYMBOL_GPL(ata_cable_ignore);
6315 EXPORT_SYMBOL_GPL(ata_cable_sata);
6316